Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- llava_next/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc +3 -0
- llava_next/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_20_data.npz +3 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/README +76 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/__init__.py +452 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_bracket.py +666 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py +549 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py +316 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_constraints.py +590 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py +728 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py +693 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py +1951 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py +732 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_isotonic.py +158 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py +543 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_linesearch.py +896 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py +1434 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py +572 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_milp.py +392 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py +1164 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so +3 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_nonlin.py +1585 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_qap.py +731 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py +522 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_root.py +732 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py +525 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo.py +1598 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py +510 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion.py +304 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py +122 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py +438 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py +65 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd +11 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/linesearch.py +18 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/minpack.py +27 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py +19 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/nonlin.py +29 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/optimize.py +40 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tnc.py +22 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/zeros.py +26 -0
- llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/cosine_cdf.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/expn_asy.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/gammainc_asy.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/gammainc_data.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/hyp2f1_data.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -349,3 +349,5 @@ llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mores
|
|
| 349 |
llava_next/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 350 |
llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 351 |
llava_next/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 349 |
llava_next/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 350 |
llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 351 |
llava_next/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 352 |
+
llava_next/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 353 |
+
llava_next/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6c8ff3c6a3ffb2248f334cd8b237ecab483c6b6c57982eb950d95f3435bef174
|
| 3 |
+
size 130979
|
llava_next/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_20_data.npz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:14e222d34a7118c7284a1675c6feceee77b84df951a5c6ba2a5ee9ff3054fa1d
|
| 3 |
+
size 31231
|
llava_next/lib/python3.10/site-packages/scipy/optimize/README
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
From the website for the L-BFGS-B code (from at
|
| 2 |
+
http://www.ece.northwestern.edu/~nocedal/lbfgsb.html):
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
L-BFGS-B is a limited-memory quasi-Newton code for bound-constrained
|
| 6 |
+
optimization, i.e. for problems where the only constraints are of the
|
| 7 |
+
form l<= x <= u.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
This is a Python wrapper (using F2PY) written by David M. Cooke
|
| 11 |
+
<cookedm@physics.mcmaster.ca> and released as version 0.9 on April 9, 2004.
|
| 12 |
+
The wrapper was slightly modified by Joonas Paalasmaa for the 3.0 version
|
| 13 |
+
in March 2012.
|
| 14 |
+
|
| 15 |
+
License of L-BFGS-B (Fortran code)
|
| 16 |
+
==================================
|
| 17 |
+
|
| 18 |
+
The version included here (in lbfgsb.f) is 3.0 (released April 25, 2011). It was
|
| 19 |
+
written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal <nocedal@ece.nwu.edu>. It
|
| 20 |
+
carries the following condition for use:
|
| 21 |
+
|
| 22 |
+
"""
|
| 23 |
+
This software is freely available, but we expect that all publications
|
| 24 |
+
describing work using this software, or all commercial products using it,
|
| 25 |
+
quote at least one of the references given below. This software is released
|
| 26 |
+
under the BSD License.
|
| 27 |
+
|
| 28 |
+
References
|
| 29 |
+
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
|
| 30 |
+
Constrained Optimization, (1995), SIAM Journal on Scientific and
|
| 31 |
+
Statistical Computing, 16, 5, pp. 1190-1208.
|
| 32 |
+
* C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
|
| 33 |
+
FORTRAN routines for large scale bound constrained optimization (1997),
|
| 34 |
+
ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
|
| 35 |
+
* J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
|
| 36 |
+
FORTRAN routines for large scale bound constrained optimization (2011),
|
| 37 |
+
ACM Transactions on Mathematical Software, 38, 1.
|
| 38 |
+
"""
|
| 39 |
+
|
| 40 |
+
The Python wrapper
|
| 41 |
+
==================
|
| 42 |
+
|
| 43 |
+
This code uses F2PY (http://cens.ioc.ee/projects/f2py2e/) to generate
|
| 44 |
+
the wrapper around the Fortran code.
|
| 45 |
+
|
| 46 |
+
The Python code and wrapper are copyrighted 2004 by David M. Cooke
|
| 47 |
+
<cookedm@physics.mcmaster.ca>.
|
| 48 |
+
|
| 49 |
+
Example usage
|
| 50 |
+
=============
|
| 51 |
+
|
| 52 |
+
An example of the usage is given at the bottom of the lbfgsb.py file.
|
| 53 |
+
Run it with 'python lbfgsb.py'.
|
| 54 |
+
|
| 55 |
+
License for the Python wrapper
|
| 56 |
+
==============================
|
| 57 |
+
|
| 58 |
+
Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca>
|
| 59 |
+
|
| 60 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
| 61 |
+
this software and associated documentation files (the "Software"), to deal in
|
| 62 |
+
the Software without restriction, including without limitation the rights to
|
| 63 |
+
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
| 64 |
+
of the Software, and to permit persons to whom the Software is furnished to do
|
| 65 |
+
so, subject to the following conditions:
|
| 66 |
+
|
| 67 |
+
The above copyright notice and this permission notice shall be included in all
|
| 68 |
+
copies or substantial portions of the Software.
|
| 69 |
+
|
| 70 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 71 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 72 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 73 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 74 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 75 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 76 |
+
SOFTWARE.
|
llava_next/lib/python3.10/site-packages/scipy/optimize/__init__.py
ADDED
|
@@ -0,0 +1,452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
=====================================================
|
| 3 |
+
Optimization and root finding (:mod:`scipy.optimize`)
|
| 4 |
+
=====================================================
|
| 5 |
+
|
| 6 |
+
.. currentmodule:: scipy.optimize
|
| 7 |
+
|
| 8 |
+
.. toctree::
|
| 9 |
+
:hidden:
|
| 10 |
+
|
| 11 |
+
optimize.cython_optimize
|
| 12 |
+
|
| 13 |
+
SciPy ``optimize`` provides functions for minimizing (or maximizing)
|
| 14 |
+
objective functions, possibly subject to constraints. It includes
|
| 15 |
+
solvers for nonlinear problems (with support for both local and global
|
| 16 |
+
optimization algorithms), linear programming, constrained
|
| 17 |
+
and nonlinear least-squares, root finding, and curve fitting.
|
| 18 |
+
|
| 19 |
+
Common functions and objects, shared across different solvers, are:
|
| 20 |
+
|
| 21 |
+
.. autosummary::
|
| 22 |
+
:toctree: generated/
|
| 23 |
+
|
| 24 |
+
show_options - Show specific options optimization solvers.
|
| 25 |
+
OptimizeResult - The optimization result returned by some optimizers.
|
| 26 |
+
OptimizeWarning - The optimization encountered problems.
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
Optimization
|
| 30 |
+
============
|
| 31 |
+
|
| 32 |
+
Scalar functions optimization
|
| 33 |
+
-----------------------------
|
| 34 |
+
|
| 35 |
+
.. autosummary::
|
| 36 |
+
:toctree: generated/
|
| 37 |
+
|
| 38 |
+
minimize_scalar - Interface for minimizers of univariate functions
|
| 39 |
+
|
| 40 |
+
The `minimize_scalar` function supports the following methods:
|
| 41 |
+
|
| 42 |
+
.. toctree::
|
| 43 |
+
|
| 44 |
+
optimize.minimize_scalar-brent
|
| 45 |
+
optimize.minimize_scalar-bounded
|
| 46 |
+
optimize.minimize_scalar-golden
|
| 47 |
+
|
| 48 |
+
Local (multivariate) optimization
|
| 49 |
+
---------------------------------
|
| 50 |
+
|
| 51 |
+
.. autosummary::
|
| 52 |
+
:toctree: generated/
|
| 53 |
+
|
| 54 |
+
minimize - Interface for minimizers of multivariate functions.
|
| 55 |
+
|
| 56 |
+
The `minimize` function supports the following methods:
|
| 57 |
+
|
| 58 |
+
.. toctree::
|
| 59 |
+
|
| 60 |
+
optimize.minimize-neldermead
|
| 61 |
+
optimize.minimize-powell
|
| 62 |
+
optimize.minimize-cg
|
| 63 |
+
optimize.minimize-bfgs
|
| 64 |
+
optimize.minimize-newtoncg
|
| 65 |
+
optimize.minimize-lbfgsb
|
| 66 |
+
optimize.minimize-tnc
|
| 67 |
+
optimize.minimize-cobyla
|
| 68 |
+
optimize.minimize-cobyqa
|
| 69 |
+
optimize.minimize-slsqp
|
| 70 |
+
optimize.minimize-trustconstr
|
| 71 |
+
optimize.minimize-dogleg
|
| 72 |
+
optimize.minimize-trustncg
|
| 73 |
+
optimize.minimize-trustkrylov
|
| 74 |
+
optimize.minimize-trustexact
|
| 75 |
+
|
| 76 |
+
Constraints are passed to `minimize` function as a single object or
|
| 77 |
+
as a list of objects from the following classes:
|
| 78 |
+
|
| 79 |
+
.. autosummary::
|
| 80 |
+
:toctree: generated/
|
| 81 |
+
|
| 82 |
+
NonlinearConstraint - Class defining general nonlinear constraints.
|
| 83 |
+
LinearConstraint - Class defining general linear constraints.
|
| 84 |
+
|
| 85 |
+
Simple bound constraints are handled separately and there is a special class
|
| 86 |
+
for them:
|
| 87 |
+
|
| 88 |
+
.. autosummary::
|
| 89 |
+
:toctree: generated/
|
| 90 |
+
|
| 91 |
+
Bounds - Bound constraints.
|
| 92 |
+
|
| 93 |
+
Quasi-Newton strategies implementing `HessianUpdateStrategy`
|
| 94 |
+
interface can be used to approximate the Hessian in `minimize`
|
| 95 |
+
function (available only for the 'trust-constr' method). Available
|
| 96 |
+
quasi-Newton methods implementing this interface are:
|
| 97 |
+
|
| 98 |
+
.. autosummary::
|
| 99 |
+
:toctree: generated/
|
| 100 |
+
|
| 101 |
+
BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
|
| 102 |
+
SR1 - Symmetric-rank-1 Hessian update strategy.
|
| 103 |
+
|
| 104 |
+
.. _global_optimization:
|
| 105 |
+
|
| 106 |
+
Global optimization
|
| 107 |
+
-------------------
|
| 108 |
+
|
| 109 |
+
.. autosummary::
|
| 110 |
+
:toctree: generated/
|
| 111 |
+
|
| 112 |
+
basinhopping - Basinhopping stochastic optimizer.
|
| 113 |
+
brute - Brute force searching optimizer.
|
| 114 |
+
differential_evolution - Stochastic optimizer using differential evolution.
|
| 115 |
+
|
| 116 |
+
shgo - Simplicial homology global optimizer.
|
| 117 |
+
dual_annealing - Dual annealing stochastic optimizer.
|
| 118 |
+
direct - DIRECT (Dividing Rectangles) optimizer.
|
| 119 |
+
|
| 120 |
+
Least-squares and curve fitting
|
| 121 |
+
===============================
|
| 122 |
+
|
| 123 |
+
Nonlinear least-squares
|
| 124 |
+
-----------------------
|
| 125 |
+
|
| 126 |
+
.. autosummary::
|
| 127 |
+
:toctree: generated/
|
| 128 |
+
|
| 129 |
+
least_squares - Solve a nonlinear least-squares problem with bounds on the variables.
|
| 130 |
+
|
| 131 |
+
Linear least-squares
|
| 132 |
+
--------------------
|
| 133 |
+
|
| 134 |
+
.. autosummary::
|
| 135 |
+
:toctree: generated/
|
| 136 |
+
|
| 137 |
+
nnls - Linear least-squares problem with non-negativity constraint.
|
| 138 |
+
lsq_linear - Linear least-squares problem with bound constraints.
|
| 139 |
+
isotonic_regression - Least squares problem of isotonic regression via PAVA.
|
| 140 |
+
|
| 141 |
+
Curve fitting
|
| 142 |
+
-------------
|
| 143 |
+
|
| 144 |
+
.. autosummary::
|
| 145 |
+
:toctree: generated/
|
| 146 |
+
|
| 147 |
+
curve_fit -- Fit curve to a set of points.
|
| 148 |
+
|
| 149 |
+
Root finding
|
| 150 |
+
============
|
| 151 |
+
|
| 152 |
+
Scalar functions
|
| 153 |
+
----------------
|
| 154 |
+
.. autosummary::
|
| 155 |
+
:toctree: generated/
|
| 156 |
+
|
| 157 |
+
root_scalar - Unified interface for nonlinear solvers of scalar functions.
|
| 158 |
+
brentq - quadratic interpolation Brent method.
|
| 159 |
+
brenth - Brent method, modified by Harris with hyperbolic extrapolation.
|
| 160 |
+
ridder - Ridder's method.
|
| 161 |
+
bisect - Bisection method.
|
| 162 |
+
newton - Newton's method (also Secant and Halley's methods).
|
| 163 |
+
toms748 - Alefeld, Potra & Shi Algorithm 748.
|
| 164 |
+
RootResults - The root finding result returned by some root finders.
|
| 165 |
+
|
| 166 |
+
The `root_scalar` function supports the following methods:
|
| 167 |
+
|
| 168 |
+
.. toctree::
|
| 169 |
+
|
| 170 |
+
optimize.root_scalar-brentq
|
| 171 |
+
optimize.root_scalar-brenth
|
| 172 |
+
optimize.root_scalar-bisect
|
| 173 |
+
optimize.root_scalar-ridder
|
| 174 |
+
optimize.root_scalar-newton
|
| 175 |
+
optimize.root_scalar-toms748
|
| 176 |
+
optimize.root_scalar-secant
|
| 177 |
+
optimize.root_scalar-halley
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
The table below lists situations and appropriate methods, along with
|
| 182 |
+
*asymptotic* convergence rates per iteration (and per function evaluation)
|
| 183 |
+
for successful convergence to a simple root(*).
|
| 184 |
+
Bisection is the slowest of them all, adding one bit of accuracy for each
|
| 185 |
+
function evaluation, but is guaranteed to converge.
|
| 186 |
+
The other bracketing methods all (eventually) increase the number of accurate
|
| 187 |
+
bits by about 50% for every function evaluation.
|
| 188 |
+
The derivative-based methods, all built on `newton`, can converge quite quickly
|
| 189 |
+
if the initial value is close to the root. They can also be applied to
|
| 190 |
+
functions defined on (a subset of) the complex plane.
|
| 191 |
+
|
| 192 |
+
+-------------+----------+----------+-----------+-------------+-------------+----------------+
|
| 193 |
+
| Domain of f | Bracket? | Derivatives? | Solvers | Convergence |
|
| 194 |
+
+ + +----------+-----------+ +-------------+----------------+
|
| 195 |
+
| | | `fprime` | `fprime2` | | Guaranteed? | Rate(s)(*) |
|
| 196 |
+
+=============+==========+==========+===========+=============+=============+================+
|
| 197 |
+
| `R` | Yes | N/A | N/A | - bisection | - Yes | - 1 "Linear" |
|
| 198 |
+
| | | | | - brentq | - Yes | - >=1, <= 1.62 |
|
| 199 |
+
| | | | | - brenth | - Yes | - >=1, <= 1.62 |
|
| 200 |
+
| | | | | - ridder | - Yes | - 2.0 (1.41) |
|
| 201 |
+
| | | | | - toms748 | - Yes | - 2.7 (1.65) |
|
| 202 |
+
+-------------+----------+----------+-----------+-------------+-------------+----------------+
|
| 203 |
+
| `R` or `C` | No | No | No | secant | No | 1.62 (1.62) |
|
| 204 |
+
+-------------+----------+----------+-----------+-------------+-------------+----------------+
|
| 205 |
+
| `R` or `C` | No | Yes | No | newton | No | 2.00 (1.41) |
|
| 206 |
+
+-------------+----------+----------+-----------+-------------+-------------+----------------+
|
| 207 |
+
| `R` or `C` | No | Yes | Yes | halley | No | 3.00 (1.44) |
|
| 208 |
+
+-------------+----------+----------+-----------+-------------+-------------+----------------+
|
| 209 |
+
|
| 210 |
+
.. seealso::
|
| 211 |
+
|
| 212 |
+
`scipy.optimize.cython_optimize` -- Typed Cython versions of root finding functions
|
| 213 |
+
|
| 214 |
+
Fixed point finding:
|
| 215 |
+
|
| 216 |
+
.. autosummary::
|
| 217 |
+
:toctree: generated/
|
| 218 |
+
|
| 219 |
+
fixed_point - Single-variable fixed-point solver.
|
| 220 |
+
|
| 221 |
+
Multidimensional
|
| 222 |
+
----------------
|
| 223 |
+
|
| 224 |
+
.. autosummary::
|
| 225 |
+
:toctree: generated/
|
| 226 |
+
|
| 227 |
+
root - Unified interface for nonlinear solvers of multivariate functions.
|
| 228 |
+
|
| 229 |
+
The `root` function supports the following methods:
|
| 230 |
+
|
| 231 |
+
.. toctree::
|
| 232 |
+
|
| 233 |
+
optimize.root-hybr
|
| 234 |
+
optimize.root-lm
|
| 235 |
+
optimize.root-broyden1
|
| 236 |
+
optimize.root-broyden2
|
| 237 |
+
optimize.root-anderson
|
| 238 |
+
optimize.root-linearmixing
|
| 239 |
+
optimize.root-diagbroyden
|
| 240 |
+
optimize.root-excitingmixing
|
| 241 |
+
optimize.root-krylov
|
| 242 |
+
optimize.root-dfsane
|
| 243 |
+
|
| 244 |
+
Linear programming / MILP
|
| 245 |
+
=========================
|
| 246 |
+
|
| 247 |
+
.. autosummary::
|
| 248 |
+
:toctree: generated/
|
| 249 |
+
|
| 250 |
+
milp -- Mixed integer linear programming.
|
| 251 |
+
linprog -- Unified interface for minimizers of linear programming problems.
|
| 252 |
+
|
| 253 |
+
The `linprog` function supports the following methods:
|
| 254 |
+
|
| 255 |
+
.. toctree::
|
| 256 |
+
|
| 257 |
+
optimize.linprog-simplex
|
| 258 |
+
optimize.linprog-interior-point
|
| 259 |
+
optimize.linprog-revised_simplex
|
| 260 |
+
optimize.linprog-highs-ipm
|
| 261 |
+
optimize.linprog-highs-ds
|
| 262 |
+
optimize.linprog-highs
|
| 263 |
+
|
| 264 |
+
The simplex, interior-point, and revised simplex methods support callback
|
| 265 |
+
functions, such as:
|
| 266 |
+
|
| 267 |
+
.. autosummary::
|
| 268 |
+
:toctree: generated/
|
| 269 |
+
|
| 270 |
+
linprog_verbose_callback -- Sample callback function for linprog (simplex).
|
| 271 |
+
|
| 272 |
+
Assignment problems
|
| 273 |
+
===================
|
| 274 |
+
|
| 275 |
+
.. autosummary::
|
| 276 |
+
:toctree: generated/
|
| 277 |
+
|
| 278 |
+
linear_sum_assignment -- Solves the linear-sum assignment problem.
|
| 279 |
+
quadratic_assignment -- Solves the quadratic assignment problem.
|
| 280 |
+
|
| 281 |
+
The `quadratic_assignment` function supports the following methods:
|
| 282 |
+
|
| 283 |
+
.. toctree::
|
| 284 |
+
|
| 285 |
+
optimize.qap-faq
|
| 286 |
+
optimize.qap-2opt
|
| 287 |
+
|
| 288 |
+
Utilities
|
| 289 |
+
=========
|
| 290 |
+
|
| 291 |
+
Finite-difference approximation
|
| 292 |
+
-------------------------------
|
| 293 |
+
|
| 294 |
+
.. autosummary::
|
| 295 |
+
:toctree: generated/
|
| 296 |
+
|
| 297 |
+
approx_fprime - Approximate the gradient of a scalar function.
|
| 298 |
+
check_grad - Check the supplied derivative using finite differences.
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
Line search
|
| 302 |
+
-----------
|
| 303 |
+
|
| 304 |
+
.. autosummary::
|
| 305 |
+
:toctree: generated/
|
| 306 |
+
|
| 307 |
+
bracket - Bracket a minimum, given two starting points.
|
| 308 |
+
line_search - Return a step that satisfies the strong Wolfe conditions.
|
| 309 |
+
|
| 310 |
+
Hessian approximation
|
| 311 |
+
---------------------
|
| 312 |
+
|
| 313 |
+
.. autosummary::
|
| 314 |
+
:toctree: generated/
|
| 315 |
+
|
| 316 |
+
LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian.
|
| 317 |
+
HessianUpdateStrategy - Interface for implementing Hessian update strategies
|
| 318 |
+
|
| 319 |
+
Benchmark problems
|
| 320 |
+
------------------
|
| 321 |
+
|
| 322 |
+
.. autosummary::
|
| 323 |
+
:toctree: generated/
|
| 324 |
+
|
| 325 |
+
rosen - The Rosenbrock function.
|
| 326 |
+
rosen_der - The derivative of the Rosenbrock function.
|
| 327 |
+
rosen_hess - The Hessian matrix of the Rosenbrock function.
|
| 328 |
+
rosen_hess_prod - Product of the Rosenbrock Hessian with a vector.
|
| 329 |
+
|
| 330 |
+
Legacy functions
|
| 331 |
+
================
|
| 332 |
+
|
| 333 |
+
The functions below are not recommended for use in new scripts;
|
| 334 |
+
all of these methods are accessible via a newer, more consistent
|
| 335 |
+
interfaces, provided by the interfaces above.
|
| 336 |
+
|
| 337 |
+
Optimization
|
| 338 |
+
------------
|
| 339 |
+
|
| 340 |
+
General-purpose multivariate methods:
|
| 341 |
+
|
| 342 |
+
.. autosummary::
|
| 343 |
+
:toctree: generated/
|
| 344 |
+
|
| 345 |
+
fmin - Nelder-Mead Simplex algorithm.
|
| 346 |
+
fmin_powell - Powell's (modified) conjugate direction method.
|
| 347 |
+
fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm.
|
| 348 |
+
fmin_bfgs - Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno).
|
| 349 |
+
fmin_ncg - Line-search Newton Conjugate Gradient.
|
| 350 |
+
|
| 351 |
+
Constrained multivariate methods:
|
| 352 |
+
|
| 353 |
+
.. autosummary::
|
| 354 |
+
:toctree: generated/
|
| 355 |
+
|
| 356 |
+
fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer.
|
| 357 |
+
fmin_tnc - Truncated Newton code.
|
| 358 |
+
fmin_cobyla - Constrained optimization by linear approximation.
|
| 359 |
+
fmin_slsqp - Minimization using sequential least-squares programming.
|
| 360 |
+
|
| 361 |
+
Univariate (scalar) minimization methods:
|
| 362 |
+
|
| 363 |
+
.. autosummary::
|
| 364 |
+
:toctree: generated/
|
| 365 |
+
|
| 366 |
+
fminbound - Bounded minimization of a scalar function.
|
| 367 |
+
brent - 1-D function minimization using Brent method.
|
| 368 |
+
golden - 1-D function minimization using Golden Section method.
|
| 369 |
+
|
| 370 |
+
Least-squares
|
| 371 |
+
-------------
|
| 372 |
+
|
| 373 |
+
.. autosummary::
|
| 374 |
+
:toctree: generated/
|
| 375 |
+
|
| 376 |
+
leastsq - Minimize the sum of squares of M equations in N unknowns.
|
| 377 |
+
|
| 378 |
+
Root finding
|
| 379 |
+
------------
|
| 380 |
+
|
| 381 |
+
General nonlinear solvers:
|
| 382 |
+
|
| 383 |
+
.. autosummary::
|
| 384 |
+
:toctree: generated/
|
| 385 |
+
|
| 386 |
+
fsolve - Non-linear multivariable equation solver.
|
| 387 |
+
broyden1 - Broyden's first method.
|
| 388 |
+
broyden2 - Broyden's second method.
|
| 389 |
+
NoConvergence - Exception raised when nonlinear solver does not converge.
|
| 390 |
+
|
| 391 |
+
Large-scale nonlinear solvers:
|
| 392 |
+
|
| 393 |
+
.. autosummary::
|
| 394 |
+
:toctree: generated/
|
| 395 |
+
|
| 396 |
+
newton_krylov
|
| 397 |
+
anderson
|
| 398 |
+
|
| 399 |
+
BroydenFirst
|
| 400 |
+
InverseJacobian
|
| 401 |
+
KrylovJacobian
|
| 402 |
+
|
| 403 |
+
Simple iteration solvers:
|
| 404 |
+
|
| 405 |
+
.. autosummary::
|
| 406 |
+
:toctree: generated/
|
| 407 |
+
|
| 408 |
+
excitingmixing
|
| 409 |
+
linearmixing
|
| 410 |
+
diagbroyden
|
| 411 |
+
|
| 412 |
+
""" # noqa: E501
|
| 413 |
+
|
| 414 |
+
from ._optimize import *
|
| 415 |
+
from ._minimize import *
|
| 416 |
+
from ._root import *
|
| 417 |
+
from ._root_scalar import *
|
| 418 |
+
from ._minpack_py import *
|
| 419 |
+
from ._zeros_py import *
|
| 420 |
+
from ._lbfgsb_py import fmin_l_bfgs_b, LbfgsInvHessProduct
|
| 421 |
+
from ._tnc import fmin_tnc
|
| 422 |
+
from ._cobyla_py import fmin_cobyla
|
| 423 |
+
from ._nonlin import *
|
| 424 |
+
from ._slsqp_py import fmin_slsqp
|
| 425 |
+
from ._nnls import nnls
|
| 426 |
+
from ._basinhopping import basinhopping
|
| 427 |
+
from ._linprog import linprog, linprog_verbose_callback
|
| 428 |
+
from ._lsap import linear_sum_assignment
|
| 429 |
+
from ._differentialevolution import differential_evolution
|
| 430 |
+
from ._lsq import least_squares, lsq_linear
|
| 431 |
+
from ._isotonic import isotonic_regression
|
| 432 |
+
from ._constraints import (NonlinearConstraint,
|
| 433 |
+
LinearConstraint,
|
| 434 |
+
Bounds)
|
| 435 |
+
from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1
|
| 436 |
+
from ._shgo import shgo
|
| 437 |
+
from ._dual_annealing import dual_annealing
|
| 438 |
+
from ._qap import quadratic_assignment
|
| 439 |
+
from ._direct_py import direct
|
| 440 |
+
from ._milp import milp
|
| 441 |
+
|
| 442 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
| 443 |
+
from . import (
|
| 444 |
+
cobyla, lbfgsb, linesearch, minpack, minpack2, moduleTNC, nonlin, optimize,
|
| 445 |
+
slsqp, tnc, zeros
|
| 446 |
+
)
|
| 447 |
+
|
| 448 |
+
__all__ = [s for s in dir() if not s.startswith('_')]
|
| 449 |
+
|
| 450 |
+
from scipy._lib._testutils import PytestTester
|
| 451 |
+
test = PytestTester(__name__)
|
| 452 |
+
del PytestTester
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_bracket.py
ADDED
|
@@ -0,0 +1,666 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import scipy._lib._elementwise_iterative_method as eim
|
| 3 |
+
from scipy._lib._util import _RichResult
|
| 4 |
+
|
| 5 |
+
_ELIMITS = -1 # used in _bracket_root
|
| 6 |
+
_ESTOPONESIDE = 2 # used in _bracket_root
|
| 7 |
+
|
| 8 |
+
def _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter):
|
| 9 |
+
|
| 10 |
+
if not callable(func):
|
| 11 |
+
raise ValueError('`func` must be callable.')
|
| 12 |
+
|
| 13 |
+
if not np.iterable(args):
|
| 14 |
+
args = (args,)
|
| 15 |
+
|
| 16 |
+
xl0 = np.asarray(xl0)[()]
|
| 17 |
+
if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any():
|
| 18 |
+
raise ValueError('`xl0` must be numeric and real.')
|
| 19 |
+
|
| 20 |
+
xr0 = xl0 + 1 if xr0 is None else xr0
|
| 21 |
+
xmin = -np.inf if xmin is None else xmin
|
| 22 |
+
xmax = np.inf if xmax is None else xmax
|
| 23 |
+
factor = 2. if factor is None else factor
|
| 24 |
+
xl0, xr0, xmin, xmax, factor = np.broadcast_arrays(xl0, xr0, xmin, xmax, factor)
|
| 25 |
+
|
| 26 |
+
if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any():
|
| 27 |
+
raise ValueError('`xr0` must be numeric and real.')
|
| 28 |
+
|
| 29 |
+
if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any():
|
| 30 |
+
raise ValueError('`xmin` must be numeric and real.')
|
| 31 |
+
|
| 32 |
+
if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any():
|
| 33 |
+
raise ValueError('`xmax` must be numeric and real.')
|
| 34 |
+
|
| 35 |
+
if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any():
|
| 36 |
+
raise ValueError('`factor` must be numeric and real.')
|
| 37 |
+
if not np.all(factor > 1):
|
| 38 |
+
raise ValueError('All elements of `factor` must be greater than 1.')
|
| 39 |
+
|
| 40 |
+
maxiter = np.asarray(maxiter)
|
| 41 |
+
message = '`maxiter` must be a non-negative integer.'
|
| 42 |
+
if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple()
|
| 43 |
+
or np.iscomplex(maxiter)):
|
| 44 |
+
raise ValueError(message)
|
| 45 |
+
maxiter_int = int(maxiter[()])
|
| 46 |
+
if not maxiter == maxiter_int or maxiter < 0:
|
| 47 |
+
raise ValueError(message)
|
| 48 |
+
|
| 49 |
+
return func, xl0, xr0, xmin, xmax, factor, args, maxiter
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _bracket_root(func, xl0, xr0=None, *, xmin=None, xmax=None, factor=None,
|
| 53 |
+
args=(), maxiter=1000):
|
| 54 |
+
"""Bracket the root of a monotonic scalar function of one variable
|
| 55 |
+
|
| 56 |
+
This function works elementwise when `xl0`, `xr0`, `xmin`, `xmax`, `factor`, and
|
| 57 |
+
the elements of `args` are broadcastable arrays.
|
| 58 |
+
|
| 59 |
+
Parameters
|
| 60 |
+
----------
|
| 61 |
+
func : callable
|
| 62 |
+
The function for which the root is to be bracketed.
|
| 63 |
+
The signature must be::
|
| 64 |
+
|
| 65 |
+
func(x: ndarray, *args) -> ndarray
|
| 66 |
+
|
| 67 |
+
where each element of ``x`` is a finite real and ``args`` is a tuple,
|
| 68 |
+
which may contain an arbitrary number of arrays that are broadcastable
|
| 69 |
+
with `x`. ``func`` must be an elementwise function: each element
|
| 70 |
+
``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
|
| 71 |
+
xl0, xr0: float array_like
|
| 72 |
+
Starting guess of bracket, which need not contain a root. If `xr0` is
|
| 73 |
+
not provided, ``xr0 = xl0 + 1``. Must be broadcastable with one another.
|
| 74 |
+
xmin, xmax : float array_like, optional
|
| 75 |
+
Minimum and maximum allowable endpoints of the bracket, inclusive. Must
|
| 76 |
+
be broadcastable with `xl0` and `xr0`.
|
| 77 |
+
factor : float array_like, default: 2
|
| 78 |
+
The factor used to grow the bracket. See notes for details.
|
| 79 |
+
args : tuple, optional
|
| 80 |
+
Additional positional arguments to be passed to `func`. Must be arrays
|
| 81 |
+
broadcastable with `xl0`, `xr0`, `xmin`, and `xmax`. If the callable to be
|
| 82 |
+
bracketed requires arguments that are not broadcastable with these
|
| 83 |
+
arrays, wrap that callable with `func` such that `func` accepts
|
| 84 |
+
only `x` and broadcastable arrays.
|
| 85 |
+
maxiter : int, optional
|
| 86 |
+
The maximum number of iterations of the algorithm to perform.
|
| 87 |
+
|
| 88 |
+
Returns
|
| 89 |
+
-------
|
| 90 |
+
res : _RichResult
|
| 91 |
+
An instance of `scipy._lib._util._RichResult` with the following
|
| 92 |
+
attributes. The descriptions are written as though the values will be
|
| 93 |
+
scalars; however, if `func` returns an array, the outputs will be
|
| 94 |
+
arrays of the same shape.
|
| 95 |
+
|
| 96 |
+
xl, xr : float
|
| 97 |
+
The lower and upper ends of the bracket, if the algorithm
|
| 98 |
+
terminated successfully.
|
| 99 |
+
fl, fr : float
|
| 100 |
+
The function value at the lower and upper ends of the bracket.
|
| 101 |
+
nfev : int
|
| 102 |
+
The number of function evaluations required to find the bracket.
|
| 103 |
+
This is distinct from the number of times `func` is *called*
|
| 104 |
+
because the function may evaluated at multiple points in a single
|
| 105 |
+
call.
|
| 106 |
+
nit : int
|
| 107 |
+
The number of iterations of the algorithm that were performed.
|
| 108 |
+
status : int
|
| 109 |
+
An integer representing the exit status of the algorithm.
|
| 110 |
+
|
| 111 |
+
- ``0`` : The algorithm produced a valid bracket.
|
| 112 |
+
- ``-1`` : The bracket expanded to the allowable limits without finding a bracket.
|
| 113 |
+
- ``-2`` : The maximum number of iterations was reached.
|
| 114 |
+
- ``-3`` : A non-finite value was encountered.
|
| 115 |
+
- ``-4`` : Iteration was terminated by `callback`.
|
| 116 |
+
- ``-5``: The initial bracket does not satisfy `xmin <= xl0 < xr0 < xmax`.
|
| 117 |
+
- ``1`` : The algorithm is proceeding normally (in `callback` only).
|
| 118 |
+
- ``2`` : A bracket was found in the opposite search direction (in `callback` only).
|
| 119 |
+
|
| 120 |
+
success : bool
|
| 121 |
+
``True`` when the algorithm terminated successfully (status ``0``).
|
| 122 |
+
|
| 123 |
+
Notes
|
| 124 |
+
-----
|
| 125 |
+
This function generalizes an algorithm found in pieces throughout
|
| 126 |
+
`scipy.stats`. The strategy is to iteratively grow the bracket `(l, r)`
|
| 127 |
+
until ``func(l) < 0 < func(r)``. The bracket grows to the left as follows.
|
| 128 |
+
|
| 129 |
+
- If `xmin` is not provided, the distance between `xl0` and `l` is iteratively
|
| 130 |
+
increased by `factor`.
|
| 131 |
+
- If `xmin` is provided, the distance between `xmin` and `l` is iteratively
|
| 132 |
+
decreased by `factor`. Note that this also *increases* the bracket size.
|
| 133 |
+
|
| 134 |
+
Growth of the bracket to the right is analogous.
|
| 135 |
+
|
| 136 |
+
Growth of the bracket in one direction stops when the endpoint is no longer
|
| 137 |
+
finite, the function value at the endpoint is no longer finite, or the
|
| 138 |
+
endpoint reaches its limiting value (`xmin` or `xmax`). Iteration terminates
|
| 139 |
+
when the bracket stops growing in both directions, the bracket surrounds
|
| 140 |
+
the root, or a root is found (accidentally).
|
| 141 |
+
|
| 142 |
+
If two brackets are found - that is, a bracket is found on both sides in
|
| 143 |
+
the same iteration, the smaller of the two is returned.
|
| 144 |
+
If roots of the function are found, both `l` and `r` are set to the
|
| 145 |
+
leftmost root.
|
| 146 |
+
|
| 147 |
+
""" # noqa: E501
|
| 148 |
+
# Todo:
|
| 149 |
+
# - find bracket with sign change in specified direction
|
| 150 |
+
# - Add tolerance
|
| 151 |
+
# - allow factor < 1?
|
| 152 |
+
|
| 153 |
+
callback = None # works; I just don't want to test it
|
| 154 |
+
temp = _bracket_root_iv(func, xl0, xr0, xmin, xmax, factor, args, maxiter)
|
| 155 |
+
func, xl0, xr0, xmin, xmax, factor, args, maxiter = temp
|
| 156 |
+
|
| 157 |
+
xs = (xl0, xr0)
|
| 158 |
+
temp = eim._initialize(func, xs, args)
|
| 159 |
+
func, xs, fs, args, shape, dtype, xp = temp # line split for PEP8
|
| 160 |
+
xl0, xr0 = xs
|
| 161 |
+
xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel()
|
| 162 |
+
xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel()
|
| 163 |
+
invalid_bracket = ~((xmin <= xl0) & (xl0 < xr0) & (xr0 <= xmax))
|
| 164 |
+
|
| 165 |
+
# The approach is to treat the left and right searches as though they were
|
| 166 |
+
# (almost) totally independent one-sided bracket searches. (The interaction
|
| 167 |
+
# is considered when checking for termination and preparing the result
|
| 168 |
+
# object.)
|
| 169 |
+
# `x` is the "moving" end of the bracket
|
| 170 |
+
x = np.concatenate(xs)
|
| 171 |
+
f = np.concatenate(fs)
|
| 172 |
+
invalid_bracket = np.concatenate((invalid_bracket, invalid_bracket))
|
| 173 |
+
n = len(x) // 2
|
| 174 |
+
|
| 175 |
+
# `x_last` is the previous location of the moving end of the bracket. If
|
| 176 |
+
# the signs of `f` and `f_last` are different, `x` and `x_last` form a
|
| 177 |
+
# bracket.
|
| 178 |
+
x_last = np.concatenate((x[n:], x[:n]))
|
| 179 |
+
f_last = np.concatenate((f[n:], f[:n]))
|
| 180 |
+
# `x0` is the "fixed" end of the bracket.
|
| 181 |
+
x0 = x_last
|
| 182 |
+
# We don't need to retain the corresponding function value, since the
|
| 183 |
+
# fixed end of the bracket is only needed to compute the new value of the
|
| 184 |
+
# moving end; it is never returned.
|
| 185 |
+
limit = np.concatenate((xmin, xmax))
|
| 186 |
+
|
| 187 |
+
factor = np.broadcast_to(factor, shape).astype(dtype, copy=False).ravel()
|
| 188 |
+
factor = np.concatenate((factor, factor))
|
| 189 |
+
|
| 190 |
+
active = np.arange(2*n)
|
| 191 |
+
args = [np.concatenate((arg, arg)) for arg in args]
|
| 192 |
+
|
| 193 |
+
# This is needed due to inner workings of `eim._loop`.
|
| 194 |
+
# We're abusing it a tiny bit.
|
| 195 |
+
shape = shape + (2,)
|
| 196 |
+
|
| 197 |
+
# `d` is for "distance".
|
| 198 |
+
# For searches without a limit, the distance between the fixed end of the
|
| 199 |
+
# bracket `x0` and the moving end `x` will grow by `factor` each iteration.
|
| 200 |
+
# For searches with a limit, the distance between the `limit` and moving
|
| 201 |
+
# end of the bracket `x` will shrink by `factor` each iteration.
|
| 202 |
+
i = np.isinf(limit)
|
| 203 |
+
ni = ~i
|
| 204 |
+
d = np.zeros_like(x)
|
| 205 |
+
d[i] = x[i] - x0[i]
|
| 206 |
+
d[ni] = limit[ni] - x[ni]
|
| 207 |
+
|
| 208 |
+
status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress
|
| 209 |
+
status[invalid_bracket] = eim._EINPUTERR
|
| 210 |
+
nit, nfev = 0, 1 # one function evaluation per side performed above
|
| 211 |
+
|
| 212 |
+
work = _RichResult(x=x, x0=x0, f=f, limit=limit, factor=factor,
|
| 213 |
+
active=active, d=d, x_last=x_last, f_last=f_last,
|
| 214 |
+
nit=nit, nfev=nfev, status=status, args=args,
|
| 215 |
+
xl=None, xr=None, fl=None, fr=None, n=n)
|
| 216 |
+
res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xr', 'xr'),
|
| 217 |
+
('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'),
|
| 218 |
+
('fr', 'fr'), ('x', 'x'), ('f', 'f'),
|
| 219 |
+
('x_last', 'x_last'), ('f_last', 'f_last')]
|
| 220 |
+
|
| 221 |
+
def pre_func_eval(work):
|
| 222 |
+
# Initialize moving end of bracket
|
| 223 |
+
x = np.zeros_like(work.x)
|
| 224 |
+
|
| 225 |
+
# Unlimited brackets grow by `factor` by increasing distance from fixed
|
| 226 |
+
# end to moving end.
|
| 227 |
+
i = np.isinf(work.limit) # indices of unlimited brackets
|
| 228 |
+
work.d[i] *= work.factor[i]
|
| 229 |
+
x[i] = work.x0[i] + work.d[i]
|
| 230 |
+
|
| 231 |
+
# Limited brackets grow by decreasing the distance from the limit to
|
| 232 |
+
# the moving end.
|
| 233 |
+
ni = ~i # indices of limited brackets
|
| 234 |
+
work.d[ni] /= work.factor[ni]
|
| 235 |
+
x[ni] = work.limit[ni] - work.d[ni]
|
| 236 |
+
|
| 237 |
+
return x
|
| 238 |
+
|
| 239 |
+
def post_func_eval(x, f, work):
|
| 240 |
+
# Keep track of the previous location of the moving end so that we can
|
| 241 |
+
# return a narrower bracket. (The alternative is to remember the
|
| 242 |
+
# original fixed end, but then the bracket would be wider than needed.)
|
| 243 |
+
work.x_last = work.x
|
| 244 |
+
work.f_last = work.f
|
| 245 |
+
work.x = x
|
| 246 |
+
work.f = f
|
| 247 |
+
|
| 248 |
+
def check_termination(work):
|
| 249 |
+
# Condition 0: initial bracket is invalid
|
| 250 |
+
stop = (work.status == eim._EINPUTERR)
|
| 251 |
+
|
| 252 |
+
# Condition 1: a valid bracket (or the root itself) has been found
|
| 253 |
+
sf = np.sign(work.f)
|
| 254 |
+
sf_last = np.sign(work.f_last)
|
| 255 |
+
i = ((sf_last == -sf) | (sf_last == 0) | (sf == 0)) & ~stop
|
| 256 |
+
work.status[i] = eim._ECONVERGED
|
| 257 |
+
stop[i] = True
|
| 258 |
+
|
| 259 |
+
# Condition 2: the other side's search found a valid bracket.
|
| 260 |
+
# (If we just found a bracket with the rightward search, we can stop
|
| 261 |
+
# the leftward search, and vice-versa.)
|
| 262 |
+
# To do this, we need to set the status of the other side's search;
|
| 263 |
+
# this is tricky because `work.status` contains only the *active*
|
| 264 |
+
# elements, so we don't immediately know the index of the element we
|
| 265 |
+
# need to set - or even if it's still there. (That search may have
|
| 266 |
+
# terminated already, e.g. by reaching its `limit`.)
|
| 267 |
+
# To facilitate this, `work.active` contains a unit integer index of
|
| 268 |
+
# each search. Index `k` (`k < n)` and `k + n` correspond with a
|
| 269 |
+
# leftward and rightward search, respectively. Elements are removed
|
| 270 |
+
# from `work.active` just as they are removed from `work.status`, so
|
| 271 |
+
# we use `work.active` to help find the right location in
|
| 272 |
+
# `work.status`.
|
| 273 |
+
# Get the integer indices of the elements that can also stop
|
| 274 |
+
also_stop = (work.active[i] + work.n) % (2*work.n)
|
| 275 |
+
# Check whether they are still active.
|
| 276 |
+
# To start, we need to find out where in `work.active` they would
|
| 277 |
+
# appear if they are indeed there.
|
| 278 |
+
j = np.searchsorted(work.active, also_stop)
|
| 279 |
+
# If the location exceeds the length of the `work.active`, they are
|
| 280 |
+
# not there.
|
| 281 |
+
j = j[j < len(work.active)]
|
| 282 |
+
# Check whether they are still there.
|
| 283 |
+
j = j[also_stop == work.active[j]]
|
| 284 |
+
# Now convert these to boolean indices to use with `work.status`.
|
| 285 |
+
i = np.zeros_like(stop)
|
| 286 |
+
i[j] = True # boolean indices of elements that can also stop
|
| 287 |
+
i = i & ~stop
|
| 288 |
+
work.status[i] = _ESTOPONESIDE
|
| 289 |
+
stop[i] = True
|
| 290 |
+
|
| 291 |
+
# Condition 3: moving end of bracket reaches limit
|
| 292 |
+
i = (work.x == work.limit) & ~stop
|
| 293 |
+
work.status[i] = _ELIMITS
|
| 294 |
+
stop[i] = True
|
| 295 |
+
|
| 296 |
+
# Condition 4: non-finite value encountered
|
| 297 |
+
i = ~(np.isfinite(work.x) & np.isfinite(work.f)) & ~stop
|
| 298 |
+
work.status[i] = eim._EVALUEERR
|
| 299 |
+
stop[i] = True
|
| 300 |
+
|
| 301 |
+
return stop
|
| 302 |
+
|
| 303 |
+
def post_termination_check(work):
|
| 304 |
+
pass
|
| 305 |
+
|
| 306 |
+
def customize_result(res, shape):
|
| 307 |
+
n = len(res['x']) // 2
|
| 308 |
+
|
| 309 |
+
# To avoid ambiguity, below we refer to `xl0`, the initial left endpoint
|
| 310 |
+
# as `a` and `xr0`, the initial right endpoint, as `b`.
|
| 311 |
+
# Because we treat the two one-sided searches as though they were
|
| 312 |
+
# independent, what we keep track of in `work` and what we want to
|
| 313 |
+
# return in `res` look quite different. Combine the results from the
|
| 314 |
+
# two one-sided searches before reporting the results to the user.
|
| 315 |
+
# - "a" refers to the leftward search (the moving end started at `a`)
|
| 316 |
+
# - "b" refers to the rightward search (the moving end started at `b`)
|
| 317 |
+
# - "l" refers to the left end of the bracket (closer to -oo)
|
| 318 |
+
# - "r" refers to the right end of the bracket (closer to +oo)
|
| 319 |
+
xal = res['x'][:n]
|
| 320 |
+
xar = res['x_last'][:n]
|
| 321 |
+
xbl = res['x_last'][n:]
|
| 322 |
+
xbr = res['x'][n:]
|
| 323 |
+
|
| 324 |
+
fal = res['f'][:n]
|
| 325 |
+
far = res['f_last'][:n]
|
| 326 |
+
fbl = res['f_last'][n:]
|
| 327 |
+
fbr = res['f'][n:]
|
| 328 |
+
|
| 329 |
+
# Initialize the brackets and corresponding function values to return
|
| 330 |
+
# to the user. Brackets may not be valid (e.g. there is no root,
|
| 331 |
+
# there weren't enough iterations, NaN encountered), but we still need
|
| 332 |
+
# to return something. One option would be all NaNs, but what I've
|
| 333 |
+
# chosen here is the left- and right-most points at which the function
|
| 334 |
+
# has been evaluated. This gives the user some information about what
|
| 335 |
+
# interval of the real line has been searched and shows that there is
|
| 336 |
+
# no sign change between the two ends.
|
| 337 |
+
xl = xal.copy()
|
| 338 |
+
fl = fal.copy()
|
| 339 |
+
xr = xbr.copy()
|
| 340 |
+
fr = fbr.copy()
|
| 341 |
+
|
| 342 |
+
# `status` indicates whether the bracket is valid or not. If so,
|
| 343 |
+
# we want to adjust the bracket we return to be the narrowest possible
|
| 344 |
+
# given the points at which we evaluated the function.
|
| 345 |
+
# For example if bracket "a" is valid and smaller than bracket "b" OR
|
| 346 |
+
# if bracket "a" is valid and bracket "b" is not valid, we want to
|
| 347 |
+
# return bracket "a" (and vice versa).
|
| 348 |
+
sa = res['status'][:n]
|
| 349 |
+
sb = res['status'][n:]
|
| 350 |
+
|
| 351 |
+
da = xar - xal
|
| 352 |
+
db = xbr - xbl
|
| 353 |
+
|
| 354 |
+
i1 = ((da <= db) & (sa == 0)) | ((sa == 0) & (sb != 0))
|
| 355 |
+
i2 = ((db <= da) & (sb == 0)) | ((sb == 0) & (sa != 0))
|
| 356 |
+
|
| 357 |
+
xr[i1] = xar[i1]
|
| 358 |
+
fr[i1] = far[i1]
|
| 359 |
+
xl[i2] = xbl[i2]
|
| 360 |
+
fl[i2] = fbl[i2]
|
| 361 |
+
|
| 362 |
+
# Finish assembling the result object
|
| 363 |
+
res['xl'] = xl
|
| 364 |
+
res['xr'] = xr
|
| 365 |
+
res['fl'] = fl
|
| 366 |
+
res['fr'] = fr
|
| 367 |
+
|
| 368 |
+
res['nit'] = np.maximum(res['nit'][:n], res['nit'][n:])
|
| 369 |
+
res['nfev'] = res['nfev'][:n] + res['nfev'][n:]
|
| 370 |
+
# If the status on one side is zero, the status is zero. In any case,
|
| 371 |
+
# report the status from one side only.
|
| 372 |
+
res['status'] = np.choose(sa == 0, (sb, sa))
|
| 373 |
+
res['success'] = (res['status'] == 0)
|
| 374 |
+
|
| 375 |
+
del res['x']
|
| 376 |
+
del res['f']
|
| 377 |
+
del res['x_last']
|
| 378 |
+
del res['f_last']
|
| 379 |
+
|
| 380 |
+
return shape[:-1]
|
| 381 |
+
|
| 382 |
+
return eim._loop(work, callback, shape, maxiter, func, args, dtype,
|
| 383 |
+
pre_func_eval, post_func_eval, check_termination,
|
| 384 |
+
post_termination_check, customize_result, res_work_pairs,
|
| 385 |
+
xp)
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter):
|
| 389 |
+
|
| 390 |
+
if not callable(func):
|
| 391 |
+
raise ValueError('`func` must be callable.')
|
| 392 |
+
|
| 393 |
+
if not np.iterable(args):
|
| 394 |
+
args = (args,)
|
| 395 |
+
|
| 396 |
+
xm0 = np.asarray(xm0)[()]
|
| 397 |
+
if not np.issubdtype(xm0.dtype, np.number) or np.iscomplex(xm0).any():
|
| 398 |
+
raise ValueError('`xm0` must be numeric and real.')
|
| 399 |
+
|
| 400 |
+
xmin = -np.inf if xmin is None else xmin
|
| 401 |
+
xmax = np.inf if xmax is None else xmax
|
| 402 |
+
|
| 403 |
+
# If xl0 (xr0) is not supplied, fill with a dummy value for the sake
|
| 404 |
+
# of broadcasting. We need to wait until xmin (xmax) has been validated
|
| 405 |
+
# to compute the default values.
|
| 406 |
+
xl0_not_supplied = False
|
| 407 |
+
if xl0 is None:
|
| 408 |
+
xl0 = np.nan
|
| 409 |
+
xl0_not_supplied = True
|
| 410 |
+
|
| 411 |
+
xr0_not_supplied = False
|
| 412 |
+
if xr0 is None:
|
| 413 |
+
xr0 = np.nan
|
| 414 |
+
xr0_not_supplied = True
|
| 415 |
+
|
| 416 |
+
factor = 2.0 if factor is None else factor
|
| 417 |
+
xl0, xm0, xr0, xmin, xmax, factor = np.broadcast_arrays(
|
| 418 |
+
xl0, xm0, xr0, xmin, xmax, factor
|
| 419 |
+
)
|
| 420 |
+
|
| 421 |
+
if not np.issubdtype(xl0.dtype, np.number) or np.iscomplex(xl0).any():
|
| 422 |
+
raise ValueError('`xl0` must be numeric and real.')
|
| 423 |
+
|
| 424 |
+
if not np.issubdtype(xr0.dtype, np.number) or np.iscomplex(xr0).any():
|
| 425 |
+
raise ValueError('`xr0` must be numeric and real.')
|
| 426 |
+
|
| 427 |
+
if not np.issubdtype(xmin.dtype, np.number) or np.iscomplex(xmin).any():
|
| 428 |
+
raise ValueError('`xmin` must be numeric and real.')
|
| 429 |
+
|
| 430 |
+
if not np.issubdtype(xmax.dtype, np.number) or np.iscomplex(xmax).any():
|
| 431 |
+
raise ValueError('`xmax` must be numeric and real.')
|
| 432 |
+
|
| 433 |
+
if not np.issubdtype(factor.dtype, np.number) or np.iscomplex(factor).any():
|
| 434 |
+
raise ValueError('`factor` must be numeric and real.')
|
| 435 |
+
if not np.all(factor > 1):
|
| 436 |
+
raise ValueError('All elements of `factor` must be greater than 1.')
|
| 437 |
+
|
| 438 |
+
# Calculate default values of xl0 and/or xr0 if they have not been supplied
|
| 439 |
+
# by the user. We need to be careful to ensure xl0 and xr0 are not outside
|
| 440 |
+
# of (xmin, xmax).
|
| 441 |
+
if xl0_not_supplied:
|
| 442 |
+
xl0 = xm0 - np.minimum((xm0 - xmin)/16, 0.5)
|
| 443 |
+
if xr0_not_supplied:
|
| 444 |
+
xr0 = xm0 + np.minimum((xmax - xm0)/16, 0.5)
|
| 445 |
+
|
| 446 |
+
maxiter = np.asarray(maxiter)
|
| 447 |
+
message = '`maxiter` must be a non-negative integer.'
|
| 448 |
+
if (not np.issubdtype(maxiter.dtype, np.number) or maxiter.shape != tuple()
|
| 449 |
+
or np.iscomplex(maxiter)):
|
| 450 |
+
raise ValueError(message)
|
| 451 |
+
maxiter_int = int(maxiter[()])
|
| 452 |
+
if not maxiter == maxiter_int or maxiter < 0:
|
| 453 |
+
raise ValueError(message)
|
| 454 |
+
|
| 455 |
+
return func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
def _bracket_minimum(func, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None,
|
| 459 |
+
factor=None, args=(), maxiter=1000):
|
| 460 |
+
"""Bracket the minimum of a unimodal scalar function of one variable
|
| 461 |
+
|
| 462 |
+
This function works elementwise when `xm0`, `xl0`, `xr0`, `xmin`, `xmax`,
|
| 463 |
+
and the elements of `args` are broadcastable arrays.
|
| 464 |
+
|
| 465 |
+
Parameters
|
| 466 |
+
----------
|
| 467 |
+
func : callable
|
| 468 |
+
The function for which the minimum is to be bracketed.
|
| 469 |
+
The signature must be::
|
| 470 |
+
|
| 471 |
+
func(x: ndarray, *args) -> ndarray
|
| 472 |
+
|
| 473 |
+
where each element of ``x`` is a finite real and ``args`` is a tuple,
|
| 474 |
+
which may contain an arbitrary number of arrays that are broadcastable
|
| 475 |
+
with ``x``. `func` must be an elementwise function: each element
|
| 476 |
+
``func(x)[i]`` must equal ``func(x[i])`` for all indices `i`.
|
| 477 |
+
xm0: float array_like
|
| 478 |
+
Starting guess for middle point of bracket.
|
| 479 |
+
xl0, xr0: float array_like, optional
|
| 480 |
+
Starting guesses for left and right endpoints of the bracket. Must be
|
| 481 |
+
broadcastable with one another and with `xm0`.
|
| 482 |
+
xmin, xmax : float array_like, optional
|
| 483 |
+
Minimum and maximum allowable endpoints of the bracket, inclusive. Must
|
| 484 |
+
be broadcastable with `xl0`, `xm0`, and `xr0`.
|
| 485 |
+
factor : float array_like, optional
|
| 486 |
+
Controls expansion of bracket endpoint in downhill direction. Works
|
| 487 |
+
differently in the cases where a limit is set in the downhill direction
|
| 488 |
+
with `xmax` or `xmin`. See Notes.
|
| 489 |
+
args : tuple, optional
|
| 490 |
+
Additional positional arguments to be passed to `func`. Must be arrays
|
| 491 |
+
broadcastable with `xl0`, `xm0`, `xr0`, `xmin`, and `xmax`. If the
|
| 492 |
+
callable to be bracketed requires arguments that are not broadcastable
|
| 493 |
+
with these arrays, wrap that callable with `func` such that `func`
|
| 494 |
+
accepts only ``x`` and broadcastable arrays.
|
| 495 |
+
maxiter : int, optional
|
| 496 |
+
The maximum number of iterations of the algorithm to perform. The number
|
| 497 |
+
of function evaluations is three greater than the number of iterations.
|
| 498 |
+
|
| 499 |
+
Returns
|
| 500 |
+
-------
|
| 501 |
+
res : _RichResult
|
| 502 |
+
An instance of `scipy._lib._util._RichResult` with the following
|
| 503 |
+
attributes. The descriptions are written as though the values will be
|
| 504 |
+
scalars; however, if `func` returns an array, the outputs will be
|
| 505 |
+
arrays of the same shape.
|
| 506 |
+
|
| 507 |
+
xl, xm, xr : float
|
| 508 |
+
The left, middle, and right points of the bracket, if the algorithm
|
| 509 |
+
terminated successfully.
|
| 510 |
+
fl, fm, fr : float
|
| 511 |
+
The function value at the left, middle, and right points of the bracket.
|
| 512 |
+
nfev : int
|
| 513 |
+
The number of function evaluations required to find the bracket.
|
| 514 |
+
nit : int
|
| 515 |
+
The number of iterations of the algorithm that were performed.
|
| 516 |
+
status : int
|
| 517 |
+
An integer representing the exit status of the algorithm.
|
| 518 |
+
|
| 519 |
+
- ``0`` : The algorithm produced a valid bracket.
|
| 520 |
+
- ``-1`` : The bracket expanded to the allowable limits. Assuming
|
| 521 |
+
unimodality, this implies the endpoint at the limit is a
|
| 522 |
+
minimizer.
|
| 523 |
+
- ``-2`` : The maximum number of iterations was reached.
|
| 524 |
+
- ``-3`` : A non-finite value was encountered.
|
| 525 |
+
- ``-4`` : ``None`` shall pass.
|
| 526 |
+
- ``-5`` : The initial bracket does not satisfy
|
| 527 |
+
`xmin <= xl0 < xm0 < xr0 <= xmax`.
|
| 528 |
+
|
| 529 |
+
success : bool
|
| 530 |
+
``True`` when the algorithm terminated successfully (status ``0``).
|
| 531 |
+
|
| 532 |
+
Notes
|
| 533 |
+
-----
|
| 534 |
+
Similar to `scipy.optimize.bracket`, this function seeks to find real
|
| 535 |
+
points ``xl < xm < xr`` such that ``f(xl) >= f(xm)`` and ``f(xr) >= f(xm)``,
|
| 536 |
+
where at least one of the inequalities is strict. Unlike `scipy.optimize.bracket`,
|
| 537 |
+
this function can operate in a vectorized manner on array input, so long as
|
| 538 |
+
the input arrays are broadcastable with each other. Also unlike
|
| 539 |
+
`scipy.optimize.bracket`, users may specify minimum and maximum endpoints
|
| 540 |
+
for the desired bracket.
|
| 541 |
+
|
| 542 |
+
Given an initial trio of points ``xl = xl0``, ``xm = xm0``, ``xr = xr0``,
|
| 543 |
+
the algorithm checks if these points already give a valid bracket. If not,
|
| 544 |
+
a new endpoint, ``w`` is chosen in the "downhill" direction, ``xm`` becomes the new
|
| 545 |
+
opposite endpoint, and either `xl` or `xr` becomes the new middle point,
|
| 546 |
+
depending on which direction is downhill. The algorithm repeats from here.
|
| 547 |
+
|
| 548 |
+
The new endpoint `w` is chosen differently depending on whether or not a
|
| 549 |
+
boundary `xmin` or `xmax` has been set in the downhill direction. Without
|
| 550 |
+
loss of generality, suppose the downhill direction is to the right, so that
|
| 551 |
+
``f(xl) > f(xm) > f(xr)``. If there is no boundary to the right, then `w`
|
| 552 |
+
is chosen to be ``xr + factor * (xr - xm)`` where `factor` is controlled by
|
| 553 |
+
the user (defaults to 2.0) so that step sizes increase in geometric proportion.
|
| 554 |
+
If there is a boundary, `xmax` in this case, then `w` is chosen to be
|
| 555 |
+
``xmax - (xmax - xr)/factor``, with steps slowing to a stop at
|
| 556 |
+
`xmax`. This cautious approach ensures that a minimum near but distinct from
|
| 557 |
+
the boundary isn't missed while also detecting whether or not the `xmax` is
|
| 558 |
+
a minimizer when `xmax` is reached after a finite number of steps.
|
| 559 |
+
""" # noqa: E501
|
| 560 |
+
callback = None # works; I just don't want to test it
|
| 561 |
+
|
| 562 |
+
temp = _bracket_minimum_iv(func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter)
|
| 563 |
+
func, xm0, xl0, xr0, xmin, xmax, factor, args, maxiter = temp
|
| 564 |
+
|
| 565 |
+
xs = (xl0, xm0, xr0)
|
| 566 |
+
temp = eim._initialize(func, xs, args)
|
| 567 |
+
func, xs, fs, args, shape, dtype, xp = temp
|
| 568 |
+
|
| 569 |
+
xl0, xm0, xr0 = xs
|
| 570 |
+
fl0, fm0, fr0 = fs
|
| 571 |
+
xmin = np.broadcast_to(xmin, shape).astype(dtype, copy=False).ravel()
|
| 572 |
+
xmax = np.broadcast_to(xmax, shape).astype(dtype, copy=False).ravel()
|
| 573 |
+
invalid_bracket = ~((xmin <= xl0) & (xl0 < xm0) & (xm0 < xr0) & (xr0 <= xmax))
|
| 574 |
+
# We will modify factor later on so make a copy. np.broadcast_to returns
|
| 575 |
+
# a read-only view.
|
| 576 |
+
factor = np.broadcast_to(factor, shape).astype(dtype, copy=True).ravel()
|
| 577 |
+
|
| 578 |
+
# To simplify the logic, swap xl and xr if f(xl) < f(xr). We should always be
|
| 579 |
+
# marching downhill in the direction from xl to xr.
|
| 580 |
+
comp = fl0 < fr0
|
| 581 |
+
xl0[comp], xr0[comp] = xr0[comp], xl0[comp]
|
| 582 |
+
fl0[comp], fr0[comp] = fr0[comp], fl0[comp]
|
| 583 |
+
# We only need the boundary in the direction we're traveling.
|
| 584 |
+
limit = np.where(comp, xmin, xmax)
|
| 585 |
+
|
| 586 |
+
unlimited = np.isinf(limit)
|
| 587 |
+
limited = ~unlimited
|
| 588 |
+
step = np.empty_like(xl0)
|
| 589 |
+
|
| 590 |
+
step[unlimited] = (xr0[unlimited] - xm0[unlimited])
|
| 591 |
+
step[limited] = (limit[limited] - xr0[limited])
|
| 592 |
+
|
| 593 |
+
# Step size is divided by factor for case where there is a limit.
|
| 594 |
+
factor[limited] = 1 / factor[limited]
|
| 595 |
+
|
| 596 |
+
status = np.full_like(xl0, eim._EINPROGRESS, dtype=int)
|
| 597 |
+
status[invalid_bracket] = eim._EINPUTERR
|
| 598 |
+
nit, nfev = 0, 3
|
| 599 |
+
|
| 600 |
+
work = _RichResult(xl=xl0, xm=xm0, xr=xr0, xr0=xr0, fl=fl0, fm=fm0, fr=fr0,
|
| 601 |
+
step=step, limit=limit, limited=limited, factor=factor, nit=nit,
|
| 602 |
+
nfev=nfev, status=status, args=args)
|
| 603 |
+
|
| 604 |
+
res_work_pairs = [('status', 'status'), ('xl', 'xl'), ('xm', 'xm'), ('xr', 'xr'),
|
| 605 |
+
('nit', 'nit'), ('nfev', 'nfev'), ('fl', 'fl'), ('fm', 'fm'),
|
| 606 |
+
('fr', 'fr')]
|
| 607 |
+
|
| 608 |
+
def pre_func_eval(work):
|
| 609 |
+
work.step *= work.factor
|
| 610 |
+
x = np.empty_like(work.xr)
|
| 611 |
+
x[~work.limited] = work.xr0[~work.limited] + work.step[~work.limited]
|
| 612 |
+
x[work.limited] = work.limit[work.limited] - work.step[work.limited]
|
| 613 |
+
# Since the new bracket endpoint is calculated from an offset with the
|
| 614 |
+
# limit, it may be the case that the new endpoint equals the old endpoint,
|
| 615 |
+
# when the old endpoint is sufficiently close to the limit. We use the
|
| 616 |
+
# limit itself as the new endpoint in these cases.
|
| 617 |
+
x[work.limited] = np.where(
|
| 618 |
+
x[work.limited] == work.xr[work.limited],
|
| 619 |
+
work.limit[work.limited],
|
| 620 |
+
x[work.limited],
|
| 621 |
+
)
|
| 622 |
+
return x
|
| 623 |
+
|
| 624 |
+
def post_func_eval(x, f, work):
|
| 625 |
+
work.xl, work.xm, work.xr = work.xm, work.xr, x
|
| 626 |
+
work.fl, work.fm, work.fr = work.fm, work.fr, f
|
| 627 |
+
|
| 628 |
+
def check_termination(work):
|
| 629 |
+
# Condition 0: Initial bracket is invalid.
|
| 630 |
+
stop = (work.status == eim._EINPUTERR)
|
| 631 |
+
|
| 632 |
+
# Condition 1: A valid bracket has been found.
|
| 633 |
+
i = (
|
| 634 |
+
(work.fl >= work.fm) & (work.fr > work.fm)
|
| 635 |
+
| (work.fl > work.fm) & (work.fr >= work.fm)
|
| 636 |
+
) & ~stop
|
| 637 |
+
work.status[i] = eim._ECONVERGED
|
| 638 |
+
stop[i] = True
|
| 639 |
+
|
| 640 |
+
# Condition 2: Moving end of bracket reaches limit.
|
| 641 |
+
i = (work.xr == work.limit) & ~stop
|
| 642 |
+
work.status[i] = _ELIMITS
|
| 643 |
+
stop[i] = True
|
| 644 |
+
|
| 645 |
+
# Condition 3: non-finite value encountered
|
| 646 |
+
i = ~(np.isfinite(work.xr) & np.isfinite(work.fr)) & ~stop
|
| 647 |
+
work.status[i] = eim._EVALUEERR
|
| 648 |
+
stop[i] = True
|
| 649 |
+
|
| 650 |
+
return stop
|
| 651 |
+
|
| 652 |
+
def post_termination_check(work):
|
| 653 |
+
pass
|
| 654 |
+
|
| 655 |
+
def customize_result(res, shape):
|
| 656 |
+
# Reorder entries of xl and xr if they were swapped due to f(xl0) < f(xr0).
|
| 657 |
+
comp = res['xl'] > res['xr']
|
| 658 |
+
res['xl'][comp], res['xr'][comp] = res['xr'][comp], res['xl'][comp]
|
| 659 |
+
res['fl'][comp], res['fr'][comp] = res['fr'][comp], res['fl'][comp]
|
| 660 |
+
return shape
|
| 661 |
+
|
| 662 |
+
return eim._loop(work, callback, shape,
|
| 663 |
+
maxiter, func, args, dtype,
|
| 664 |
+
pre_func_eval, post_func_eval,
|
| 665 |
+
check_termination, post_termination_check,
|
| 666 |
+
customize_result, res_work_pairs, xp)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_chandrupatla.py
ADDED
|
@@ -0,0 +1,549 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import numpy as np
|
| 3 |
+
import scipy._lib._elementwise_iterative_method as eim
|
| 4 |
+
from scipy._lib._util import _RichResult
|
| 5 |
+
from scipy._lib._array_api import xp_clip, xp_minimum, xp_sign
|
| 6 |
+
|
| 7 |
+
# TODO:
|
| 8 |
+
# - (maybe?) don't use fancy indexing assignment
|
| 9 |
+
# - figure out how to replace the new `try`/`except`s
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _chandrupatla(func, a, b, *, args=(), xatol=None, xrtol=None,
                  fatol=None, frtol=0, maxiter=None, callback=None):
    """Find the root of an elementwise function using Chandrupatla's algorithm.

    For each element of the output of `func`, `chandrupatla` seeks the scalar
    root that makes the element 0. This function allows for `a`, `b`, and the
    output of `func` to be of any broadcastable shapes.

    Parameters
    ----------
    func : callable
        The function whose root is desired. The signature must be::

            func(x: ndarray, *args) -> ndarray

        where each element of ``x`` is a finite real and ``args`` is a tuple,
        which may contain an arbitrary number of components of any type(s).
        ``func`` must be an elementwise function: each element ``func(x)[i]``
        must equal ``func(x[i])`` for all indices ``i``. `_chandrupatla`
        seeks an array ``x`` such that ``func(x)`` is an array of zeros.
    a, b : array_like
        The lower and upper bounds of the root of the function. Must be
        broadcastable with one another.
    args : tuple, optional
        Additional positional arguments to be passed to `func`.
    xatol, xrtol, fatol, frtol : float, optional
        Absolute and relative tolerances on the root and function value.
        See Notes for details.
    maxiter : int, optional
        The maximum number of iterations of the algorithm to perform.
        The default is the maximum possible number of bisections within
        the (normal) floating point numbers of the relevant dtype.
    callback : callable, optional
        An optional user-supplied function to be called before the first
        iteration and after each iteration.
        Called as ``callback(res)``, where ``res`` is a ``_RichResult``
        similar to that returned by `_chandrupatla` (but containing the
        current iterate's values of all variables). If `callback` raises a
        ``StopIteration``, the algorithm will terminate immediately and
        `_chandrupatla` will return a result.

    Returns
    -------
    res : _RichResult
        An instance of `scipy._lib._util._RichResult` with the following
        attributes. The descriptions are written as though the values will be
        scalars; however, if `func` returns an array, the outputs will be
        arrays of the same shape.

        x : float
            The root of the function, if the algorithm terminated successfully.
        nfev : int
            The number of times the function was called to find the root.
        nit : int
            The number of iterations of Chandrupatla's algorithm performed.
        status : int
            An integer representing the exit status of the algorithm.
            ``0`` : The algorithm converged to the specified tolerances.
            ``-1`` : The algorithm encountered an invalid bracket.
            ``-2`` : The maximum number of iterations was reached.
            ``-3`` : A non-finite value was encountered.
            ``-4`` : Iteration was terminated by `callback`.
            ``1`` : The algorithm is proceeding normally (in `callback` only).
        success : bool
            ``True`` when the algorithm terminated successfully (status ``0``).
        fun : float
            The value of `func` evaluated at `x`.
        xl, xr : float
            The lower and upper ends of the bracket.
        fl, fr : float
            The function value at the lower and upper ends of the bracket.

    Notes
    -----
    Implemented based on Chandrupatla's original paper [1]_.

    If ``xl`` and ``xr`` are the left and right ends of the bracket,
    ``xmin = xl if abs(func(xl)) <= abs(func(xr)) else xr``,
    and ``fmin0 = min(func(a), func(b))``, then the algorithm is considered to
    have converged when ``abs(xr - xl) < xatol + abs(xmin) * xrtol`` or
    ``fun(xmin) <= fatol + abs(fmin0) * frtol``. This is equivalent to the
    termination condition described in [1]_ with ``xrtol = 4e-10``,
    ``xatol = 1e-5``, and ``fatol = frtol = 0``. The default values are
    ``xatol = 4*tiny``, ``xrtol = 4*eps``, ``frtol = 0``, and ``fatol = tiny``,
    where ``eps`` and ``tiny`` are the precision and smallest normal number
    of the result ``dtype`` of function inputs and outputs.

    References
    ----------

    .. [1] Chandrupatla, Tirupathi R.
           "A new hybrid quadratic/bisection algorithm for finding the zero of a
           nonlinear function without using derivatives".
           Advances in Engineering Software, 28(3), 145-149.
           https://doi.org/10.1016/s0965-9978(96)00051-8

    See Also
    --------
    brentq, brenth, ridder, bisect, newton

    Examples
    --------
    >>> from scipy import optimize
    >>> def f(x, c):
    ...     return x**3 - 2*x - c
    >>> c = 5
    >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,))
    >>> res.x
    2.0945514818937463

    >>> c = [3, 4, 5]
    >>> res = optimize._chandrupatla._chandrupatla(f, 0, 3, args=(c,))
    >>> res.x
    array([1.8932892 , 2.        , 2.09455148])

    """
    res = _chandrupatla_iv(func, args, xatol, xrtol,
                           fatol, frtol, maxiter, callback)
    func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res

    # Initialization
    temp = eim._initialize(func, (a, b), args)
    func, xs, fs, args, shape, dtype, xp = temp
    x1, x2 = xs
    f1, f2 = fs
    status = xp.full_like(x1, eim._EINPROGRESS, dtype=xp.int32)  # in progress
    nit, nfev = 0, 2  # two function evaluations performed above
    finfo = xp.finfo(dtype)
    xatol = 4*finfo.smallest_normal if xatol is None else xatol
    xrtol = 4*finfo.eps if xrtol is None else xrtol
    fatol = finfo.smallest_normal if fatol is None else fatol
    frtol = frtol * xp_minimum(xp.abs(f1), xp.abs(f2))
    # Default `maxiter` is the number of bisections needed to traverse the
    # full range of normal floats of this dtype.
    maxiter = (math.log2(finfo.max) - math.log2(finfo.smallest_normal)
               if maxiter is None else maxiter)
    work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=None, f3=None, t=0.5,
                       xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol,
                       nit=nit, nfev=nfev, status=status)
    res_work_pairs = [('status', 'status'), ('x', 'xmin'), ('fun', 'fmin'),
                      ('nit', 'nit'), ('nfev', 'nfev'), ('xl', 'x1'),
                      ('fl', 'f1'), ('xr', 'x2'), ('fr', 'f2')]

    def pre_func_eval(work):
        # [1] Figure 1 (first box)
        x = work.x1 + work.t * (work.x2 - work.x1)
        return x

    def post_func_eval(x, f, work):
        # [1] Figure 1 (first diamond and boxes)
        # Note: y/n are reversed in figure; compare to BASIC in appendix
        work.x3, work.f3 = (xp.asarray(work.x2, copy=True),
                            xp.asarray(work.f2, copy=True))
        j = xp.sign(f) == xp.sign(work.f1)
        nj = ~j
        work.x3[j], work.f3[j] = work.x1[j], work.f1[j]
        work.x2[nj], work.f2[nj] = work.x1[nj], work.f1[nj]
        work.x1, work.f1 = x, f

    def check_termination(work):
        # [1] Figure 1 (second diamond)
        # Check for all terminal conditions and record statuses.

        # See [1] Section 4 (first two sentences)
        i = xp.abs(work.f1) < xp.abs(work.f2)
        work.xmin = xp.where(i, work.x1, work.x2)
        work.fmin = xp.where(i, work.f1, work.f2)
        stop = xp.zeros_like(work.x1, dtype=xp.bool)  # termination condition met

        # If function value tolerance is met, report successful convergence,
        # regardless of other conditions. Note that `frtol` has been redefined
        # as `frtol = frtol * minimum(f1, f2)`, where `f1` and `f2` are the
        # function evaluated at the original ends of the bracket.
        i = xp.abs(work.fmin) <= work.fatol + work.frtol
        work.status[i] = eim._ECONVERGED
        stop[i] = True

        # If the bracket is no longer valid, report failure (unless a function
        # tolerance is met, as detected above).
        i = (xp_sign(work.f1) == xp_sign(work.f2)) & ~stop
        NaN = xp.asarray(xp.nan, dtype=work.xmin.dtype)
        work.xmin[i], work.fmin[i], work.status[i] = NaN, NaN, eim._ESIGNERR
        stop[i] = True

        # If the abscissae are non-finite or either function value is NaN,
        # report failure.
        # BUG FIX: the NaN check previously combined the two tests with `&`,
        # which only fired when *both* function values were NaN; a single NaN
        # function value would evade every other condition (all comparisons
        # with NaN are False) and iterate until `maxiter`. Use `|` so either
        # NaN triggers _EVALUEERR, matching the comment and the parallel
        # `x_nonfinite` check (~(finite & finite) == nonfinite | nonfinite).
        x_nonfinite = ~(xp.isfinite(work.x1) & xp.isfinite(work.x2))
        f_nan = xp.isnan(work.f1) | xp.isnan(work.f2)
        i = (x_nonfinite | f_nan) & ~stop
        work.xmin[i], work.fmin[i], work.status[i] = NaN, NaN, eim._EVALUEERR
        stop[i] = True

        # This is the convergence criterion used in bisect. Chandrupatla's
        # criterion is equivalent to this except with a factor of 4 on `xrtol`.
        work.dx = xp.abs(work.x2 - work.x1)
        work.tol = xp.abs(work.xmin) * work.xrtol + work.xatol
        i = work.dx < work.tol
        work.status[i] = eim._ECONVERGED
        stop[i] = True

        return stop

    def post_termination_check(work):
        # [1] Figure 1 (third diamond and boxes / Equation 1)
        xi1 = (work.x1 - work.x2) / (work.x3 - work.x2)
        phi1 = (work.f1 - work.f2) / (work.f3 - work.f2)
        alpha = (work.x3 - work.x1) / (work.x2 - work.x1)
        j = ((1 - xp.sqrt(1 - xi1)) < phi1) & (phi1 < xp.sqrt(xi1))

        f1j, f2j, f3j, alphaj = work.f1[j], work.f2[j], work.f3[j], alpha[j]
        t = xp.full_like(alpha, 0.5)
        t[j] = (f1j / (f1j - f2j) * f3j / (f3j - f2j)
                - alphaj * f1j / (f3j - f1j) * f2j / (f2j - f3j))

        # [1] Figure 1 (last box; see also BASIC in appendix with comment
        # "Adjust T Away from the Interval Boundary")
        tl = 0.5 * work.tol / work.dx
        work.t = xp_clip(t, tl, 1 - tl)

    def customize_result(res, shape):
        # Present the bracket in ascending order regardless of the internal
        # orientation maintained during iteration.
        xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr']
        i = res['xl'] < res['xr']
        res['xl'] = xp.where(i, xl, xr)
        res['xr'] = xp.where(i, xr, xl)
        res['fl'] = xp.where(i, fl, fr)
        res['fr'] = xp.where(i, fr, fl)
        return shape

    return eim._loop(work, callback, shape, maxiter, func, args, dtype,
                     pre_func_eval, post_func_eval, check_termination,
                     post_termination_check, customize_result, res_work_pairs,
                     xp=xp)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def _chandrupatla_iv(func, args, xatol, xrtol,
|
| 245 |
+
fatol, frtol, maxiter, callback):
|
| 246 |
+
# Input validation for `_chandrupatla`
|
| 247 |
+
|
| 248 |
+
if not callable(func):
|
| 249 |
+
raise ValueError('`func` must be callable.')
|
| 250 |
+
|
| 251 |
+
if not np.iterable(args):
|
| 252 |
+
args = (args,)
|
| 253 |
+
|
| 254 |
+
# tolerances are floats, not arrays; OK to use NumPy
|
| 255 |
+
tols = np.asarray([xatol if xatol is not None else 1,
|
| 256 |
+
xrtol if xrtol is not None else 1,
|
| 257 |
+
fatol if fatol is not None else 1,
|
| 258 |
+
frtol if frtol is not None else 1])
|
| 259 |
+
if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0)
|
| 260 |
+
or np.any(np.isnan(tols)) or tols.shape != (4,)):
|
| 261 |
+
raise ValueError('Tolerances must be non-negative scalars.')
|
| 262 |
+
|
| 263 |
+
if maxiter is not None:
|
| 264 |
+
maxiter_int = int(maxiter)
|
| 265 |
+
if maxiter != maxiter_int or maxiter < 0:
|
| 266 |
+
raise ValueError('`maxiter` must be a non-negative integer.')
|
| 267 |
+
|
| 268 |
+
if callback is not None and not callable(callback):
|
| 269 |
+
raise ValueError('`callback` must be callable.')
|
| 270 |
+
|
| 271 |
+
return func, args, xatol, xrtol, fatol, frtol, maxiter, callback
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def _chandrupatla_minimize(func, x1, x2, x3, *, args=(), xatol=None,
|
| 275 |
+
xrtol=None, fatol=None, frtol=None, maxiter=100,
|
| 276 |
+
callback=None):
|
| 277 |
+
"""Find the minimizer of an elementwise function.
|
| 278 |
+
|
| 279 |
+
For each element of the output of `func`, `_chandrupatla_minimize` seeks
|
| 280 |
+
the scalar minimizer that minimizes the element. This function allows for
|
| 281 |
+
`x1`, `x2`, `x3`, and the elements of `args` to be arrays of any
|
| 282 |
+
broadcastable shapes.
|
| 283 |
+
|
| 284 |
+
Parameters
|
| 285 |
+
----------
|
| 286 |
+
func : callable
|
| 287 |
+
The function whose minimizer is desired. The signature must be::
|
| 288 |
+
|
| 289 |
+
func(x: ndarray, *args) -> ndarray
|
| 290 |
+
|
| 291 |
+
where each element of ``x`` is a finite real and ``args`` is a tuple,
|
| 292 |
+
which may contain an arbitrary number of arrays that are broadcastable
|
| 293 |
+
with `x`. ``func`` must be an elementwise function: each element
|
| 294 |
+
``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
|
| 295 |
+
`_chandrupatla` seeks an array ``x`` such that ``func(x)`` is an array
|
| 296 |
+
of minima.
|
| 297 |
+
x1, x2, x3 : array_like
|
| 298 |
+
The abscissae of a standard scalar minimization bracket. A bracket is
|
| 299 |
+
valid if ``x1 < x2 < x3`` and ``func(x1) > func(x2) <= func(x3)``.
|
| 300 |
+
Must be broadcastable with one another and `args`.
|
| 301 |
+
args : tuple, optional
|
| 302 |
+
Additional positional arguments to be passed to `func`. Must be arrays
|
| 303 |
+
broadcastable with `x1`, `x2`, and `x3`. If the callable to be
|
| 304 |
+
differentiated requires arguments that are not broadcastable with `x`,
|
| 305 |
+
wrap that callable with `func` such that `func` accepts only `x` and
|
| 306 |
+
broadcastable arrays.
|
| 307 |
+
xatol, xrtol, fatol, frtol : float, optional
|
| 308 |
+
Absolute and relative tolerances on the minimizer and function value.
|
| 309 |
+
See Notes for details.
|
| 310 |
+
maxiter : int, optional
|
| 311 |
+
The maximum number of iterations of the algorithm to perform.
|
| 312 |
+
callback : callable, optional
|
| 313 |
+
An optional user-supplied function to be called before the first
|
| 314 |
+
iteration and after each iteration.
|
| 315 |
+
Called as ``callback(res)``, where ``res`` is a ``_RichResult``
|
| 316 |
+
similar to that returned by `_chandrupatla_minimize` (but containing
|
| 317 |
+
the current iterate's values of all variables). If `callback` raises a
|
| 318 |
+
``StopIteration``, the algorithm will terminate immediately and
|
| 319 |
+
`_chandrupatla_minimize` will return a result.
|
| 320 |
+
|
| 321 |
+
Returns
|
| 322 |
+
-------
|
| 323 |
+
res : _RichResult
|
| 324 |
+
An instance of `scipy._lib._util._RichResult` with the following
|
| 325 |
+
attributes. (The descriptions are written as though the values will be
|
| 326 |
+
scalars; however, if `func` returns an array, the outputs will be
|
| 327 |
+
arrays of the same shape.)
|
| 328 |
+
|
| 329 |
+
success : bool
|
| 330 |
+
``True`` when the algorithm terminated successfully (status ``0``).
|
| 331 |
+
status : int
|
| 332 |
+
An integer representing the exit status of the algorithm.
|
| 333 |
+
``0`` : The algorithm converged to the specified tolerances.
|
| 334 |
+
``-1`` : The algorithm encountered an invalid bracket.
|
| 335 |
+
``-2`` : The maximum number of iterations was reached.
|
| 336 |
+
``-3`` : A non-finite value was encountered.
|
| 337 |
+
``-4`` : Iteration was terminated by `callback`.
|
| 338 |
+
``1`` : The algorithm is proceeding normally (in `callback` only).
|
| 339 |
+
x : float
|
| 340 |
+
The minimizer of the function, if the algorithm terminated
|
| 341 |
+
successfully.
|
| 342 |
+
fun : float
|
| 343 |
+
The value of `func` evaluated at `x`.
|
| 344 |
+
nfev : int
|
| 345 |
+
The number of points at which `func` was evaluated.
|
| 346 |
+
nit : int
|
| 347 |
+
The number of iterations of the algorithm that were performed.
|
| 348 |
+
xl, xm, xr : float
|
| 349 |
+
The final three-point bracket.
|
| 350 |
+
fl, fm, fr : float
|
| 351 |
+
The function value at the bracket points.
|
| 352 |
+
|
| 353 |
+
Notes
|
| 354 |
+
-----
|
| 355 |
+
Implemented based on Chandrupatla's original paper [1]_.
|
| 356 |
+
|
| 357 |
+
If ``x1 < x2 < x3`` are the points of the bracket and ``f1 > f2 <= f3``
|
| 358 |
+
are the values of ``func`` at those points, then the algorithm is
|
| 359 |
+
considered to have converged when ``x3 - x1 <= abs(x2)*xrtol + xatol``
|
| 360 |
+
or ``(f1 - 2*f2 + f3)/2 <= abs(f2)*frtol + fatol``. Note that first of
|
| 361 |
+
these differs from the termination conditions described in [1]_. The
|
| 362 |
+
default values of `xrtol` is the square root of the precision of the
|
| 363 |
+
appropriate dtype, and ``xatol = fatol = frtol`` is the smallest normal
|
| 364 |
+
number of the appropriate dtype.
|
| 365 |
+
|
| 366 |
+
References
|
| 367 |
+
----------
|
| 368 |
+
.. [1] Chandrupatla, Tirupathi R. (1998).
|
| 369 |
+
"An efficient quadratic fit-sectioning algorithm for minimization
|
| 370 |
+
without derivatives".
|
| 371 |
+
Computer Methods in Applied Mechanics and Engineering, 152 (1-2),
|
| 372 |
+
211-217. https://doi.org/10.1016/S0045-7825(97)00190-4
|
| 373 |
+
|
| 374 |
+
See Also
|
| 375 |
+
--------
|
| 376 |
+
golden, brent, bounded
|
| 377 |
+
|
| 378 |
+
Examples
|
| 379 |
+
--------
|
| 380 |
+
>>> from scipy.optimize._chandrupatla import _chandrupatla_minimize
|
| 381 |
+
>>> def f(x, args=1):
|
| 382 |
+
... return (x - args)**2
|
| 383 |
+
>>> res = _chandrupatla_minimize(f, -5, 0, 5)
|
| 384 |
+
>>> res.x
|
| 385 |
+
1.0
|
| 386 |
+
>>> c = [1, 1.5, 2]
|
| 387 |
+
>>> res = _chandrupatla_minimize(f, -5, 0, 5, args=(c,))
|
| 388 |
+
>>> res.x
|
| 389 |
+
array([1. , 1.5, 2. ])
|
| 390 |
+
"""
|
| 391 |
+
res = _chandrupatla_iv(func, args, xatol, xrtol,
|
| 392 |
+
fatol, frtol, maxiter, callback)
|
| 393 |
+
func, args, xatol, xrtol, fatol, frtol, maxiter, callback = res
|
| 394 |
+
|
| 395 |
+
# Initialization
|
| 396 |
+
xs = (x1, x2, x3)
|
| 397 |
+
temp = eim._initialize(func, xs, args)
|
| 398 |
+
func, xs, fs, args, shape, dtype, xp = temp # line split for PEP8
|
| 399 |
+
x1, x2, x3 = xs
|
| 400 |
+
f1, f2, f3 = fs
|
| 401 |
+
phi = dtype.type(0.5 + 0.5*5**0.5) # golden ratio
|
| 402 |
+
status = np.full_like(x1, eim._EINPROGRESS, dtype=int) # in progress
|
| 403 |
+
nit, nfev = 0, 3 # three function evaluations performed above
|
| 404 |
+
fatol = np.finfo(dtype).tiny if fatol is None else fatol
|
| 405 |
+
frtol = np.finfo(dtype).tiny if frtol is None else frtol
|
| 406 |
+
xatol = np.finfo(dtype).tiny if xatol is None else xatol
|
| 407 |
+
xrtol = np.sqrt(np.finfo(dtype).eps) if xrtol is None else xrtol
|
| 408 |
+
|
| 409 |
+
# Ensure that x1 < x2 < x3 initially.
|
| 410 |
+
xs, fs = np.vstack((x1, x2, x3)), np.vstack((f1, f2, f3))
|
| 411 |
+
i = np.argsort(xs, axis=0)
|
| 412 |
+
x1, x2, x3 = np.take_along_axis(xs, i, axis=0)
|
| 413 |
+
f1, f2, f3 = np.take_along_axis(fs, i, axis=0)
|
| 414 |
+
q0 = x3.copy() # "At the start, q0 is set at x3..." ([1] after (7))
|
| 415 |
+
|
| 416 |
+
work = _RichResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=x3, f3=f3, phi=phi,
|
| 417 |
+
xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol,
|
| 418 |
+
nit=nit, nfev=nfev, status=status, q0=q0, args=args)
|
| 419 |
+
res_work_pairs = [('status', 'status'),
|
| 420 |
+
('x', 'x2'), ('fun', 'f2'),
|
| 421 |
+
('nit', 'nit'), ('nfev', 'nfev'),
|
| 422 |
+
('xl', 'x1'), ('xm', 'x2'), ('xr', 'x3'),
|
| 423 |
+
('fl', 'f1'), ('fm', 'f2'), ('fr', 'f3')]
|
| 424 |
+
|
| 425 |
+
def pre_func_eval(work):
|
| 426 |
+
# `_check_termination` is called first -> `x3 - x2 > x2 - x1`
|
| 427 |
+
# But let's calculate a few terms that we'll reuse
|
| 428 |
+
x21 = work.x2 - work.x1
|
| 429 |
+
x32 = work.x3 - work.x2
|
| 430 |
+
|
| 431 |
+
# [1] Section 3. "The quadratic minimum point Q1 is calculated using
|
| 432 |
+
# the relations developed in the previous section." [1] Section 2 (5/6)
|
| 433 |
+
A = x21 * (work.f3 - work.f2)
|
| 434 |
+
B = x32 * (work.f1 - work.f2)
|
| 435 |
+
C = A / (A + B)
|
| 436 |
+
# q1 = C * (work.x1 + work.x2) / 2 + (1 - C) * (work.x2 + work.x3) / 2
|
| 437 |
+
q1 = 0.5 * (C*(work.x1 - work.x3) + work.x2 + work.x3) # much faster
|
| 438 |
+
# this is an array, so multiplying by 0.5 does not change dtype
|
| 439 |
+
|
| 440 |
+
# "If Q1 and Q0 are sufficiently close... Q1 is accepted if it is
|
| 441 |
+
# sufficiently away from the inside point x2"
|
| 442 |
+
i = abs(q1 - work.q0) < 0.5 * abs(x21) # [1] (7)
|
| 443 |
+
xi = q1[i]
|
| 444 |
+
# Later, after (9), "If the point Q1 is in a +/- xtol neighborhood of
|
| 445 |
+
# x2, the new point is chosen in the larger interval at a distance
|
| 446 |
+
# tol away from x2."
|
| 447 |
+
# See also QBASIC code after "Accept Ql adjust if close to X2".
|
| 448 |
+
j = abs(q1[i] - work.x2[i]) <= work.xtol[i]
|
| 449 |
+
xi[j] = work.x2[i][j] + np.sign(x32[i][j]) * work.xtol[i][j]
|
| 450 |
+
|
| 451 |
+
# "If condition (7) is not satisfied, golden sectioning of the larger
|
| 452 |
+
# interval is carried out to introduce the new point."
|
| 453 |
+
# (For simplicity, we go ahead and calculate it for all points, but we
|
| 454 |
+
# change the elements for which the condition was satisfied.)
|
| 455 |
+
x = work.x2 + (2 - work.phi) * x32
|
| 456 |
+
x[i] = xi
|
| 457 |
+
|
| 458 |
+
# "We define Q0 as the value of Q1 at the previous iteration."
|
| 459 |
+
work.q0 = q1
|
| 460 |
+
return x
|
| 461 |
+
|
| 462 |
+
def post_func_eval(x, f, work):
|
| 463 |
+
# Standard logic for updating a three-point bracket based on a new
|
| 464 |
+
# point. In QBASIC code, see "IF SGN(X-X2) = SGN(X3-X2) THEN...".
|
| 465 |
+
# There is an awful lot of data copying going on here; this would
|
| 466 |
+
# probably benefit from code optimization or implementation in Pythran.
|
| 467 |
+
i = np.sign(x - work.x2) == np.sign(work.x3 - work.x2)
|
| 468 |
+
xi, x1i, x2i, x3i = x[i], work.x1[i], work.x2[i], work.x3[i],
|
| 469 |
+
fi, f1i, f2i, f3i = f[i], work.f1[i], work.f2[i], work.f3[i]
|
| 470 |
+
j = fi > f2i
|
| 471 |
+
x3i[j], f3i[j] = xi[j], fi[j]
|
| 472 |
+
j = ~j
|
| 473 |
+
x1i[j], f1i[j], x2i[j], f2i[j] = x2i[j], f2i[j], xi[j], fi[j]
|
| 474 |
+
|
| 475 |
+
ni = ~i
|
| 476 |
+
xni, x1ni, x2ni, x3ni = x[ni], work.x1[ni], work.x2[ni], work.x3[ni],
|
| 477 |
+
fni, f1ni, f2ni, f3ni = f[ni], work.f1[ni], work.f2[ni], work.f3[ni]
|
| 478 |
+
j = fni > f2ni
|
| 479 |
+
x1ni[j], f1ni[j] = xni[j], fni[j]
|
| 480 |
+
j = ~j
|
| 481 |
+
x3ni[j], f3ni[j], x2ni[j], f2ni[j] = x2ni[j], f2ni[j], xni[j], fni[j]
|
| 482 |
+
|
| 483 |
+
work.x1[i], work.x2[i], work.x3[i] = x1i, x2i, x3i
|
| 484 |
+
work.f1[i], work.f2[i], work.f3[i] = f1i, f2i, f3i
|
| 485 |
+
work.x1[ni], work.x2[ni], work.x3[ni] = x1ni, x2ni, x3ni,
|
| 486 |
+
work.f1[ni], work.f2[ni], work.f3[ni] = f1ni, f2ni, f3ni
|
| 487 |
+
|
| 488 |
+
def check_termination(work):
|
| 489 |
+
# Check for all terminal conditions and record statuses.
|
| 490 |
+
stop = np.zeros_like(work.x1, dtype=bool) # termination condition met
|
| 491 |
+
|
| 492 |
+
# Bracket is invalid; stop and don't return minimizer/minimum
|
| 493 |
+
i = ((work.f2 > work.f1) | (work.f2 > work.f3))
|
| 494 |
+
work.x2[i], work.f2[i] = np.nan, np.nan
|
| 495 |
+
stop[i], work.status[i] = True, eim._ESIGNERR
|
| 496 |
+
|
| 497 |
+
# Non-finite values; stop and don't return minimizer/minimum
|
| 498 |
+
finite = np.isfinite(work.x1+work.x2+work.x3+work.f1+work.f2+work.f3)
|
| 499 |
+
i = ~(finite | stop)
|
| 500 |
+
work.x2[i], work.f2[i] = np.nan, np.nan
|
| 501 |
+
stop[i], work.status[i] = True, eim._EVALUEERR
|
| 502 |
+
|
| 503 |
+
# [1] Section 3 "Points 1 and 3 are interchanged if necessary to make
|
| 504 |
+
# the (x2, x3) the larger interval."
|
| 505 |
+
# Note: I had used np.choose; this is much faster. This would be a good
|
| 506 |
+
# place to save e.g. `work.x3 - work.x2` for reuse, but I tried and
|
| 507 |
+
# didn't notice a speed boost, so let's keep it simple.
|
| 508 |
+
i = abs(work.x3 - work.x2) < abs(work.x2 - work.x1)
|
| 509 |
+
temp = work.x1[i]
|
| 510 |
+
work.x1[i] = work.x3[i]
|
| 511 |
+
work.x3[i] = temp
|
| 512 |
+
temp = work.f1[i]
|
| 513 |
+
work.f1[i] = work.f3[i]
|
| 514 |
+
work.f3[i] = temp
|
| 515 |
+
|
| 516 |
+
# [1] Section 3 (bottom of page 212)
|
| 517 |
+
# "We set a tolerance value xtol..."
|
| 518 |
+
work.xtol = abs(work.x2) * work.xrtol + work.xatol # [1] (8)
|
| 519 |
+
# "The convergence based on interval is achieved when..."
|
| 520 |
+
# Note: Equality allowed in case of `xtol=0`
|
| 521 |
+
i = abs(work.x3 - work.x2) <= 2 * work.xtol # [1] (9)
|
| 522 |
+
|
| 523 |
+
# "We define ftol using..."
|
| 524 |
+
ftol = abs(work.f2) * work.frtol + work.fatol # [1] (10)
|
| 525 |
+
# "The convergence based on function values is achieved when..."
|
| 526 |
+
# Note 1: modify in place to incorporate tolerance on function value.
|
| 527 |
+
# Note 2: factor of 2 is not in the text; see QBASIC start of DO loop
|
| 528 |
+
i |= (work.f1 - 2 * work.f2 + work.f3) <= 2*ftol # [1] (11)
|
| 529 |
+
i &= ~stop
|
| 530 |
+
stop[i], work.status[i] = True, eim._ECONVERGED
|
| 531 |
+
|
| 532 |
+
return stop
|
| 533 |
+
|
| 534 |
+
def post_termination_check(work):
    # Intentionally a no-op: this solver needs no per-iteration state
    # adjustment after the termination check.  The function exists only to
    # satisfy the callback interface expected by eim._loop below.
    pass
|
| 536 |
+
|
| 537 |
+
def customize_result(res, shape):
    """Order each bracket so that ``xl <= xr`` elementwise.

    The function values ``fl``/``fr`` are swapped together with their
    abscissae, and the (unchanged) ``shape`` is returned for eim._loop.
    """
    left, right = res['xl'], res['xr']
    f_left, f_right = res['fl'], res['fr']
    # Where the bracket is already ordered, keep it; otherwise swap ends.
    ordered = left < right
    res['xl'] = np.where(ordered, left, right)
    res['xr'] = np.where(ordered, right, left)
    res['fl'] = np.where(ordered, f_left, f_right)
    res['fr'] = np.where(ordered, f_right, f_left)
    return shape
|
| 545 |
+
|
| 546 |
+
return eim._loop(work, callback, shape, maxiter, func, args, dtype,
|
| 547 |
+
pre_func_eval, post_func_eval, check_termination,
|
| 548 |
+
post_termination_check, customize_result, res_work_pairs,
|
| 549 |
+
xp=xp)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyla_py.py
ADDED
|
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Interface to Constrained Optimization By Linear Approximation
|
| 3 |
+
|
| 4 |
+
Functions
|
| 5 |
+
---------
|
| 6 |
+
.. autosummary::
|
| 7 |
+
:toctree: generated/
|
| 8 |
+
|
| 9 |
+
fmin_cobyla
|
| 10 |
+
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import functools
|
| 14 |
+
from threading import RLock
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
from scipy.optimize import _cobyla as cobyla
|
| 18 |
+
from ._optimize import (OptimizeResult, _check_unknown_options,
|
| 19 |
+
_prepare_scalar_function)
|
| 20 |
+
# Historically this module imported ``itertools.izip`` on Python 2 and fell
# back to the builtin on Python 3.  ``itertools.izip`` does not exist on any
# supported Python, so the try/except always took the fallback path; bind
# the lazy builtin directly.  The old name is kept because other code in
# this module still refers to ``izip``.
izip = zip
|
| 24 |
+
|
| 25 |
+
__all__ = ['fmin_cobyla']
|
| 26 |
+
|
| 27 |
+
# Workaround as _cobyla.minimize is not threadsafe
# due to an unknown f2py bug and can segfault,
# see gh-9658.
_module_lock = RLock()


def synchronized(func):
    """Decorator serializing every call to ``func`` behind the module lock."""
    @functools.wraps(func)
    def locked_call(*args, **kwargs):
        _module_lock.acquire()
        try:
            return func(*args, **kwargs)
        finally:
            _module_lock.release()
    return locked_call
|
| 37 |
+
|
| 38 |
+
@synchronized
def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
                rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4,
                *, callback=None):
    """
    Minimize a function using the Constrained Optimization By Linear
    Approximation (COBYLA) method. This method wraps a FORTRAN
    implementation of the algorithm.

    Parameters
    ----------
    func : callable
        Function to minimize. In the form func(x, \\*args).
    x0 : ndarray
        Initial guess.
    cons : sequence
        Constraint functions; must all be ``>=0`` (a single function
        if only 1 constraint). Each function takes the parameters `x`
        as its first argument, and it can return either a single number or
        an array or list of numbers.
    args : tuple, optional
        Extra arguments to pass to function.
    consargs : tuple, optional
        Extra arguments to pass to constraint functions (default of None
        means use same extra arguments as those passed to func).
        Use ``()`` for no extra arguments.
    rhobeg : float, optional
        Reasonable initial changes to the variables.
    rhoend : float, optional
        Final accuracy in the optimization (not precisely guaranteed). This
        is a lower bound on the size of the trust region.
    disp : {0, 1, 2, 3}, optional
        Controls the frequency of output; 0 implies no output.
    maxfun : int, optional
        Maximum number of function evaluations.
    catol : float, optional
        Absolute tolerance for constraint violations.
    callback : callable, optional
        Called after each iteration, as ``callback(x)``, where ``x`` is the
        current parameter vector.

    Returns
    -------
    x : ndarray
        The argument that minimises `f`.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'COBYLA' `method` in particular.

    Notes
    -----
    COBYLA works with linear (plus constant) approximations to the objective
    and constraint functions, interpolated on the k+1 vertices of a simplex.
    The resulting linear program (with the approximated constraints required
    to be non-negative) is solved subject to a trust-region radius RHO_j
    around the current iterate.  RHO_j only decreases, from `rhobeg` down to
    `rhoend`, so the iterations behave like a trust-region algorithm.  Refer
    to the references below for how inconsistent linear programs and poor
    improvements are handled, and for how the simplex vertices are updated.

    References
    ----------
    Powell M.J.D. (1994), "A direct search optimization method that models
    the objective and constraint functions by linear interpolation.", in
    Advances in Optimization and Numerical Analysis, eds. S. Gomez and
    J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67

    Powell M.J.D. (1998), "Direct search algorithms for optimization
    calculations", Acta Numerica 7, 287-336

    Powell M.J.D. (2007), "A view of algorithms for optimization without
    derivatives", Cambridge University Technical Report DAMTP 2007/NA03

    Examples
    --------
    Minimize the objective function f(x,y) = x*y subject
    to the constraints x**2 + y**2 < 1 and y > 0::

        >>> def objective(x):
        ...     return x[0]*x[1]
        ...
        >>> def constr1(x):
        ...     return 1 - (x[0]**2 + x[1]**2)
        ...
        >>> def constr2(x):
        ...     return x[1]
        ...
        >>> from scipy.optimize import fmin_cobyla
        >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
        array([-0.70710685,  0.70710671])

    The exact solution is (-sqrt(2)/2, sqrt(2)/2).

    """
    err = "cons must be a sequence of callable functions or a single"\
          " callable function."
    # Normalize `cons` to a sequence of callables: a bare callable is
    # wrapped; anything that is neither sized nor callable is rejected.
    try:
        len(cons)
    except TypeError as exc:
        if not callable(cons):
            raise TypeError(err) from exc
        cons = [cons]
    else:
        if not all(callable(thisfunc) for thisfunc in cons):
            raise TypeError(err)

    # Constraint functions reuse the objective's extra args by default.
    extra_args = args if consargs is None else consargs

    constraint_dicts = tuple({'type': 'ineq', 'fun': c, 'args': extra_args}
                             for c in cons)

    sol = _minimize_cobyla(func, x0, args, constraints=constraint_dicts,
                           rhobeg=rhobeg, tol=rhoend, disp=disp,
                           maxiter=maxfun, catol=catol, callback=callback)

    if disp and not sol['success']:
        print(f"COBYLA failed to find a solution: {sol.message}")
    return sol['x']
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
@synchronized
def _minimize_cobyla(fun, x0, args=(), constraints=(),
                     rhobeg=1.0, tol=1e-4, maxiter=1000,
                     disp=False, catol=2e-4, callback=None, bounds=None,
                     **unknown_options):
    """
    Minimize a scalar function of one or more variables using the
    Constrained Optimization BY Linear Approximation (COBYLA) algorithm.

    Options
    -------
    rhobeg : float
        Reasonable initial changes to the variables.
    tol : float
        Final accuracy in the optimization (not precisely guaranteed).
        This is a lower bound on the size of the trust region.
    disp : bool
        Set to True to print convergence messages. If False,
        `verbosity` is ignored as set to 0.
    maxiter : int
        Maximum number of function evaluations.
    catol : float
        Tolerance (absolute) for constraint violations
    callback : callable, optional
        Called after each iteration with a copy of the current parameter
        vector.
    bounds : Bounds, optional
        Bounds on the variables; finite bounds are translated into
        additional inequality constraints.

    """
    _check_unknown_options(unknown_options)
    maxfun = maxiter
    rhoend = tol
    iprint = int(bool(disp))

    # Accept a single constraint dict as well as a sequence of dicts.
    if isinstance(constraints, dict):
        constraints = (constraints, )

    if bounds:
        # `constraints` may arrive as a tuple (from the single-dict branch
        # above, or from `fmin_cobyla`, which passes one).  Convert to a
        # list so the bound-derived constraints can be appended without an
        # AttributeError.
        constraints = list(constraints)

        i_lb = np.isfinite(bounds.lb)
        if np.any(i_lb):
            # Finite lower bounds become the inequality x - lb >= 0.
            def lb_constraint(x, *args, **kwargs):
                return x[i_lb] - bounds.lb[i_lb]

            constraints.append({'type': 'ineq', 'fun': lb_constraint})

        i_ub = np.isfinite(bounds.ub)
        if np.any(i_ub):
            # Finite upper bounds become the inequality ub - x >= 0.
            def ub_constraint(x, *args, **kwargs):
                return bounds.ub[i_ub] - x[i_ub]

            constraints.append({'type': 'ineq', 'fun': ub_constraint})

    for ic, con in enumerate(constraints):
        # check type
        try:
            ctype = con['type'].lower()
        except KeyError as e:
            raise KeyError('Constraint %d has no type defined.' % ic) from e
        except TypeError as e:
            raise TypeError('Constraints must be defined using a '
                            'dictionary.') from e
        except AttributeError as e:
            raise TypeError("Constraint's type must be a string.") from e
        else:
            # COBYLA supports inequality constraints only.
            if ctype != 'ineq':
                raise ValueError("Constraints of type '%s' not handled by "
                                 "COBYLA." % con['type'])

        # check function
        if 'fun' not in con:
            raise KeyError('Constraint %d has no function defined.' % ic)

        # check extra arguments
        if 'args' not in con:
            con['args'] = ()

    # m is the total number of constraint values
    # it takes into account that some constraints may be vector-valued
    cons_lengths = []
    for c in constraints:
        f = c['fun'](x0, *c['args'])
        try:
            cons_length = len(f)
        except TypeError:
            cons_length = 1
        cons_lengths.append(cons_length)
    m = sum(cons_lengths)

    # create the ScalarFunction, cobyla doesn't require derivative function
    def _jac(x, *args):
        return None

    sf = _prepare_scalar_function(fun, x0, args=args, jac=_jac)

    def calcfc(x, con):
        # Evaluate the objective and write the stacked constraint values
        # into `con` in place, as the FORTRAN routine expects.
        f = sf.fun(x)
        i = 0
        for size, c in zip(cons_lengths, constraints):
            con[i: i + size] = c['fun'](x, *c['args'])
            i += size
        return f

    def wrapped_callback(x):
        if callback is not None:
            callback(np.copy(x))

    # info is filled in by the FORTRAN wrapper:
    # info[0] exit status, info[1] nfev, info[2] objective value,
    # info[3] maximum constraint violation.
    info = np.zeros(4, np.float64)
    xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
                                 rhoend=rhoend, iprint=iprint, maxfun=maxfun,
                                 dinfo=info, callback=wrapped_callback)

    if info[3] > catol:
        # Check constraint violation
        info[0] = 4

    return OptimizeResult(x=xopt,
                          status=int(info[0]),
                          success=info[0] == 1,
                          message={1: 'Optimization terminated successfully.',
                                   2: 'Maximum number of function evaluations '
                                      'has been exceeded.',
                                   3: 'Rounding errors are becoming damaging '
                                      'in COBYLA subroutine.',
                                   4: 'Did not converge to a solution '
                                      'satisfying the constraints. See '
                                      '`maxcv` for magnitude of violation.',
                                   5: 'NaN result encountered.'
                                   }.get(info[0], 'Unknown exit status.'),
                          nfev=int(info[1]),
                          fun=info[2],
                          maxcv=info[3])
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_constraints.py
ADDED
|
@@ -0,0 +1,590 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Constraints definition for minimize."""
|
| 2 |
+
import numpy as np
|
| 3 |
+
from ._hessian_update_strategy import BFGS
|
| 4 |
+
from ._differentiable_functions import (
|
| 5 |
+
VectorFunction, LinearVectorFunction, IdentityVectorFunction)
|
| 6 |
+
from ._optimize import OptimizeWarning
|
| 7 |
+
from warnings import warn, catch_warnings, simplefilter, filterwarnings
|
| 8 |
+
from scipy.sparse import issparse
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _arr_to_scalar(x):
|
| 12 |
+
# If x is a numpy array, return x.item(). This will
|
| 13 |
+
# fail if the array has more than one element.
|
| 14 |
+
return x.item() if isinstance(x, np.ndarray) else x
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class NonlinearConstraint:
    """Nonlinear constraint on the variables.

    The constraint has the general inequality form::

        lb <= fun(x) <= ub

    Here the vector of independent variables x is passed as ndarray of shape
    (n,) and ``fun`` returns a vector with m components.

    It is possible to use equal bounds to represent an equality constraint or
    infinite bounds to represent a one-sided constraint.

    Parameters
    ----------
    fun : callable
        The function defining the constraint.
        The signature is ``fun(x) -> array_like, shape (m,)``.
    lb, ub : array_like
        Lower and upper bounds on the constraint. Each array must have the
        shape (m,) or be a scalar, in the latter case a bound will be the same
        for all components of the constraint. Use ``np.inf`` with an
        appropriate sign to specify a one-sided constraint.
        Set components of `lb` and `ub` equal to represent an equality
        constraint. Note that you can mix constraints of different types:
        interval, one-sided or equality, by setting different components of
        `lb` and `ub` as necessary.
    jac : {callable, '2-point', '3-point', 'cs'}, optional
        Method of computing the Jacobian matrix (an m-by-n matrix,
        where element (i, j) is the partial derivative of f[i] with
        respect to x[j]). The keywords {'2-point', '3-point',
        'cs'} select a finite difference scheme for the numerical estimation.
        A callable must have the following signature:
        ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
        Default is '2-point'.
    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
        Method for computing the Hessian matrix. The keywords
        {'2-point', '3-point', 'cs'} select a finite difference scheme for
        numerical estimation. Alternatively, objects implementing
        `HessianUpdateStrategy` interface can be used to approximate the
        Hessian. Currently available implementations are:

        - `BFGS` (default option: when the argument is omitted, a fresh
          `BFGS` instance is created for this constraint)
        - `SR1`

        A callable must return the Hessian matrix of ``dot(fun, v)`` and
        must have the following signature:
        ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
        Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers.
    keep_feasible : array_like of bool, optional
        Whether to keep the constraint components feasible throughout
        iterations. A single value set this property for all components.
        Default is False. Has no effect for equality constraints.
    finite_diff_rel_step: None or array_like, optional
        Relative step size for the finite difference approximation. Default is
        None, which will select a reasonable value automatically depending
        on a finite difference scheme.
    finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional
        Defines the sparsity structure of the Jacobian matrix for finite
        difference estimation, its shape must be (m, n). If the Jacobian has
        only few non-zero elements in *each* row, providing the sparsity
        structure will greatly speed up the computations. A zero entry means
        that a corresponding element in the Jacobian is identically zero.
        If provided, forces the use of 'lsmr' trust-region solver.
        If None (default) then dense differencing will be used.

    Notes
    -----
    Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
    approximating either the Jacobian or the Hessian. We, however, do not allow
    its use for approximating both simultaneously. Hence whenever the Jacobian
    is estimated via finite-differences, we require the Hessian to be estimated
    using one of the quasi-Newton strategies.

    The scheme 'cs' is potentially the most accurate, but requires the function
    to correctly handles complex inputs and be analytically continuable to the
    complex plane. The scheme '3-point' is more accurate than '2-point' but
    requires twice as many operations.

    Examples
    --------
    Constrain ``x[0] < sin(x[1]) + 1.9``

    >>> from scipy.optimize import NonlinearConstraint
    >>> import numpy as np
    >>> con = lambda x: x[0] - np.sin(x[1])
    >>> nlc = NonlinearConstraint(con, -np.inf, 1.9)

    """
    # Private sentinel for the `hess` default.  A ``hess=BFGS()`` default in
    # the signature would be evaluated once at class-creation time, so every
    # NonlinearConstraint built with the default would share one *stateful*
    # BFGS instance; using a sentinel also keeps an explicit ``hess=None``
    # distinct from "argument omitted".
    _DEFAULT_HESS = object()

    def __init__(self, fun, lb, ub, jac='2-point', hess=_DEFAULT_HESS,
                 keep_feasible=False, finite_diff_rel_step=None,
                 finite_diff_jac_sparsity=None):
        if hess is NonlinearConstraint._DEFAULT_HESS:
            # Fresh quasi-Newton approximation per constraint instance.
            hess = BFGS()
        self.fun = fun
        self.lb = lb
        self.ub = ub
        self.finite_diff_rel_step = finite_diff_rel_step
        self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
        self.jac = jac
        self.hess = hess
        self.keep_feasible = keep_feasible
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class LinearConstraint:
    """Linear constraint on the variables.

    The constraint has the general inequality form::

        lb <= A.dot(x) <= ub

    Here the vector of independent variables x is passed as ndarray of shape
    (n,) and the matrix A has shape (m, n).

    It is possible to use equal bounds to represent an equality constraint or
    infinite bounds to represent a one-sided constraint.

    Parameters
    ----------
    A : {array_like, sparse matrix}, shape (m, n)
        Matrix defining the constraint.
    lb, ub : dense array_like, optional
        Lower and upper limits on the constraint. Each array must have the
        shape (m,) or be a scalar, in the latter case a bound will be the same
        for all components of the constraint. Use ``np.inf`` with an
        appropriate sign to specify a one-sided constraint.
        Set components of `lb` and `ub` equal to represent an equality
        constraint. Note that you can mix constraints of different types:
        interval, one-sided or equality, by setting different components of
        `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
        and ``ub = np.inf`` (no limits).
    keep_feasible : dense array_like of bool, optional
        Whether to keep the constraint components feasible throughout
        iterations. A single value set this property for all components.
        Default is False. Has no effect for equality constraints.
    """
    def _input_validation(self):
        """Broadcast `lb`, `ub`, `keep_feasible` against `A`'s row count."""
        if self.A.ndim != 2:
            message = "`A` must have exactly two dimensions."
            raise ValueError(message)

        try:
            shape = self.A.shape[0:1]
            self.lb = np.broadcast_to(self.lb, shape)
            self.ub = np.broadcast_to(self.ub, shape)
            self.keep_feasible = np.broadcast_to(self.keep_feasible, shape)
        except ValueError as e:
            message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable "
                       "to shape `A.shape[0:1]`")
            # Chain the broadcast error so the original cause is visible.
            raise ValueError(message) from e

    def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False):
        if not issparse(A):
            # In some cases, if the constraint is not valid, this emits a
            # VisibleDeprecationWarning about ragged nested sequences
            # before eventually causing an error. `scipy.optimize.milp` would
            # prefer that this just error out immediately so it can handle it
            # rather than concerning the user.
            with catch_warnings():
                simplefilter("error")
                self.A = np.atleast_2d(A).astype(np.float64)
        else:
            self.A = A
        if issparse(lb) or issparse(ub):
            raise ValueError("Constraint limits must be dense arrays.")
        self.lb = np.atleast_1d(lb).astype(np.float64)
        self.ub = np.atleast_1d(ub).astype(np.float64)

        if issparse(keep_feasible):
            raise ValueError("`keep_feasible` must be a dense array.")
        self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
        self._input_validation()

    def residual(self, x):
        """
        Calculate the residual between the constraint function and the limits

        For a linear constraint of the form::

            lb <= A@x <= ub

        the lower and upper residuals between ``A@x`` and the limits are values
        ``sl`` and ``sb`` such that::

            lb + sl == A@x == ub - sb

        When all elements of ``sl`` and ``sb`` are positive, all elements of
        the constraint are satisfied; a negative element in ``sl`` or ``sb``
        indicates that the corresponding element of the constraint is not
        satisfied.

        Parameters
        ----------
        x: array_like
            Vector of independent variables

        Returns
        -------
        sl, sb : array-like
            The lower and upper residuals
        """
        return self.A@x - self.lb, self.ub - self.A@x
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class Bounds:
    """Bounds constraint on the variables.

    The constraint has the general inequality form::

        lb <= x <= ub

    It is possible to use equal bounds to represent an equality constraint or
    infinite bounds to represent a one-sided constraint.

    Parameters
    ----------
    lb, ub : dense array_like, optional
        Lower and upper bounds on independent variables. `lb`, `ub`, and
        `keep_feasible` must be the same shape or broadcastable.
        Set components of `lb` and `ub` equal
        to fix a variable. Use ``np.inf`` with an appropriate sign to disable
        bounds on all or some variables. Note that you can mix constraints of
        different types: interval, one-sided or equality, by setting different
        components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
        and ``ub = np.inf`` (no bounds).
    keep_feasible : dense array_like of bool, optional
        Whether to keep the constraint components feasible throughout
        iterations. Must be broadcastable with `lb` and `ub`.
        Default is False. Has no effect for equality constraints.
    """
    def _input_validation(self):
        """Broadcast `lb`, `ub`, `keep_feasible` to a common shape."""
        try:
            res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible)
            self.lb, self.ub, self.keep_feasible = res
        except ValueError as e:
            message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
            # Chain the broadcast error so the original cause is visible.
            raise ValueError(message) from e

    def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False):
        if issparse(lb) or issparse(ub):
            raise ValueError("Lower and upper bounds must be dense arrays.")
        self.lb = np.atleast_1d(lb)
        self.ub = np.atleast_1d(ub)

        if issparse(keep_feasible):
            raise ValueError("`keep_feasible` must be a dense array.")
        self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
        self._input_validation()

    def __repr__(self):
        # Only show `keep_feasible` when it differs from the default.
        start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}"
        if np.any(self.keep_feasible):
            end = f", keep_feasible={self.keep_feasible!r})"
        else:
            end = ")"
        return start + end

    def residual(self, x):
        """Calculate the residual (slack) between the input and the bounds

        For a bound constraint of the form::

            lb <= x <= ub

        the lower and upper residuals between `x` and the bounds are values
        ``sl`` and ``sb`` such that::

            lb + sl == x == ub - sb

        When all elements of ``sl`` and ``sb`` are positive, all elements of
        ``x`` lie within the bounds; a negative element in ``sl`` or ``sb``
        indicates that the corresponding element of ``x`` is out of bounds.

        Parameters
        ----------
        x: array_like
            Vector of independent variables

        Returns
        -------
        sl, sb : array-like
            The lower and upper residuals
        """
        return x - self.lb, self.ub - x
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
class PreparedConstraint:
|
| 302 |
+
"""Constraint prepared from a user defined constraint.
|
| 303 |
+
|
| 304 |
+
On creation it will check whether a constraint definition is valid and
|
| 305 |
+
the initial point is feasible. If created successfully, it will contain
|
| 306 |
+
the attributes listed below.
|
| 307 |
+
|
| 308 |
+
Parameters
|
| 309 |
+
----------
|
| 310 |
+
constraint : {NonlinearConstraint, LinearConstraint`, Bounds}
|
| 311 |
+
Constraint to check and prepare.
|
| 312 |
+
x0 : array_like
|
| 313 |
+
Initial vector of independent variables.
|
| 314 |
+
sparse_jacobian : bool or None, optional
|
| 315 |
+
If bool, then the Jacobian of the constraint will be converted
|
| 316 |
+
to the corresponded format if necessary. If None (default), such
|
| 317 |
+
conversion is not made.
|
| 318 |
+
finite_diff_bounds : 2-tuple, optional
|
| 319 |
+
Lower and upper bounds on the independent variables for the finite
|
| 320 |
+
difference approximation, if applicable. Defaults to no bounds.
|
| 321 |
+
|
| 322 |
+
Attributes
|
| 323 |
+
----------
|
| 324 |
+
fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
|
| 325 |
+
Function defining the constraint wrapped by one of the convenience
|
| 326 |
+
classes.
|
| 327 |
+
bounds : 2-tuple
|
| 328 |
+
Contains lower and upper bounds for the constraints --- lb and ub.
|
| 329 |
+
These are converted to ndarray and have a size equal to the number of
|
| 330 |
+
the constraints.
|
| 331 |
+
keep_feasible : ndarray
|
| 332 |
+
Array indicating which components must be kept feasible with a size
|
| 333 |
+
equal to the number of the constraints.
|
| 334 |
+
"""
|
| 335 |
+
def __init__(self, constraint, x0, sparse_jacobian=None,
|
| 336 |
+
finite_diff_bounds=(-np.inf, np.inf)):
|
| 337 |
+
if isinstance(constraint, NonlinearConstraint):
|
| 338 |
+
fun = VectorFunction(constraint.fun, x0,
|
| 339 |
+
constraint.jac, constraint.hess,
|
| 340 |
+
constraint.finite_diff_rel_step,
|
| 341 |
+
constraint.finite_diff_jac_sparsity,
|
| 342 |
+
finite_diff_bounds, sparse_jacobian)
|
| 343 |
+
elif isinstance(constraint, LinearConstraint):
|
| 344 |
+
fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
|
| 345 |
+
elif isinstance(constraint, Bounds):
|
| 346 |
+
fun = IdentityVectorFunction(x0, sparse_jacobian)
|
| 347 |
+
else:
|
| 348 |
+
raise ValueError("`constraint` of an unknown type is passed.")
|
| 349 |
+
|
| 350 |
+
m = fun.m
|
| 351 |
+
|
| 352 |
+
lb = np.asarray(constraint.lb, dtype=float)
|
| 353 |
+
ub = np.asarray(constraint.ub, dtype=float)
|
| 354 |
+
keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
|
| 355 |
+
|
| 356 |
+
lb = np.broadcast_to(lb, m)
|
| 357 |
+
ub = np.broadcast_to(ub, m)
|
| 358 |
+
keep_feasible = np.broadcast_to(keep_feasible, m)
|
| 359 |
+
|
| 360 |
+
if keep_feasible.shape != (m,):
|
| 361 |
+
raise ValueError("`keep_feasible` has a wrong shape.")
|
| 362 |
+
|
| 363 |
+
mask = keep_feasible & (lb != ub)
|
| 364 |
+
f0 = fun.f
|
| 365 |
+
if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
|
| 366 |
+
raise ValueError("`x0` is infeasible with respect to some "
|
| 367 |
+
"inequality constraint with `keep_feasible` "
|
| 368 |
+
"set to True.")
|
| 369 |
+
|
| 370 |
+
self.fun = fun
|
| 371 |
+
self.bounds = (lb, ub)
|
| 372 |
+
self.keep_feasible = keep_feasible
|
| 373 |
+
|
| 374 |
+
def violation(self, x):
|
| 375 |
+
"""How much the constraint is exceeded by.
|
| 376 |
+
|
| 377 |
+
Parameters
|
| 378 |
+
----------
|
| 379 |
+
x : array-like
|
| 380 |
+
Vector of independent variables
|
| 381 |
+
|
| 382 |
+
Returns
|
| 383 |
+
-------
|
| 384 |
+
excess : array-like
|
| 385 |
+
How much the constraint is exceeded by, for each of the
|
| 386 |
+
constraints specified by `PreparedConstraint.fun`.
|
| 387 |
+
"""
|
| 388 |
+
with catch_warnings():
|
| 389 |
+
# Ignore the following warning, it's not important when
|
| 390 |
+
# figuring out total violation
|
| 391 |
+
# UserWarning: delta_grad == 0.0. Check if the approximated
|
| 392 |
+
# function is linear
|
| 393 |
+
filterwarnings("ignore", "delta_grad", UserWarning)
|
| 394 |
+
ev = self.fun.fun(np.asarray(x))
|
| 395 |
+
|
| 396 |
+
excess_lb = np.maximum(self.bounds[0] - ev, 0)
|
| 397 |
+
excess_ub = np.maximum(ev - self.bounds[1], 0)
|
| 398 |
+
|
| 399 |
+
return excess_lb + excess_ub
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
def new_bounds_to_old(lb, ub, n):
|
| 403 |
+
"""Convert the new bounds representation to the old one.
|
| 404 |
+
|
| 405 |
+
The new representation is a tuple (lb, ub) and the old one is a list
|
| 406 |
+
containing n tuples, ith containing lower and upper bound on a ith
|
| 407 |
+
variable.
|
| 408 |
+
If any of the entries in lb/ub are -np.inf/np.inf they are replaced by
|
| 409 |
+
None.
|
| 410 |
+
"""
|
| 411 |
+
lb = np.broadcast_to(lb, n)
|
| 412 |
+
ub = np.broadcast_to(ub, n)
|
| 413 |
+
|
| 414 |
+
lb = [float(x) if x > -np.inf else None for x in lb]
|
| 415 |
+
ub = [float(x) if x < np.inf else None for x in ub]
|
| 416 |
+
|
| 417 |
+
return list(zip(lb, ub))
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def old_bound_to_new(bounds):
|
| 421 |
+
"""Convert the old bounds representation to the new one.
|
| 422 |
+
|
| 423 |
+
The new representation is a tuple (lb, ub) and the old one is a list
|
| 424 |
+
containing n tuples, ith containing lower and upper bound on a ith
|
| 425 |
+
variable.
|
| 426 |
+
If any of the entries in lb/ub are None they are replaced by
|
| 427 |
+
-np.inf/np.inf.
|
| 428 |
+
"""
|
| 429 |
+
lb, ub = zip(*bounds)
|
| 430 |
+
|
| 431 |
+
# Convert occurrences of None to -inf or inf, and replace occurrences of
|
| 432 |
+
# any numpy array x with x.item(). Then wrap the results in numpy arrays.
|
| 433 |
+
lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf
|
| 434 |
+
for x in lb])
|
| 435 |
+
ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf
|
| 436 |
+
for x in ub])
|
| 437 |
+
|
| 438 |
+
return lb, ub
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def strict_bounds(lb, ub, keep_feasible, n_vars):
|
| 442 |
+
"""Remove bounds which are not asked to be kept feasible."""
|
| 443 |
+
strict_lb = np.resize(lb, n_vars).astype(float)
|
| 444 |
+
strict_ub = np.resize(ub, n_vars).astype(float)
|
| 445 |
+
keep_feasible = np.resize(keep_feasible, n_vars)
|
| 446 |
+
strict_lb[~keep_feasible] = -np.inf
|
| 447 |
+
strict_ub[~keep_feasible] = np.inf
|
| 448 |
+
return strict_lb, strict_ub
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def new_constraint_to_old(con, x0):
|
| 452 |
+
"""
|
| 453 |
+
Converts new-style constraint objects to old-style constraint dictionaries.
|
| 454 |
+
"""
|
| 455 |
+
if isinstance(con, NonlinearConstraint):
|
| 456 |
+
if (con.finite_diff_jac_sparsity is not None or
|
| 457 |
+
con.finite_diff_rel_step is not None or
|
| 458 |
+
not isinstance(con.hess, BFGS) or # misses user specified BFGS
|
| 459 |
+
con.keep_feasible):
|
| 460 |
+
warn("Constraint options `finite_diff_jac_sparsity`, "
|
| 461 |
+
"`finite_diff_rel_step`, `keep_feasible`, and `hess`"
|
| 462 |
+
"are ignored by this method.",
|
| 463 |
+
OptimizeWarning, stacklevel=3)
|
| 464 |
+
|
| 465 |
+
fun = con.fun
|
| 466 |
+
if callable(con.jac):
|
| 467 |
+
jac = con.jac
|
| 468 |
+
else:
|
| 469 |
+
jac = None
|
| 470 |
+
|
| 471 |
+
else: # LinearConstraint
|
| 472 |
+
if np.any(con.keep_feasible):
|
| 473 |
+
warn("Constraint option `keep_feasible` is ignored by this method.",
|
| 474 |
+
OptimizeWarning, stacklevel=3)
|
| 475 |
+
|
| 476 |
+
A = con.A
|
| 477 |
+
if issparse(A):
|
| 478 |
+
A = A.toarray()
|
| 479 |
+
def fun(x):
|
| 480 |
+
return np.dot(A, x)
|
| 481 |
+
def jac(x):
|
| 482 |
+
return A
|
| 483 |
+
|
| 484 |
+
# FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
|
| 485 |
+
# use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
|
| 486 |
+
pcon = PreparedConstraint(con, x0)
|
| 487 |
+
lb, ub = pcon.bounds
|
| 488 |
+
|
| 489 |
+
i_eq = lb == ub
|
| 490 |
+
i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
|
| 491 |
+
i_bound_above = np.logical_xor(ub != np.inf, i_eq)
|
| 492 |
+
i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)
|
| 493 |
+
|
| 494 |
+
if np.any(i_unbounded):
|
| 495 |
+
warn("At least one constraint is unbounded above and below. Such "
|
| 496 |
+
"constraints are ignored.",
|
| 497 |
+
OptimizeWarning, stacklevel=3)
|
| 498 |
+
|
| 499 |
+
ceq = []
|
| 500 |
+
if np.any(i_eq):
|
| 501 |
+
def f_eq(x):
|
| 502 |
+
y = np.array(fun(x)).flatten()
|
| 503 |
+
return y[i_eq] - lb[i_eq]
|
| 504 |
+
ceq = [{"type": "eq", "fun": f_eq}]
|
| 505 |
+
|
| 506 |
+
if jac is not None:
|
| 507 |
+
def j_eq(x):
|
| 508 |
+
dy = jac(x)
|
| 509 |
+
if issparse(dy):
|
| 510 |
+
dy = dy.toarray()
|
| 511 |
+
dy = np.atleast_2d(dy)
|
| 512 |
+
return dy[i_eq, :]
|
| 513 |
+
ceq[0]["jac"] = j_eq
|
| 514 |
+
|
| 515 |
+
cineq = []
|
| 516 |
+
n_bound_below = np.sum(i_bound_below)
|
| 517 |
+
n_bound_above = np.sum(i_bound_above)
|
| 518 |
+
if n_bound_below + n_bound_above:
|
| 519 |
+
def f_ineq(x):
|
| 520 |
+
y = np.zeros(n_bound_below + n_bound_above)
|
| 521 |
+
y_all = np.array(fun(x)).flatten()
|
| 522 |
+
y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
|
| 523 |
+
y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
|
| 524 |
+
return y
|
| 525 |
+
cineq = [{"type": "ineq", "fun": f_ineq}]
|
| 526 |
+
|
| 527 |
+
if jac is not None:
|
| 528 |
+
def j_ineq(x):
|
| 529 |
+
dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
|
| 530 |
+
dy_all = jac(x)
|
| 531 |
+
if issparse(dy_all):
|
| 532 |
+
dy_all = dy_all.toarray()
|
| 533 |
+
dy_all = np.atleast_2d(dy_all)
|
| 534 |
+
dy[:n_bound_below, :] = dy_all[i_bound_below]
|
| 535 |
+
dy[n_bound_below:, :] = -dy_all[i_bound_above]
|
| 536 |
+
return dy
|
| 537 |
+
cineq[0]["jac"] = j_ineq
|
| 538 |
+
|
| 539 |
+
old_constraints = ceq + cineq
|
| 540 |
+
|
| 541 |
+
if len(old_constraints) > 1:
|
| 542 |
+
warn("Equality and inequality constraints are specified in the same "
|
| 543 |
+
"element of the constraint list. For efficient use with this "
|
| 544 |
+
"method, equality and inequality constraints should be specified "
|
| 545 |
+
"in separate elements of the constraint list. ",
|
| 546 |
+
OptimizeWarning, stacklevel=3)
|
| 547 |
+
return old_constraints
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def old_constraint_to_new(ic, con):
|
| 551 |
+
"""
|
| 552 |
+
Converts old-style constraint dictionaries to new-style constraint objects.
|
| 553 |
+
"""
|
| 554 |
+
# check type
|
| 555 |
+
try:
|
| 556 |
+
ctype = con['type'].lower()
|
| 557 |
+
except KeyError as e:
|
| 558 |
+
raise KeyError('Constraint %d has no type defined.' % ic) from e
|
| 559 |
+
except TypeError as e:
|
| 560 |
+
raise TypeError(
|
| 561 |
+
'Constraints must be a sequence of dictionaries.'
|
| 562 |
+
) from e
|
| 563 |
+
except AttributeError as e:
|
| 564 |
+
raise TypeError("Constraint's type must be a string.") from e
|
| 565 |
+
else:
|
| 566 |
+
if ctype not in ['eq', 'ineq']:
|
| 567 |
+
raise ValueError("Unknown constraint type '%s'." % con['type'])
|
| 568 |
+
if 'fun' not in con:
|
| 569 |
+
raise ValueError('Constraint %d has no function defined.' % ic)
|
| 570 |
+
|
| 571 |
+
lb = 0
|
| 572 |
+
if ctype == 'eq':
|
| 573 |
+
ub = 0
|
| 574 |
+
else:
|
| 575 |
+
ub = np.inf
|
| 576 |
+
|
| 577 |
+
jac = '2-point'
|
| 578 |
+
if 'args' in con:
|
| 579 |
+
args = con['args']
|
| 580 |
+
def fun(x):
|
| 581 |
+
return con["fun"](x, *args)
|
| 582 |
+
if 'jac' in con:
|
| 583 |
+
def jac(x):
|
| 584 |
+
return con["jac"](x, *args)
|
| 585 |
+
else:
|
| 586 |
+
fun = con['fun']
|
| 587 |
+
if 'jac' in con:
|
| 588 |
+
jac = con['jac']
|
| 589 |
+
|
| 590 |
+
return NonlinearConstraint(fun, lb, ub, jac)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_dcsrch.py
ADDED
|
@@ -0,0 +1,728 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
# 2023 - ported from minpack2.dcsrch, dcstep (Fortran) to Python
|
| 5 |
+
c MINPACK-1 Project. June 1983.
|
| 6 |
+
c Argonne National Laboratory.
|
| 7 |
+
c Jorge J. More' and David J. Thuente.
|
| 8 |
+
c
|
| 9 |
+
c MINPACK-2 Project. November 1993.
|
| 10 |
+
c Argonne National Laboratory and University of Minnesota.
|
| 11 |
+
c Brett M. Averick, Richard G. Carter, and Jorge J. More'.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
# NOTE this file was linted by black on first commit, and can be kept that way.
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class DCSRCH:
|
| 18 |
+
"""
|
| 19 |
+
Parameters
|
| 20 |
+
----------
|
| 21 |
+
phi : callable phi(alpha)
|
| 22 |
+
Function at point `alpha`
|
| 23 |
+
derphi : callable phi'(alpha)
|
| 24 |
+
Objective function derivative. Returns a scalar.
|
| 25 |
+
ftol : float
|
| 26 |
+
A nonnegative tolerance for the sufficient decrease condition.
|
| 27 |
+
gtol : float
|
| 28 |
+
A nonnegative tolerance for the curvature condition.
|
| 29 |
+
xtol : float
|
| 30 |
+
A nonnegative relative tolerance for an acceptable step. The
|
| 31 |
+
subroutine exits with a warning if the relative difference between
|
| 32 |
+
sty and stx is less than xtol.
|
| 33 |
+
stpmin : float
|
| 34 |
+
A nonnegative lower bound for the step.
|
| 35 |
+
stpmax :
|
| 36 |
+
A nonnegative upper bound for the step.
|
| 37 |
+
|
| 38 |
+
Notes
|
| 39 |
+
-----
|
| 40 |
+
|
| 41 |
+
This subroutine finds a step that satisfies a sufficient
|
| 42 |
+
decrease condition and a curvature condition.
|
| 43 |
+
|
| 44 |
+
Each call of the subroutine updates an interval with
|
| 45 |
+
endpoints stx and sty. The interval is initially chosen
|
| 46 |
+
so that it contains a minimizer of the modified function
|
| 47 |
+
|
| 48 |
+
psi(stp) = f(stp) - f(0) - ftol*stp*f'(0).
|
| 49 |
+
|
| 50 |
+
If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
|
| 51 |
+
interval is chosen so that it contains a minimizer of f.
|
| 52 |
+
|
| 53 |
+
The algorithm is designed to find a step that satisfies
|
| 54 |
+
the sufficient decrease condition
|
| 55 |
+
|
| 56 |
+
f(stp) <= f(0) + ftol*stp*f'(0),
|
| 57 |
+
|
| 58 |
+
and the curvature condition
|
| 59 |
+
|
| 60 |
+
abs(f'(stp)) <= gtol*abs(f'(0)).
|
| 61 |
+
|
| 62 |
+
If ftol is less than gtol and if, for example, the function
|
| 63 |
+
is bounded below, then there is always a step which satisfies
|
| 64 |
+
both conditions.
|
| 65 |
+
|
| 66 |
+
If no step can be found that satisfies both conditions, then
|
| 67 |
+
the algorithm stops with a warning. In this case stp only
|
| 68 |
+
satisfies the sufficient decrease condition.
|
| 69 |
+
|
| 70 |
+
A typical invocation of dcsrch has the following outline:
|
| 71 |
+
|
| 72 |
+
Evaluate the function at stp = 0.0d0; store in f.
|
| 73 |
+
Evaluate the gradient at stp = 0.0d0; store in g.
|
| 74 |
+
Choose a starting step stp.
|
| 75 |
+
|
| 76 |
+
task = 'START'
|
| 77 |
+
10 continue
|
| 78 |
+
call dcsrch(stp,f,g,ftol,gtol,xtol,task,stpmin,stpmax,
|
| 79 |
+
isave,dsave)
|
| 80 |
+
if (task .eq. 'FG') then
|
| 81 |
+
Evaluate the function and the gradient at stp
|
| 82 |
+
go to 10
|
| 83 |
+
end if
|
| 84 |
+
|
| 85 |
+
NOTE: The user must not alter work arrays between calls.
|
| 86 |
+
|
| 87 |
+
The subroutine statement is
|
| 88 |
+
|
| 89 |
+
subroutine dcsrch(f,g,stp,ftol,gtol,xtol,stpmin,stpmax,
|
| 90 |
+
task,isave,dsave)
|
| 91 |
+
where
|
| 92 |
+
|
| 93 |
+
stp is a double precision variable.
|
| 94 |
+
On entry stp is the current estimate of a satisfactory
|
| 95 |
+
step. On initial entry, a positive initial estimate
|
| 96 |
+
must be provided.
|
| 97 |
+
On exit stp is the current estimate of a satisfactory step
|
| 98 |
+
if task = 'FG'. If task = 'CONV' then stp satisfies
|
| 99 |
+
the sufficient decrease and curvature condition.
|
| 100 |
+
|
| 101 |
+
f is a double precision variable.
|
| 102 |
+
On initial entry f is the value of the function at 0.
|
| 103 |
+
On subsequent entries f is the value of the
|
| 104 |
+
function at stp.
|
| 105 |
+
On exit f is the value of the function at stp.
|
| 106 |
+
|
| 107 |
+
g is a double precision variable.
|
| 108 |
+
On initial entry g is the derivative of the function at 0.
|
| 109 |
+
On subsequent entries g is the derivative of the
|
| 110 |
+
function at stp.
|
| 111 |
+
On exit g is the derivative of the function at stp.
|
| 112 |
+
|
| 113 |
+
ftol is a double precision variable.
|
| 114 |
+
On entry ftol specifies a nonnegative tolerance for the
|
| 115 |
+
sufficient decrease condition.
|
| 116 |
+
On exit ftol is unchanged.
|
| 117 |
+
|
| 118 |
+
gtol is a double precision variable.
|
| 119 |
+
On entry gtol specifies a nonnegative tolerance for the
|
| 120 |
+
curvature condition.
|
| 121 |
+
On exit gtol is unchanged.
|
| 122 |
+
|
| 123 |
+
xtol is a double precision variable.
|
| 124 |
+
On entry xtol specifies a nonnegative relative tolerance
|
| 125 |
+
for an acceptable step. The subroutine exits with a
|
| 126 |
+
warning if the relative difference between sty and stx
|
| 127 |
+
is less than xtol.
|
| 128 |
+
|
| 129 |
+
On exit xtol is unchanged.
|
| 130 |
+
|
| 131 |
+
task is a character variable of length at least 60.
|
| 132 |
+
On initial entry task must be set to 'START'.
|
| 133 |
+
On exit task indicates the required action:
|
| 134 |
+
|
| 135 |
+
If task(1:2) = 'FG' then evaluate the function and
|
| 136 |
+
derivative at stp and call dcsrch again.
|
| 137 |
+
|
| 138 |
+
If task(1:4) = 'CONV' then the search is successful.
|
| 139 |
+
|
| 140 |
+
If task(1:4) = 'WARN' then the subroutine is not able
|
| 141 |
+
to satisfy the convergence conditions. The exit value of
|
| 142 |
+
stp contains the best point found during the search.
|
| 143 |
+
|
| 144 |
+
If task(1:5) = 'ERROR' then there is an error in the
|
| 145 |
+
input arguments.
|
| 146 |
+
|
| 147 |
+
On exit with convergence, a warning or an error, the
|
| 148 |
+
variable task contains additional information.
|
| 149 |
+
|
| 150 |
+
stpmin is a double precision variable.
|
| 151 |
+
On entry stpmin is a nonnegative lower bound for the step.
|
| 152 |
+
On exit stpmin is unchanged.
|
| 153 |
+
|
| 154 |
+
stpmax is a double precision variable.
|
| 155 |
+
On entry stpmax is a nonnegative upper bound for the step.
|
| 156 |
+
On exit stpmax is unchanged.
|
| 157 |
+
|
| 158 |
+
isave is an integer work array of dimension 2.
|
| 159 |
+
|
| 160 |
+
dsave is a double precision work array of dimension 13.
|
| 161 |
+
|
| 162 |
+
Subprograms called
|
| 163 |
+
|
| 164 |
+
MINPACK-2 ... dcstep
|
| 165 |
+
MINPACK-1 Project. June 1983.
|
| 166 |
+
Argonne National Laboratory.
|
| 167 |
+
Jorge J. More' and David J. Thuente.
|
| 168 |
+
|
| 169 |
+
MINPACK-2 Project. November 1993.
|
| 170 |
+
Argonne National Laboratory and University of Minnesota.
|
| 171 |
+
Brett M. Averick, Richard G. Carter, and Jorge J. More'.
|
| 172 |
+
"""
|
| 173 |
+
|
| 174 |
+
def __init__(self, phi, derphi, ftol, gtol, xtol, stpmin, stpmax):
|
| 175 |
+
self.stage = None
|
| 176 |
+
self.ginit = None
|
| 177 |
+
self.gtest = None
|
| 178 |
+
self.gx = None
|
| 179 |
+
self.gy = None
|
| 180 |
+
self.finit = None
|
| 181 |
+
self.fx = None
|
| 182 |
+
self.fy = None
|
| 183 |
+
self.stx = None
|
| 184 |
+
self.sty = None
|
| 185 |
+
self.stmin = None
|
| 186 |
+
self.stmax = None
|
| 187 |
+
self.width = None
|
| 188 |
+
self.width1 = None
|
| 189 |
+
|
| 190 |
+
# leave all assessment of tolerances/limits to the first call of
|
| 191 |
+
# this object
|
| 192 |
+
self.ftol = ftol
|
| 193 |
+
self.gtol = gtol
|
| 194 |
+
self.xtol = xtol
|
| 195 |
+
self.stpmin = stpmin
|
| 196 |
+
self.stpmax = stpmax
|
| 197 |
+
|
| 198 |
+
self.phi = phi
|
| 199 |
+
self.derphi = derphi
|
| 200 |
+
|
| 201 |
+
def __call__(self, alpha1, phi0=None, derphi0=None, maxiter=100):
|
| 202 |
+
"""
|
| 203 |
+
Parameters
|
| 204 |
+
----------
|
| 205 |
+
alpha1 : float
|
| 206 |
+
alpha1 is the current estimate of a satisfactory
|
| 207 |
+
step. A positive initial estimate must be provided.
|
| 208 |
+
phi0 : float
|
| 209 |
+
the value of `phi` at 0 (if known).
|
| 210 |
+
derphi0 : float
|
| 211 |
+
the derivative of `derphi` at 0 (if known).
|
| 212 |
+
maxiter : int
|
| 213 |
+
|
| 214 |
+
Returns
|
| 215 |
+
-------
|
| 216 |
+
alpha : float
|
| 217 |
+
Step size, or None if no suitable step was found.
|
| 218 |
+
phi : float
|
| 219 |
+
Value of `phi` at the new point `alpha`.
|
| 220 |
+
phi0 : float
|
| 221 |
+
Value of `phi` at `alpha=0`.
|
| 222 |
+
task : bytes
|
| 223 |
+
On exit task indicates status information.
|
| 224 |
+
|
| 225 |
+
If task[:4] == b'CONV' then the search is successful.
|
| 226 |
+
|
| 227 |
+
If task[:4] == b'WARN' then the subroutine is not able
|
| 228 |
+
to satisfy the convergence conditions. The exit value of
|
| 229 |
+
stp contains the best point found during the search.
|
| 230 |
+
|
| 231 |
+
If task[:5] == b'ERROR' then there is an error in the
|
| 232 |
+
input arguments.
|
| 233 |
+
"""
|
| 234 |
+
if phi0 is None:
|
| 235 |
+
phi0 = self.phi(0.0)
|
| 236 |
+
if derphi0 is None:
|
| 237 |
+
derphi0 = self.derphi(0.0)
|
| 238 |
+
|
| 239 |
+
phi1 = phi0
|
| 240 |
+
derphi1 = derphi0
|
| 241 |
+
|
| 242 |
+
task = b"START"
|
| 243 |
+
for i in range(maxiter):
|
| 244 |
+
stp, phi1, derphi1, task = self._iterate(
|
| 245 |
+
alpha1, phi1, derphi1, task
|
| 246 |
+
)
|
| 247 |
+
|
| 248 |
+
if not np.isfinite(stp):
|
| 249 |
+
task = b"WARN"
|
| 250 |
+
stp = None
|
| 251 |
+
break
|
| 252 |
+
|
| 253 |
+
if task[:2] == b"FG":
|
| 254 |
+
alpha1 = stp
|
| 255 |
+
phi1 = self.phi(stp)
|
| 256 |
+
derphi1 = self.derphi(stp)
|
| 257 |
+
else:
|
| 258 |
+
break
|
| 259 |
+
else:
|
| 260 |
+
# maxiter reached, the line search did not converge
|
| 261 |
+
stp = None
|
| 262 |
+
task = b"WARNING: dcsrch did not converge within max iterations"
|
| 263 |
+
|
| 264 |
+
if task[:5] == b"ERROR" or task[:4] == b"WARN":
|
| 265 |
+
stp = None # failed
|
| 266 |
+
|
| 267 |
+
return stp, phi1, phi0, task
|
| 268 |
+
|
| 269 |
+
def _iterate(self, stp, f, g, task):
|
| 270 |
+
"""
|
| 271 |
+
Parameters
|
| 272 |
+
----------
|
| 273 |
+
stp : float
|
| 274 |
+
The current estimate of a satisfactory step. On initial entry, a
|
| 275 |
+
positive initial estimate must be provided.
|
| 276 |
+
f : float
|
| 277 |
+
On first call f is the value of the function at 0. On subsequent
|
| 278 |
+
entries f should be the value of the function at stp.
|
| 279 |
+
g : float
|
| 280 |
+
On initial entry g is the derivative of the function at 0. On
|
| 281 |
+
subsequent entries g is the derivative of the function at stp.
|
| 282 |
+
task : bytes
|
| 283 |
+
On initial entry task must be set to 'START'.
|
| 284 |
+
|
| 285 |
+
On exit with convergence, a warning or an error, the
|
| 286 |
+
variable task contains additional information.
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
Returns
|
| 290 |
+
-------
|
| 291 |
+
stp, f, g, task: tuple
|
| 292 |
+
|
| 293 |
+
stp : float
|
| 294 |
+
the current estimate of a satisfactory step if task = 'FG'. If
|
| 295 |
+
task = 'CONV' then stp satisfies the sufficient decrease and
|
| 296 |
+
curvature condition.
|
| 297 |
+
f : float
|
| 298 |
+
the value of the function at stp.
|
| 299 |
+
g : float
|
| 300 |
+
the derivative of the function at stp.
|
| 301 |
+
task : bytes
|
| 302 |
+
On exit task indicates the required action:
|
| 303 |
+
|
| 304 |
+
If task(1:2) == b'FG' then evaluate the function and
|
| 305 |
+
derivative at stp and call dcsrch again.
|
| 306 |
+
|
| 307 |
+
If task(1:4) == b'CONV' then the search is successful.
|
| 308 |
+
|
| 309 |
+
If task(1:4) == b'WARN' then the subroutine is not able
|
| 310 |
+
to satisfy the convergence conditions. The exit value of
|
| 311 |
+
stp contains the best point found during the search.
|
| 312 |
+
|
| 313 |
+
If task(1:5) == b'ERROR' then there is an error in the
|
| 314 |
+
input arguments.
|
| 315 |
+
"""
|
| 316 |
+
p5 = 0.5
|
| 317 |
+
p66 = 0.66
|
| 318 |
+
xtrapl = 1.1
|
| 319 |
+
xtrapu = 4.0
|
| 320 |
+
|
| 321 |
+
if task[:5] == b"START":
|
| 322 |
+
if stp < self.stpmin:
|
| 323 |
+
task = b"ERROR: STP .LT. STPMIN"
|
| 324 |
+
if stp > self.stpmax:
|
| 325 |
+
task = b"ERROR: STP .GT. STPMAX"
|
| 326 |
+
if g >= 0:
|
| 327 |
+
task = b"ERROR: INITIAL G .GE. ZERO"
|
| 328 |
+
if self.ftol < 0:
|
| 329 |
+
task = b"ERROR: FTOL .LT. ZERO"
|
| 330 |
+
if self.gtol < 0:
|
| 331 |
+
task = b"ERROR: GTOL .LT. ZERO"
|
| 332 |
+
if self.xtol < 0:
|
| 333 |
+
task = b"ERROR: XTOL .LT. ZERO"
|
| 334 |
+
if self.stpmin < 0:
|
| 335 |
+
task = b"ERROR: STPMIN .LT. ZERO"
|
| 336 |
+
if self.stpmax < self.stpmin:
|
| 337 |
+
task = b"ERROR: STPMAX .LT. STPMIN"
|
| 338 |
+
|
| 339 |
+
if task[:5] == b"ERROR":
|
| 340 |
+
return stp, f, g, task
|
| 341 |
+
|
| 342 |
+
# Initialize local variables.
|
| 343 |
+
|
| 344 |
+
self.brackt = False
|
| 345 |
+
self.stage = 1
|
| 346 |
+
self.finit = f
|
| 347 |
+
self.ginit = g
|
| 348 |
+
self.gtest = self.ftol * self.ginit
|
| 349 |
+
self.width = self.stpmax - self.stpmin
|
| 350 |
+
self.width1 = self.width / p5
|
| 351 |
+
|
| 352 |
+
# The variables stx, fx, gx contain the values of the step,
|
| 353 |
+
# function, and derivative at the best step.
|
| 354 |
+
# The variables sty, fy, gy contain the value of the step,
|
| 355 |
+
# function, and derivative at sty.
|
| 356 |
+
# The variables stp, f, g contain the values of the step,
|
| 357 |
+
# function, and derivative at stp.
|
| 358 |
+
|
| 359 |
+
self.stx = 0.0
|
| 360 |
+
self.fx = self.finit
|
| 361 |
+
self.gx = self.ginit
|
| 362 |
+
self.sty = 0.0
|
| 363 |
+
self.fy = self.finit
|
| 364 |
+
self.gy = self.ginit
|
| 365 |
+
self.stmin = 0
|
| 366 |
+
self.stmax = stp + xtrapu * stp
|
| 367 |
+
task = b"FG"
|
| 368 |
+
return stp, f, g, task
|
| 369 |
+
|
| 370 |
+
# in the original Fortran this was a location to restore variables
|
| 371 |
+
# we don't need to do that because they're attributes.
|
| 372 |
+
|
| 373 |
+
# If psi(stp) <= 0 and f'(stp) >= 0 for some step, then the
|
| 374 |
+
# algorithm enters the second stage.
|
| 375 |
+
ftest = self.finit + stp * self.gtest
|
| 376 |
+
|
| 377 |
+
if self.stage == 1 and f <= ftest and g >= 0:
|
| 378 |
+
self.stage = 2
|
| 379 |
+
|
| 380 |
+
# test for warnings
|
| 381 |
+
if self.brackt and (stp <= self.stmin or stp >= self.stmax):
|
| 382 |
+
task = b"WARNING: ROUNDING ERRORS PREVENT PROGRESS"
|
| 383 |
+
if self.brackt and self.stmax - self.stmin <= self.xtol * self.stmax:
|
| 384 |
+
task = b"WARNING: XTOL TEST SATISFIED"
|
| 385 |
+
if stp == self.stpmax and f <= ftest and g <= self.gtest:
|
| 386 |
+
task = b"WARNING: STP = STPMAX"
|
| 387 |
+
if stp == self.stpmin and (f > ftest or g >= self.gtest):
|
| 388 |
+
task = b"WARNING: STP = STPMIN"
|
| 389 |
+
|
| 390 |
+
# test for convergence
|
| 391 |
+
if f <= ftest and abs(g) <= self.gtol * -self.ginit:
|
| 392 |
+
task = b"CONVERGENCE"
|
| 393 |
+
|
| 394 |
+
# test for termination
|
| 395 |
+
if task[:4] == b"WARN" or task[:4] == b"CONV":
|
| 396 |
+
return stp, f, g, task
|
| 397 |
+
|
| 398 |
+
# A modified function is used to predict the step during the
|
| 399 |
+
# first stage if a lower function value has been obtained but
|
| 400 |
+
# the decrease is not sufficient.
|
| 401 |
+
if self.stage == 1 and f <= self.fx and f > ftest:
|
| 402 |
+
# Define the modified function and derivative values.
|
| 403 |
+
fm = f - stp * self.gtest
|
| 404 |
+
fxm = self.fx - self.stx * self.gtest
|
| 405 |
+
fym = self.fy - self.sty * self.gtest
|
| 406 |
+
gm = g - self.gtest
|
| 407 |
+
gxm = self.gx - self.gtest
|
| 408 |
+
gym = self.gy - self.gtest
|
| 409 |
+
|
| 410 |
+
# Call dcstep to update stx, sty, and to compute the new step.
|
| 411 |
+
# dcstep can have several operations which can produce NaN
|
| 412 |
+
# e.g. inf/inf. Filter these out.
|
| 413 |
+
with np.errstate(invalid="ignore", over="ignore"):
|
| 414 |
+
tup = dcstep(
|
| 415 |
+
self.stx,
|
| 416 |
+
fxm,
|
| 417 |
+
gxm,
|
| 418 |
+
self.sty,
|
| 419 |
+
fym,
|
| 420 |
+
gym,
|
| 421 |
+
stp,
|
| 422 |
+
fm,
|
| 423 |
+
gm,
|
| 424 |
+
self.brackt,
|
| 425 |
+
self.stmin,
|
| 426 |
+
self.stmax,
|
| 427 |
+
)
|
| 428 |
+
self.stx, fxm, gxm, self.sty, fym, gym, stp, self.brackt = tup
|
| 429 |
+
|
| 430 |
+
# Reset the function and derivative values for f
|
| 431 |
+
self.fx = fxm + self.stx * self.gtest
|
| 432 |
+
self.fy = fym + self.sty * self.gtest
|
| 433 |
+
self.gx = gxm + self.gtest
|
| 434 |
+
self.gy = gym + self.gtest
|
| 435 |
+
|
| 436 |
+
else:
|
| 437 |
+
# Call dcstep to update stx, sty, and to compute the new step.
|
| 438 |
+
# dcstep can have several operations which can produce NaN
|
| 439 |
+
# e.g. inf/inf. Filter these out.
|
| 440 |
+
|
| 441 |
+
with np.errstate(invalid="ignore", over="ignore"):
|
| 442 |
+
tup = dcstep(
|
| 443 |
+
self.stx,
|
| 444 |
+
self.fx,
|
| 445 |
+
self.gx,
|
| 446 |
+
self.sty,
|
| 447 |
+
self.fy,
|
| 448 |
+
self.gy,
|
| 449 |
+
stp,
|
| 450 |
+
f,
|
| 451 |
+
g,
|
| 452 |
+
self.brackt,
|
| 453 |
+
self.stmin,
|
| 454 |
+
self.stmax,
|
| 455 |
+
)
|
| 456 |
+
(
|
| 457 |
+
self.stx,
|
| 458 |
+
self.fx,
|
| 459 |
+
self.gx,
|
| 460 |
+
self.sty,
|
| 461 |
+
self.fy,
|
| 462 |
+
self.gy,
|
| 463 |
+
stp,
|
| 464 |
+
self.brackt,
|
| 465 |
+
) = tup
|
| 466 |
+
|
| 467 |
+
# Decide if a bisection step is needed
|
| 468 |
+
if self.brackt:
|
| 469 |
+
if abs(self.sty - self.stx) >= p66 * self.width1:
|
| 470 |
+
stp = self.stx + p5 * (self.sty - self.stx)
|
| 471 |
+
self.width1 = self.width
|
| 472 |
+
self.width = abs(self.sty - self.stx)
|
| 473 |
+
|
| 474 |
+
# Set the minimum and maximum steps allowed for stp.
|
| 475 |
+
if self.brackt:
|
| 476 |
+
self.stmin = min(self.stx, self.sty)
|
| 477 |
+
self.stmax = max(self.stx, self.sty)
|
| 478 |
+
else:
|
| 479 |
+
self.stmin = stp + xtrapl * (stp - self.stx)
|
| 480 |
+
self.stmax = stp + xtrapu * (stp - self.stx)
|
| 481 |
+
|
| 482 |
+
# Force the step to be within the bounds stpmax and stpmin.
|
| 483 |
+
stp = np.clip(stp, self.stpmin, self.stpmax)
|
| 484 |
+
|
| 485 |
+
# If further progress is not possible, let stp be the best
|
| 486 |
+
# point obtained during the search.
|
| 487 |
+
if (
|
| 488 |
+
self.brackt
|
| 489 |
+
and (stp <= self.stmin or stp >= self.stmax)
|
| 490 |
+
or (
|
| 491 |
+
self.brackt
|
| 492 |
+
and self.stmax - self.stmin <= self.xtol * self.stmax
|
| 493 |
+
)
|
| 494 |
+
):
|
| 495 |
+
stp = self.stx
|
| 496 |
+
|
| 497 |
+
# Obtain another function and derivative
|
| 498 |
+
task = b"FG"
|
| 499 |
+
return stp, f, g, task
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
def dcstep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax):
    """
    Compute a safeguarded step for a line search and update an interval
    containing a step that satisfies a sufficient decrease and a
    curvature condition (a translation of MINPACK's ``dcstep``).

    The parameter ``stx`` contains the step with the least function
    value.  If ``brackt`` is True then a minimizer has been bracketed in
    an interval with endpoints ``stx`` and ``sty``, and the routine
    assumes that ``min(stx, sty) < stp < max(stx, sty)`` and that the
    derivative at ``stx`` is negative in the direction of the step.

    Parameters
    ----------
    stx, fx, dx : float
        Best step so far, and the function value and derivative there.
        ``dx`` and ``stp - stx`` must have opposite signs.
    sty, fy, dy : float
        Second endpoint of the bracketing interval, with its function
        value and derivative.
    stp, fp, dp : float
        Current trial step, with its function value and derivative.
    brackt : bool
        Whether a minimizer has been bracketed.  Must initially be False.
    stpmin, stpmax : float
        Lower and upper bounds for the step.

    Returns
    -------
    stx, fx, dx, sty, fy, dy, stp, brackt
        Updated best step, second endpoint (with their function values
        and derivatives), the new trial step, and the bracketing flag.

    Notes
    -----
    MINPACK-1 Project. June 1983
    Argonne National Laboratory.
    Jorge J. More' and David J. Thuente.

    MINPACK-2 Project. November 1993.
    Argonne National Laboratory and University of Minnesota.
    Brett M. Averick and Jorge J. More'.
    """
    sgn_dp = np.sign(dp)
    sgn_dx = np.sign(dx)

    # sgnd = dp * (dx / abs(dx)) -- sign of the product of the two
    # derivatives, computed without dividing by abs(dx) (which could be 0).
    sgnd = sgn_dp * sgn_dx

    # First case: A higher function value. The minimum is bracketed.
    # If the cubic step is closer to stx than the quadratic step, the
    # cubic step is taken, otherwise the average of the cubic and
    # quadratic steps is taken.
    if fp > fx:
        theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp
        # s scales the discriminant computation to reduce overflow.
        s = max(abs(theta), abs(dx), abs(dp))
        gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))
        if stp < stx:
            gamma *= -1
        p = (gamma - dx) + theta
        q = ((gamma - dx) + gamma) + dp
        r = p / q
        stpc = stx + r * (stp - stx)
        stpq = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx)
        if abs(stpc - stx) <= abs(stpq - stx):
            stpf = stpc
        else:
            stpf = stpc + (stpq - stpc) / 2.0
        brackt = True
    elif sgnd < 0.0:
        # Second case: A lower function value and derivatives of opposite
        # sign. The minimum is bracketed. If the cubic step is farther from
        # stp than the secant step, the cubic step is taken, otherwise the
        # secant step is taken.
        theta = 3 * (fx - fp) / (stp - stx) + dx + dp
        s = max(abs(theta), abs(dx), abs(dp))
        gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))
        if stp > stx:
            gamma *= -1
        p = (gamma - dp) + theta
        q = ((gamma - dp) + gamma) + dx
        r = p / q
        stpc = stp + r * (stx - stp)
        stpq = stp + (dp / (dp - dx)) * (stx - stp)
        if abs(stpc - stp) > abs(stpq - stp):
            stpf = stpc
        else:
            stpf = stpq
        brackt = True
    elif abs(dp) < abs(dx):
        # Third case: A lower function value, derivatives of the same sign,
        # and the magnitude of the derivative decreases.

        # The cubic step is computed only if the cubic tends to infinity
        # in the direction of the step or if the minimum of the cubic
        # is beyond stp. Otherwise the cubic step is defined to be the
        # secant step.
        theta = 3 * (fx - fp) / (stp - stx) + dx + dp
        s = max(abs(theta), abs(dx), abs(dp))

        # The case gamma = 0 only arises if the cubic does not tend
        # to infinity in the direction of the step.
        gamma = s * np.sqrt(max(0, (theta / s) ** 2 - (dx / s) * (dp / s)))
        if stp > stx:
            gamma = -gamma
        p = (gamma - dp) + theta
        q = (gamma + (dx - dp)) + gamma
        r = p / q
        if r < 0 and gamma != 0:
            stpc = stp + r * (stx - stp)
        elif stp > stx:
            stpc = stpmax
        else:
            stpc = stpmin
        stpq = stp + (dp / (dp - dx)) * (stx - stp)

        if brackt:
            # A minimizer has been bracketed. If the cubic step is
            # closer to stp than the secant step, the cubic step is
            # taken, otherwise the secant step is taken.
            if abs(stpc - stp) < abs(stpq - stp):
                stpf = stpc
            else:
                stpf = stpq

            # Keep the step within 0.66 of the interval width.
            if stp > stx:
                stpf = min(stp + 0.66 * (sty - stp), stpf)
            else:
                stpf = max(stp + 0.66 * (sty - stp), stpf)
        else:
            # A minimizer has not been bracketed. If the cubic step is
            # farther from stp than the secant step, the cubic step is
            # taken, otherwise the secant step is taken.
            if abs(stpc - stp) > abs(stpq - stp):
                stpf = stpc
            else:
                stpf = stpq
            stpf = np.clip(stpf, stpmin, stpmax)

    else:
        # Fourth case: A lower function value, derivatives of the same sign,
        # and the magnitude of the derivative does not decrease. If the
        # minimum is not bracketed, the step is either stpmin or stpmax,
        # otherwise the cubic step is taken.
        if brackt:
            theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp
            s = max(abs(theta), abs(dy), abs(dp))
            gamma = s * np.sqrt((theta / s) ** 2 - (dy / s) * (dp / s))
            if stp > sty:
                gamma = -gamma
            p = (gamma - dp) + theta
            q = ((gamma - dp) + gamma) + dy
            r = p / q
            stpc = stp + r * (sty - stp)
            stpf = stpc
        elif stp > stx:
            stpf = stpmax
        else:
            stpf = stpmin

    # Update the interval which contains a minimizer.
    if fp > fx:
        sty = stp
        fy = fp
        dy = dp
    else:
        if sgnd < 0:
            sty = stx
            fy = fx
            dy = dx
        stx = stp
        fx = fp
        dx = dp

    # Compute the new step.
    stp = stpf

    return stx, fx, dx, sty, fy, dy, stp, brackt
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py
ADDED
|
@@ -0,0 +1,693 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import scipy.sparse as sps
|
| 3 |
+
from ._numdiff import approx_derivative, group_columns
|
| 4 |
+
from ._hessian_update_strategy import HessianUpdateStrategy
|
| 5 |
+
from scipy.sparse.linalg import LinearOperator
|
| 6 |
+
from scipy._lib._array_api import atleast_nd, array_namespace
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
FD_METHODS = ('2-point', '3-point', 'cs')
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _wrapper_fun(fun, args=()):
|
| 13 |
+
ncalls = [0]
|
| 14 |
+
|
| 15 |
+
def wrapped(x):
|
| 16 |
+
ncalls[0] += 1
|
| 17 |
+
# Send a copy because the user may overwrite it.
|
| 18 |
+
# Overwriting results in undefined behaviour because
|
| 19 |
+
# fun(self.x) will change self.x, with the two no longer linked.
|
| 20 |
+
fx = fun(np.copy(x), *args)
|
| 21 |
+
# Make sure the function returns a true scalar
|
| 22 |
+
if not np.isscalar(fx):
|
| 23 |
+
try:
|
| 24 |
+
fx = np.asarray(fx).item()
|
| 25 |
+
except (TypeError, ValueError) as e:
|
| 26 |
+
raise ValueError(
|
| 27 |
+
"The user-provided objective function "
|
| 28 |
+
"must return a scalar value."
|
| 29 |
+
) from e
|
| 30 |
+
return fx
|
| 31 |
+
return wrapped, ncalls
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _wrapper_grad(grad, fun=None, args=(), finite_diff_options=None):
|
| 35 |
+
ncalls = [0]
|
| 36 |
+
|
| 37 |
+
if callable(grad):
|
| 38 |
+
def wrapped(x, **kwds):
|
| 39 |
+
# kwds present to give function same signature as numdiff variant
|
| 40 |
+
ncalls[0] += 1
|
| 41 |
+
return np.atleast_1d(grad(np.copy(x), *args))
|
| 42 |
+
return wrapped, ncalls
|
| 43 |
+
|
| 44 |
+
elif grad in FD_METHODS:
|
| 45 |
+
def wrapped1(x, f0=None):
|
| 46 |
+
ncalls[0] += 1
|
| 47 |
+
return approx_derivative(
|
| 48 |
+
fun, x, f0=f0, **finite_diff_options
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
return wrapped1, ncalls
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _wrapper_hess(hess, grad=None, x0=None, args=(), finite_diff_options=None):
|
| 55 |
+
if callable(hess):
|
| 56 |
+
H = hess(np.copy(x0), *args)
|
| 57 |
+
ncalls = [1]
|
| 58 |
+
|
| 59 |
+
if sps.issparse(H):
|
| 60 |
+
def wrapped(x, **kwds):
|
| 61 |
+
ncalls[0] += 1
|
| 62 |
+
return sps.csr_matrix(hess(np.copy(x), *args))
|
| 63 |
+
|
| 64 |
+
H = sps.csr_matrix(H)
|
| 65 |
+
|
| 66 |
+
elif isinstance(H, LinearOperator):
|
| 67 |
+
def wrapped(x, **kwds):
|
| 68 |
+
ncalls[0] += 1
|
| 69 |
+
return hess(np.copy(x), *args)
|
| 70 |
+
|
| 71 |
+
else: # dense
|
| 72 |
+
def wrapped(x, **kwds):
|
| 73 |
+
ncalls[0] += 1
|
| 74 |
+
return np.atleast_2d(np.asarray(hess(np.copy(x), *args)))
|
| 75 |
+
|
| 76 |
+
H = np.atleast_2d(np.asarray(H))
|
| 77 |
+
|
| 78 |
+
return wrapped, ncalls, H
|
| 79 |
+
elif hess in FD_METHODS:
|
| 80 |
+
ncalls = [0]
|
| 81 |
+
|
| 82 |
+
def wrapped1(x, f0=None):
|
| 83 |
+
return approx_derivative(
|
| 84 |
+
grad, x, f0=f0, **finite_diff_options
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
return wrapped1, ncalls, None
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class ScalarFunction:
    """Scalar function and its derivatives.

    This class defines a scalar function F: R^n->R and methods for
    computing or approximating its first and second derivatives.

    Parameters
    ----------
    fun : callable
        evaluates the scalar function. Must be of the form ``fun(x, *args)``,
        where ``x`` is the argument in the form of a 1-D array and ``args`` is
        a tuple of any additional fixed parameters needed to completely specify
        the function. Should return a scalar.
    x0 : array-like
        Provides an initial set of variables for evaluating fun. Array of real
        elements of size (n,), where 'n' is the number of independent
        variables.
    args : tuple, optional
        Any additional fixed parameters needed to completely specify the scalar
        function.
    grad : {callable, '2-point', '3-point', 'cs'}
        Method for computing the gradient vector.
        If it is a callable, it should be a function that returns the gradient
        vector:

            ``grad(x, *args) -> array_like, shape (n,)``

        where ``x`` is an array with shape (n,) and ``args`` is a tuple with
        the fixed parameters.
        Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
        to select a finite difference scheme for numerical estimation of the
        gradient with a relative step size. These finite difference schemes
        obey any specified `bounds`.
    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}
        Method for computing the Hessian matrix. If it is callable, it should
        return the Hessian matrix:

            ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``

        where x is a (n,) ndarray and `args` is a tuple with the fixed
        parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'}
        select a finite difference scheme for numerical estimation. Or, objects
        implementing `HessianUpdateStrategy` interface can be used to
        approximate the Hessian.
        Whenever the gradient is estimated via finite-differences, the Hessian
        cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
        to be estimated using one of the quasi-Newton strategies.
    finite_diff_rel_step : None or array_like
        Relative step size to use. The absolute step size is computed as
        ``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly
        adjusted to fit into the bounds. For ``method='3-point'`` the sign
        of `h` is ignored. If None then finite_diff_rel_step is selected
        automatically.
    finite_diff_bounds : tuple of array_like
        Lower and upper bounds on independent variables. Defaults to no bounds,
        (-np.inf, np.inf). Each bound must match the size of `x0` or be a
        scalar, in the latter case the bound will be the same for all
        variables. Use it to limit the range of function evaluation.
    epsilon : None or array_like, optional
        Absolute step size to use, possibly adjusted to fit into the bounds.
        For ``method='3-point'`` the sign of `epsilon` is ignored. By default
        relative steps are used, only if ``epsilon is not None`` are absolute
        steps used.

    Notes
    -----
    This class implements a memoization logic. There are methods `fun`,
    `grad`, `hess` and corresponding attributes `f`, `g` and `H`. The
    following things should be considered:

    1. Use only public methods `fun`, `grad` and `hess`.
    2. After one of the methods is called, the corresponding attribute
       will be set. However, a subsequent call with a different argument
       of *any* of the methods may overwrite the attribute.
    """
    def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step,
                 finite_diff_bounds, epsilon=None):
        # Validate the derivative specifications before any evaluation.
        if not callable(grad) and grad not in FD_METHODS:
            raise ValueError(
                f"`grad` must be either callable or one of {FD_METHODS}."
            )

        if not (callable(hess) or hess in FD_METHODS
                or isinstance(hess, HessianUpdateStrategy)):
            raise ValueError(
                f"`hess` must be either callable, HessianUpdateStrategy"
                f" or one of {FD_METHODS}."
            )

        # Finite-differencing a finite-difference gradient is disallowed.
        if grad in FD_METHODS and hess in FD_METHODS:
            raise ValueError("Whenever the gradient is estimated via "
                             "finite-differences, we require the Hessian "
                             "to be estimated using one of the "
                             "quasi-Newton strategies.")

        self.xp = xp = array_namespace(x0)
        _x = atleast_nd(x0, ndim=1, xp=xp)
        # Keep a real floating dtype if x0 already has one; otherwise
        # fall back to float64.
        _dtype = xp.float64
        if xp.isdtype(_x.dtype, "real floating"):
            _dtype = _x.dtype

        # original arguments
        self._wrapped_fun, self._nfev = _wrapper_fun(fun, args=args)
        self._orig_fun = fun
        self._orig_grad = grad
        self._orig_hess = hess
        self._args = args

        # promotes to floating
        self.x = xp.astype(_x, _dtype)
        self.x_dtype = _dtype
        self.n = self.x.size
        # Memoization flags: False means the cached value is stale for
        # the current self.x.
        self.f_updated = False
        self.g_updated = False
        self.H_updated = False

        # Track the best (lowest) function value seen across all calls.
        self._lowest_x = None
        self._lowest_f = np.inf

        finite_diff_options = {}
        if grad in FD_METHODS:
            finite_diff_options["method"] = grad
            finite_diff_options["rel_step"] = finite_diff_rel_step
            finite_diff_options["abs_step"] = epsilon
            finite_diff_options["bounds"] = finite_diff_bounds
        if hess in FD_METHODS:
            finite_diff_options["method"] = hess
            finite_diff_options["rel_step"] = finite_diff_rel_step
            finite_diff_options["abs_step"] = epsilon
            finite_diff_options["as_linear_operator"] = True

        # Initial function evaluation
        self._update_fun()

        # Initial gradient evaluation
        self._wrapped_grad, self._ngev = _wrapper_grad(
            grad,
            fun=self._wrapped_fun,
            args=args,
            finite_diff_options=finite_diff_options
        )
        self._update_grad()

        # Hessian evaluation
        if callable(hess):
            self._wrapped_hess, self._nhev, self.H = _wrapper_hess(
                hess, x0=x0, args=args
            )
            self.H_updated = True
        elif hess in FD_METHODS:
            self._wrapped_hess, self._nhev, self.H = _wrapper_hess(
                hess,
                grad=self._wrapped_grad,
                x0=x0,
                finite_diff_options=finite_diff_options
            )
            # FD Hessian differentiates the gradient, so the gradient
            # must be current before the first Hessian evaluation.
            self._update_grad()
            self.H = self._wrapped_hess(self.x, f0=self.g)
            self.H_updated = True
        elif isinstance(hess, HessianUpdateStrategy):
            # Quasi-Newton: the strategy object itself is the Hessian
            # approximation, updated from successive (x, g) differences.
            self.H = hess
            self.H.initialize(self.n, 'hess')
            self.H_updated = True
            self.x_prev = None
            self.g_prev = None
            self._nhev = [0]

    @property
    def nfev(self):
        # Number of function evaluations, read from the shared counter
        # list maintained by the wrapped function.
        return self._nfev[0]

    @property
    def ngev(self):
        # Number of gradient evaluations.
        return self._ngev[0]

    @property
    def nhev(self):
        # Number of Hessian evaluations.
        return self._nhev[0]

    def _update_x(self, x):
        """Accept a new evaluation point and invalidate cached values."""
        if isinstance(self._orig_hess, HessianUpdateStrategy):
            # Quasi-Newton updates need the gradient at the *previous*
            # point, so refresh and stash it before moving x.
            self._update_grad()
            self.x_prev = self.x
            self.g_prev = self.g
            # ensure that self.x is a copy of x. Don't store a reference
            # otherwise the memoization doesn't work properly.

            _x = atleast_nd(x, ndim=1, xp=self.xp)
            self.x = self.xp.astype(_x, self.x_dtype)
            self.f_updated = False
            self.g_updated = False
            self.H_updated = False
            self._update_hess()
        else:
            # ensure that self.x is a copy of x. Don't store a reference
            # otherwise the memoization doesn't work properly.
            _x = atleast_nd(x, ndim=1, xp=self.xp)
            self.x = self.xp.astype(_x, self.x_dtype)
            self.f_updated = False
            self.g_updated = False
            self.H_updated = False

    def _update_fun(self):
        """Evaluate f at self.x if the cached value is stale."""
        if not self.f_updated:
            fx = self._wrapped_fun(self.x)
            if fx < self._lowest_f:
                self._lowest_x = self.x
                self._lowest_f = fx

            self.f = fx
            self.f_updated = True

    def _update_grad(self):
        """Evaluate g at self.x if the cached value is stale."""
        if not self.g_updated:
            if self._orig_grad in FD_METHODS:
                # FD gradients reuse the cached f as f0, so f must be
                # current first.
                self._update_fun()
            self.g = self._wrapped_grad(self.x, f0=self.f)
            self.g_updated = True

    def _update_hess(self):
        """Evaluate/update H at self.x if the cached value is stale."""
        if not self.H_updated:
            if self._orig_hess in FD_METHODS:
                self._update_grad()
                self.H = self._wrapped_hess(self.x, f0=self.g)
            elif isinstance(self._orig_hess, HessianUpdateStrategy):
                self._update_grad()
                self.H.update(self.x - self.x_prev, self.g - self.g_prev)
            else:  # should be callable(hess)
                self.H = self._wrapped_hess(self.x)

            self.H_updated = True

    def fun(self, x):
        """Return f(x), reusing the cached value when x is unchanged."""
        if not np.array_equal(x, self.x):
            self._update_x(x)
        self._update_fun()
        return self.f

    def grad(self, x):
        """Return the gradient at x, reusing the cache when possible."""
        if not np.array_equal(x, self.x):
            self._update_x(x)
        self._update_grad()
        return self.g

    def hess(self, x):
        """Return the Hessian at x, reusing the cache when possible."""
        if not np.array_equal(x, self.x):
            self._update_x(x)
        self._update_hess()
        return self.H

    def fun_and_grad(self, x):
        """Return (f(x), grad(x)) with a single cache update."""
        if not np.array_equal(x, self.x):
            self._update_x(x)
        self._update_fun()
        self._update_grad()
        return self.f, self.g
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
class VectorFunction:
|
| 349 |
+
"""Vector function and its derivatives.
|
| 350 |
+
|
| 351 |
+
This class defines a vector function F: R^n->R^m and methods for
|
| 352 |
+
computing or approximating its first and second derivatives.
|
| 353 |
+
|
| 354 |
+
Notes
|
| 355 |
+
-----
|
| 356 |
+
This class implements a memoization logic. There are methods `fun`,
|
| 357 |
+
`jac`, hess` and corresponding attributes `f`, `J` and `H`. The following
|
| 358 |
+
things should be considered:
|
| 359 |
+
|
| 360 |
+
1. Use only public methods `fun`, `jac` and `hess`.
|
| 361 |
+
2. After one of the methods is called, the corresponding attribute
|
| 362 |
+
will be set. However, a subsequent call with a different argument
|
| 363 |
+
of *any* of the methods may overwrite the attribute.
|
| 364 |
+
"""
|
| 365 |
+
def __init__(self, fun, x0, jac, hess,
|
| 366 |
+
finite_diff_rel_step, finite_diff_jac_sparsity,
|
| 367 |
+
finite_diff_bounds, sparse_jacobian):
|
| 368 |
+
if not callable(jac) and jac not in FD_METHODS:
|
| 369 |
+
raise ValueError(f"`jac` must be either callable or one of {FD_METHODS}.")
|
| 370 |
+
|
| 371 |
+
if not (callable(hess) or hess in FD_METHODS
|
| 372 |
+
or isinstance(hess, HessianUpdateStrategy)):
|
| 373 |
+
raise ValueError("`hess` must be either callable,"
|
| 374 |
+
f"HessianUpdateStrategy or one of {FD_METHODS}.")
|
| 375 |
+
|
| 376 |
+
if jac in FD_METHODS and hess in FD_METHODS:
|
| 377 |
+
raise ValueError("Whenever the Jacobian is estimated via "
|
| 378 |
+
"finite-differences, we require the Hessian to "
|
| 379 |
+
"be estimated using one of the quasi-Newton "
|
| 380 |
+
"strategies.")
|
| 381 |
+
|
| 382 |
+
self.xp = xp = array_namespace(x0)
|
| 383 |
+
_x = atleast_nd(x0, ndim=1, xp=xp)
|
| 384 |
+
_dtype = xp.float64
|
| 385 |
+
if xp.isdtype(_x.dtype, "real floating"):
|
| 386 |
+
_dtype = _x.dtype
|
| 387 |
+
|
| 388 |
+
# promotes to floating
|
| 389 |
+
self.x = xp.astype(_x, _dtype)
|
| 390 |
+
self.x_dtype = _dtype
|
| 391 |
+
|
| 392 |
+
self.n = self.x.size
|
| 393 |
+
self.nfev = 0
|
| 394 |
+
self.njev = 0
|
| 395 |
+
self.nhev = 0
|
| 396 |
+
self.f_updated = False
|
| 397 |
+
self.J_updated = False
|
| 398 |
+
self.H_updated = False
|
| 399 |
+
|
| 400 |
+
finite_diff_options = {}
|
| 401 |
+
if jac in FD_METHODS:
|
| 402 |
+
finite_diff_options["method"] = jac
|
| 403 |
+
finite_diff_options["rel_step"] = finite_diff_rel_step
|
| 404 |
+
if finite_diff_jac_sparsity is not None:
|
| 405 |
+
sparsity_groups = group_columns(finite_diff_jac_sparsity)
|
| 406 |
+
finite_diff_options["sparsity"] = (finite_diff_jac_sparsity,
|
| 407 |
+
sparsity_groups)
|
| 408 |
+
finite_diff_options["bounds"] = finite_diff_bounds
|
| 409 |
+
self.x_diff = np.copy(self.x)
|
| 410 |
+
if hess in FD_METHODS:
|
| 411 |
+
finite_diff_options["method"] = hess
|
| 412 |
+
finite_diff_options["rel_step"] = finite_diff_rel_step
|
| 413 |
+
finite_diff_options["as_linear_operator"] = True
|
| 414 |
+
self.x_diff = np.copy(self.x)
|
| 415 |
+
if jac in FD_METHODS and hess in FD_METHODS:
|
| 416 |
+
raise ValueError("Whenever the Jacobian is estimated via "
|
| 417 |
+
"finite-differences, we require the Hessian to "
|
| 418 |
+
"be estimated using one of the quasi-Newton "
|
| 419 |
+
"strategies.")
|
| 420 |
+
|
| 421 |
+
# Function evaluation
|
| 422 |
+
def fun_wrapped(x):
|
| 423 |
+
self.nfev += 1
|
| 424 |
+
return np.atleast_1d(fun(x))
|
| 425 |
+
|
| 426 |
+
def update_fun():
|
| 427 |
+
self.f = fun_wrapped(self.x)
|
| 428 |
+
|
| 429 |
+
self._update_fun_impl = update_fun
|
| 430 |
+
update_fun()
|
| 431 |
+
|
| 432 |
+
self.v = np.zeros_like(self.f)
|
| 433 |
+
self.m = self.v.size
|
| 434 |
+
|
| 435 |
+
# Jacobian Evaluation
|
| 436 |
+
if callable(jac):
|
| 437 |
+
self.J = jac(self.x)
|
| 438 |
+
self.J_updated = True
|
| 439 |
+
self.njev += 1
|
| 440 |
+
|
| 441 |
+
if (sparse_jacobian or
|
| 442 |
+
sparse_jacobian is None and sps.issparse(self.J)):
|
| 443 |
+
def jac_wrapped(x):
|
| 444 |
+
self.njev += 1
|
| 445 |
+
return sps.csr_matrix(jac(x))
|
| 446 |
+
self.J = sps.csr_matrix(self.J)
|
| 447 |
+
self.sparse_jacobian = True
|
| 448 |
+
|
| 449 |
+
elif sps.issparse(self.J):
|
| 450 |
+
def jac_wrapped(x):
|
| 451 |
+
self.njev += 1
|
| 452 |
+
return jac(x).toarray()
|
| 453 |
+
self.J = self.J.toarray()
|
| 454 |
+
self.sparse_jacobian = False
|
| 455 |
+
|
| 456 |
+
else:
|
| 457 |
+
def jac_wrapped(x):
|
| 458 |
+
self.njev += 1
|
| 459 |
+
return np.atleast_2d(jac(x))
|
| 460 |
+
self.J = np.atleast_2d(self.J)
|
| 461 |
+
self.sparse_jacobian = False
|
| 462 |
+
|
| 463 |
+
def update_jac():
|
| 464 |
+
self.J = jac_wrapped(self.x)
|
| 465 |
+
|
| 466 |
+
elif jac in FD_METHODS:
|
| 467 |
+
self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
|
| 468 |
+
**finite_diff_options)
|
| 469 |
+
self.J_updated = True
|
| 470 |
+
|
| 471 |
+
if (sparse_jacobian or
|
| 472 |
+
sparse_jacobian is None and sps.issparse(self.J)):
|
| 473 |
+
def update_jac():
|
| 474 |
+
self._update_fun()
|
| 475 |
+
self.J = sps.csr_matrix(
|
| 476 |
+
approx_derivative(fun_wrapped, self.x, f0=self.f,
|
| 477 |
+
**finite_diff_options))
|
| 478 |
+
self.J = sps.csr_matrix(self.J)
|
| 479 |
+
self.sparse_jacobian = True
|
| 480 |
+
|
| 481 |
+
elif sps.issparse(self.J):
|
| 482 |
+
def update_jac():
|
| 483 |
+
self._update_fun()
|
| 484 |
+
self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
|
| 485 |
+
**finite_diff_options).toarray()
|
| 486 |
+
self.J = self.J.toarray()
|
| 487 |
+
self.sparse_jacobian = False
|
| 488 |
+
|
| 489 |
+
else:
|
| 490 |
+
def update_jac():
|
| 491 |
+
self._update_fun()
|
| 492 |
+
self.J = np.atleast_2d(
|
| 493 |
+
approx_derivative(fun_wrapped, self.x, f0=self.f,
|
| 494 |
+
**finite_diff_options))
|
| 495 |
+
self.J = np.atleast_2d(self.J)
|
| 496 |
+
self.sparse_jacobian = False
|
| 497 |
+
|
| 498 |
+
self._update_jac_impl = update_jac
|
| 499 |
+
|
| 500 |
+
# Define Hessian
|
| 501 |
+
if callable(hess):
|
| 502 |
+
self.H = hess(self.x, self.v)
|
| 503 |
+
self.H_updated = True
|
| 504 |
+
self.nhev += 1
|
| 505 |
+
|
| 506 |
+
if sps.issparse(self.H):
|
| 507 |
+
def hess_wrapped(x, v):
|
| 508 |
+
self.nhev += 1
|
| 509 |
+
return sps.csr_matrix(hess(x, v))
|
| 510 |
+
self.H = sps.csr_matrix(self.H)
|
| 511 |
+
|
| 512 |
+
elif isinstance(self.H, LinearOperator):
|
| 513 |
+
def hess_wrapped(x, v):
|
| 514 |
+
self.nhev += 1
|
| 515 |
+
return hess(x, v)
|
| 516 |
+
|
| 517 |
+
else:
|
| 518 |
+
def hess_wrapped(x, v):
|
| 519 |
+
self.nhev += 1
|
| 520 |
+
return np.atleast_2d(np.asarray(hess(x, v)))
|
| 521 |
+
self.H = np.atleast_2d(np.asarray(self.H))
|
| 522 |
+
|
| 523 |
+
def update_hess():
|
| 524 |
+
self.H = hess_wrapped(self.x, self.v)
|
| 525 |
+
elif hess in FD_METHODS:
|
| 526 |
+
def jac_dot_v(x, v):
|
| 527 |
+
return jac_wrapped(x).T.dot(v)
|
| 528 |
+
|
| 529 |
+
def update_hess():
|
| 530 |
+
self._update_jac()
|
| 531 |
+
self.H = approx_derivative(jac_dot_v, self.x,
|
| 532 |
+
f0=self.J.T.dot(self.v),
|
| 533 |
+
args=(self.v,),
|
| 534 |
+
**finite_diff_options)
|
| 535 |
+
update_hess()
|
| 536 |
+
self.H_updated = True
|
| 537 |
+
elif isinstance(hess, HessianUpdateStrategy):
|
| 538 |
+
self.H = hess
|
| 539 |
+
self.H.initialize(self.n, 'hess')
|
| 540 |
+
self.H_updated = True
|
| 541 |
+
self.x_prev = None
|
| 542 |
+
self.J_prev = None
|
| 543 |
+
|
| 544 |
+
def update_hess():
|
| 545 |
+
self._update_jac()
|
| 546 |
+
# When v is updated before x was updated, then x_prev and
|
| 547 |
+
# J_prev are None and we need this check.
|
| 548 |
+
if self.x_prev is not None and self.J_prev is not None:
|
| 549 |
+
delta_x = self.x - self.x_prev
|
| 550 |
+
delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v)
|
| 551 |
+
self.H.update(delta_x, delta_g)
|
| 552 |
+
|
| 553 |
+
self._update_hess_impl = update_hess
|
| 554 |
+
|
| 555 |
+
if isinstance(hess, HessianUpdateStrategy):
|
| 556 |
+
def update_x(x):
|
| 557 |
+
self._update_jac()
|
| 558 |
+
self.x_prev = self.x
|
| 559 |
+
self.J_prev = self.J
|
| 560 |
+
_x = atleast_nd(x, ndim=1, xp=self.xp)
|
| 561 |
+
self.x = self.xp.astype(_x, self.x_dtype)
|
| 562 |
+
self.f_updated = False
|
| 563 |
+
self.J_updated = False
|
| 564 |
+
self.H_updated = False
|
| 565 |
+
self._update_hess()
|
| 566 |
+
else:
|
| 567 |
+
def update_x(x):
|
| 568 |
+
_x = atleast_nd(x, ndim=1, xp=self.xp)
|
| 569 |
+
self.x = self.xp.astype(_x, self.x_dtype)
|
| 570 |
+
self.f_updated = False
|
| 571 |
+
self.J_updated = False
|
| 572 |
+
self.H_updated = False
|
| 573 |
+
|
| 574 |
+
self._update_x_impl = update_x
|
| 575 |
+
|
| 576 |
+
def _update_v(self, v):
|
| 577 |
+
if not np.array_equal(v, self.v):
|
| 578 |
+
self.v = v
|
| 579 |
+
self.H_updated = False
|
| 580 |
+
|
| 581 |
+
def _update_x(self, x):
|
| 582 |
+
if not np.array_equal(x, self.x):
|
| 583 |
+
self._update_x_impl(x)
|
| 584 |
+
|
| 585 |
+
def _update_fun(self):
|
| 586 |
+
if not self.f_updated:
|
| 587 |
+
self._update_fun_impl()
|
| 588 |
+
self.f_updated = True
|
| 589 |
+
|
| 590 |
+
def _update_jac(self):
|
| 591 |
+
if not self.J_updated:
|
| 592 |
+
self._update_jac_impl()
|
| 593 |
+
self.J_updated = True
|
| 594 |
+
|
| 595 |
+
def _update_hess(self):
|
| 596 |
+
if not self.H_updated:
|
| 597 |
+
self._update_hess_impl()
|
| 598 |
+
self.H_updated = True
|
| 599 |
+
|
| 600 |
+
def fun(self, x):
|
| 601 |
+
self._update_x(x)
|
| 602 |
+
self._update_fun()
|
| 603 |
+
return self.f
|
| 604 |
+
|
| 605 |
+
def jac(self, x):
|
| 606 |
+
self._update_x(x)
|
| 607 |
+
self._update_jac()
|
| 608 |
+
return self.J
|
| 609 |
+
|
| 610 |
+
def hess(self, x, v):
|
| 611 |
+
# v should be updated before x.
|
| 612 |
+
self._update_v(v)
|
| 613 |
+
self._update_x(x)
|
| 614 |
+
self._update_hess()
|
| 615 |
+
return self.H
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
class LinearVectorFunction:
|
| 619 |
+
"""Linear vector function and its derivatives.
|
| 620 |
+
|
| 621 |
+
Defines a linear function F = A x, where x is N-D vector and
|
| 622 |
+
A is m-by-n matrix. The Jacobian is constant and equals to A. The Hessian
|
| 623 |
+
is identically zero and it is returned as a csr matrix.
|
| 624 |
+
"""
|
| 625 |
+
def __init__(self, A, x0, sparse_jacobian):
|
| 626 |
+
if sparse_jacobian or sparse_jacobian is None and sps.issparse(A):
|
| 627 |
+
self.J = sps.csr_matrix(A)
|
| 628 |
+
self.sparse_jacobian = True
|
| 629 |
+
elif sps.issparse(A):
|
| 630 |
+
self.J = A.toarray()
|
| 631 |
+
self.sparse_jacobian = False
|
| 632 |
+
else:
|
| 633 |
+
# np.asarray makes sure A is ndarray and not matrix
|
| 634 |
+
self.J = np.atleast_2d(np.asarray(A))
|
| 635 |
+
self.sparse_jacobian = False
|
| 636 |
+
|
| 637 |
+
self.m, self.n = self.J.shape
|
| 638 |
+
|
| 639 |
+
self.xp = xp = array_namespace(x0)
|
| 640 |
+
_x = atleast_nd(x0, ndim=1, xp=xp)
|
| 641 |
+
_dtype = xp.float64
|
| 642 |
+
if xp.isdtype(_x.dtype, "real floating"):
|
| 643 |
+
_dtype = _x.dtype
|
| 644 |
+
|
| 645 |
+
# promotes to floating
|
| 646 |
+
self.x = xp.astype(_x, _dtype)
|
| 647 |
+
self.x_dtype = _dtype
|
| 648 |
+
|
| 649 |
+
self.f = self.J.dot(self.x)
|
| 650 |
+
self.f_updated = True
|
| 651 |
+
|
| 652 |
+
self.v = np.zeros(self.m, dtype=float)
|
| 653 |
+
self.H = sps.csr_matrix((self.n, self.n))
|
| 654 |
+
|
| 655 |
+
def _update_x(self, x):
|
| 656 |
+
if not np.array_equal(x, self.x):
|
| 657 |
+
_x = atleast_nd(x, ndim=1, xp=self.xp)
|
| 658 |
+
self.x = self.xp.astype(_x, self.x_dtype)
|
| 659 |
+
self.f_updated = False
|
| 660 |
+
|
| 661 |
+
def fun(self, x):
|
| 662 |
+
self._update_x(x)
|
| 663 |
+
if not self.f_updated:
|
| 664 |
+
self.f = self.J.dot(x)
|
| 665 |
+
self.f_updated = True
|
| 666 |
+
return self.f
|
| 667 |
+
|
| 668 |
+
def jac(self, x):
|
| 669 |
+
self._update_x(x)
|
| 670 |
+
return self.J
|
| 671 |
+
|
| 672 |
+
def hess(self, x, v):
|
| 673 |
+
self._update_x(x)
|
| 674 |
+
self.v = v
|
| 675 |
+
return self.H
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
class IdentityVectorFunction(LinearVectorFunction):
|
| 679 |
+
"""Identity vector function and its derivatives.
|
| 680 |
+
|
| 681 |
+
The Jacobian is the identity matrix, returned as a dense array when
|
| 682 |
+
`sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is
|
| 683 |
+
identically zero and it is returned as a csr matrix.
|
| 684 |
+
"""
|
| 685 |
+
def __init__(self, x0, sparse_jacobian):
|
| 686 |
+
n = len(x0)
|
| 687 |
+
if sparse_jacobian or sparse_jacobian is None:
|
| 688 |
+
A = sps.eye(n, format='csr')
|
| 689 |
+
sparse_jacobian = True
|
| 690 |
+
else:
|
| 691 |
+
A = np.eye(n)
|
| 692 |
+
sparse_jacobian = False
|
| 693 |
+
super().__init__(A, x0, sparse_jacobian)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_differentialevolution.py
ADDED
|
@@ -0,0 +1,1951 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
differential_evolution: The differential evolution global optimization algorithm
|
| 3 |
+
Added by Andrew Nelson 2014
|
| 4 |
+
"""
|
| 5 |
+
import warnings
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from scipy.optimize import OptimizeResult, minimize
|
| 9 |
+
from scipy.optimize._optimize import _status_message, _wrap_callback
|
| 10 |
+
from scipy._lib._util import (check_random_state, MapWrapper, _FunctionWrapper,
|
| 11 |
+
rng_integers)
|
| 12 |
+
|
| 13 |
+
from scipy.optimize._constraints import (Bounds, new_bounds_to_old,
|
| 14 |
+
NonlinearConstraint, LinearConstraint)
|
| 15 |
+
from scipy.sparse import issparse
|
| 16 |
+
|
| 17 |
+
__all__ = ['differential_evolution']
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
_MACHEPS = np.finfo(np.float64).eps
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def differential_evolution(func, bounds, args=(), strategy='best1bin',
|
| 24 |
+
maxiter=1000, popsize=15, tol=0.01,
|
| 25 |
+
mutation=(0.5, 1), recombination=0.7, seed=None,
|
| 26 |
+
callback=None, disp=False, polish=True,
|
| 27 |
+
init='latinhypercube', atol=0, updating='immediate',
|
| 28 |
+
workers=1, constraints=(), x0=None, *,
|
| 29 |
+
integrality=None, vectorized=False):
|
| 30 |
+
"""Finds the global minimum of a multivariate function.
|
| 31 |
+
|
| 32 |
+
The differential evolution method [1]_ is stochastic in nature. It does
|
| 33 |
+
not use gradient methods to find the minimum, and can search large areas
|
| 34 |
+
of candidate space, but often requires larger numbers of function
|
| 35 |
+
evaluations than conventional gradient-based techniques.
|
| 36 |
+
|
| 37 |
+
The algorithm is due to Storn and Price [2]_.
|
| 38 |
+
|
| 39 |
+
Parameters
|
| 40 |
+
----------
|
| 41 |
+
func : callable
|
| 42 |
+
The objective function to be minimized. Must be in the form
|
| 43 |
+
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
|
| 44 |
+
and ``args`` is a tuple of any additional fixed parameters needed to
|
| 45 |
+
completely specify the function. The number of parameters, N, is equal
|
| 46 |
+
to ``len(x)``.
|
| 47 |
+
bounds : sequence or `Bounds`
|
| 48 |
+
Bounds for variables. There are two ways to specify the bounds:
|
| 49 |
+
|
| 50 |
+
1. Instance of `Bounds` class.
|
| 51 |
+
2. ``(min, max)`` pairs for each element in ``x``, defining the
|
| 52 |
+
finite lower and upper bounds for the optimizing argument of
|
| 53 |
+
`func`.
|
| 54 |
+
|
| 55 |
+
The total number of bounds is used to determine the number of
|
| 56 |
+
parameters, N. If there are parameters whose bounds are equal the total
|
| 57 |
+
number of free parameters is ``N - N_equal``.
|
| 58 |
+
|
| 59 |
+
args : tuple, optional
|
| 60 |
+
Any additional fixed parameters needed to
|
| 61 |
+
completely specify the objective function.
|
| 62 |
+
strategy : {str, callable}, optional
|
| 63 |
+
The differential evolution strategy to use. Should be one of:
|
| 64 |
+
|
| 65 |
+
- 'best1bin'
|
| 66 |
+
- 'best1exp'
|
| 67 |
+
- 'rand1bin'
|
| 68 |
+
- 'rand1exp'
|
| 69 |
+
- 'rand2bin'
|
| 70 |
+
- 'rand2exp'
|
| 71 |
+
- 'randtobest1bin'
|
| 72 |
+
- 'randtobest1exp'
|
| 73 |
+
- 'currenttobest1bin'
|
| 74 |
+
- 'currenttobest1exp'
|
| 75 |
+
- 'best2exp'
|
| 76 |
+
- 'best2bin'
|
| 77 |
+
|
| 78 |
+
The default is 'best1bin'. Strategies that may be implemented are
|
| 79 |
+
outlined in 'Notes'.
|
| 80 |
+
Alternatively the differential evolution strategy can be customized by
|
| 81 |
+
providing a callable that constructs a trial vector. The callable must
|
| 82 |
+
have the form ``strategy(candidate: int, population: np.ndarray, rng=None)``,
|
| 83 |
+
where ``candidate`` is an integer specifying which entry of the
|
| 84 |
+
population is being evolved, ``population`` is an array of shape
|
| 85 |
+
``(S, N)`` containing all the population members (where S is the
|
| 86 |
+
total population size), and ``rng`` is the random number generator
|
| 87 |
+
being used within the solver.
|
| 88 |
+
``candidate`` will be in the range ``[0, S)``.
|
| 89 |
+
``strategy`` must return a trial vector with shape `(N,)`. The
|
| 90 |
+
fitness of this trial vector is compared against the fitness of
|
| 91 |
+
``population[candidate]``.
|
| 92 |
+
|
| 93 |
+
.. versionchanged:: 1.12.0
|
| 94 |
+
Customization of evolution strategy via a callable.
|
| 95 |
+
|
| 96 |
+
maxiter : int, optional
|
| 97 |
+
The maximum number of generations over which the entire population is
|
| 98 |
+
evolved. The maximum number of function evaluations (with no polishing)
|
| 99 |
+
is: ``(maxiter + 1) * popsize * (N - N_equal)``
|
| 100 |
+
popsize : int, optional
|
| 101 |
+
A multiplier for setting the total population size. The population has
|
| 102 |
+
``popsize * (N - N_equal)`` individuals. This keyword is overridden if
|
| 103 |
+
an initial population is supplied via the `init` keyword. When using
|
| 104 |
+
``init='sobol'`` the population size is calculated as the next power
|
| 105 |
+
of 2 after ``popsize * (N - N_equal)``.
|
| 106 |
+
tol : float, optional
|
| 107 |
+
Relative tolerance for convergence, the solving stops when
|
| 108 |
+
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
|
| 109 |
+
where and `atol` and `tol` are the absolute and relative tolerance
|
| 110 |
+
respectively.
|
| 111 |
+
mutation : float or tuple(float, float), optional
|
| 112 |
+
The mutation constant. In the literature this is also known as
|
| 113 |
+
differential weight, being denoted by F.
|
| 114 |
+
If specified as a float it should be in the range [0, 2].
|
| 115 |
+
If specified as a tuple ``(min, max)`` dithering is employed. Dithering
|
| 116 |
+
randomly changes the mutation constant on a generation by generation
|
| 117 |
+
basis. The mutation constant for that generation is taken from
|
| 118 |
+
``U[min, max)``. Dithering can help speed convergence significantly.
|
| 119 |
+
Increasing the mutation constant increases the search radius, but will
|
| 120 |
+
slow down convergence.
|
| 121 |
+
recombination : float, optional
|
| 122 |
+
The recombination constant, should be in the range [0, 1]. In the
|
| 123 |
+
literature this is also known as the crossover probability, being
|
| 124 |
+
denoted by CR. Increasing this value allows a larger number of mutants
|
| 125 |
+
to progress into the next generation, but at the risk of population
|
| 126 |
+
stability.
|
| 127 |
+
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
|
| 128 |
+
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
|
| 129 |
+
singleton is used.
|
| 130 |
+
If `seed` is an int, a new ``RandomState`` instance is used,
|
| 131 |
+
seeded with `seed`.
|
| 132 |
+
If `seed` is already a ``Generator`` or ``RandomState`` instance then
|
| 133 |
+
that instance is used.
|
| 134 |
+
Specify `seed` for repeatable minimizations.
|
| 135 |
+
disp : bool, optional
|
| 136 |
+
Prints the evaluated `func` at every iteration.
|
| 137 |
+
callback : callable, optional
|
| 138 |
+
A callable called after each iteration. Has the signature:
|
| 139 |
+
|
| 140 |
+
``callback(intermediate_result: OptimizeResult)``
|
| 141 |
+
|
| 142 |
+
where ``intermediate_result`` is a keyword parameter containing an
|
| 143 |
+
`OptimizeResult` with attributes ``x`` and ``fun``, the best solution
|
| 144 |
+
found so far and the objective function. Note that the name
|
| 145 |
+
of the parameter must be ``intermediate_result`` for the callback
|
| 146 |
+
to be passed an `OptimizeResult`.
|
| 147 |
+
|
| 148 |
+
The callback also supports a signature like:
|
| 149 |
+
|
| 150 |
+
``callback(x, convergence: float=val)``
|
| 151 |
+
|
| 152 |
+
``val`` represents the fractional value of the population convergence.
|
| 153 |
+
When ``val`` is greater than ``1.0``, the function halts.
|
| 154 |
+
|
| 155 |
+
Introspection is used to determine which of the signatures is invoked.
|
| 156 |
+
|
| 157 |
+
Global minimization will halt if the callback raises ``StopIteration``
|
| 158 |
+
or returns ``True``; any polishing is still carried out.
|
| 159 |
+
|
| 160 |
+
.. versionchanged:: 1.12.0
|
| 161 |
+
callback accepts the ``intermediate_result`` keyword.
|
| 162 |
+
|
| 163 |
+
polish : bool, optional
|
| 164 |
+
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
|
| 165 |
+
method is used to polish the best population member at the end, which
|
| 166 |
+
can improve the minimization slightly. If a constrained problem is
|
| 167 |
+
being studied then the `trust-constr` method is used instead. For large
|
| 168 |
+
problems with many constraints, polishing can take a long time due to
|
| 169 |
+
the Jacobian computations.
|
| 170 |
+
init : str or array-like, optional
|
| 171 |
+
Specify which type of population initialization is performed. Should be
|
| 172 |
+
one of:
|
| 173 |
+
|
| 174 |
+
- 'latinhypercube'
|
| 175 |
+
- 'sobol'
|
| 176 |
+
- 'halton'
|
| 177 |
+
- 'random'
|
| 178 |
+
- array specifying the initial population. The array should have
|
| 179 |
+
shape ``(S, N)``, where S is the total population size and N is
|
| 180 |
+
the number of parameters.
|
| 181 |
+
`init` is clipped to `bounds` before use.
|
| 182 |
+
|
| 183 |
+
The default is 'latinhypercube'. Latin Hypercube sampling tries to
|
| 184 |
+
maximize coverage of the available parameter space.
|
| 185 |
+
|
| 186 |
+
'sobol' and 'halton' are superior alternatives and maximize even more
|
| 187 |
+
the parameter space. 'sobol' will enforce an initial population
|
| 188 |
+
size which is calculated as the next power of 2 after
|
| 189 |
+
``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit
|
| 190 |
+
less efficient. See `scipy.stats.qmc` for more details.
|
| 191 |
+
|
| 192 |
+
'random' initializes the population randomly - this has the drawback
|
| 193 |
+
that clustering can occur, preventing the whole of parameter space
|
| 194 |
+
being covered. Use of an array to specify a population could be used,
|
| 195 |
+
for example, to create a tight bunch of initial guesses in an location
|
| 196 |
+
where the solution is known to exist, thereby reducing time for
|
| 197 |
+
convergence.
|
| 198 |
+
atol : float, optional
|
| 199 |
+
Absolute tolerance for convergence, the solving stops when
|
| 200 |
+
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
|
| 201 |
+
where and `atol` and `tol` are the absolute and relative tolerance
|
| 202 |
+
respectively.
|
| 203 |
+
updating : {'immediate', 'deferred'}, optional
|
| 204 |
+
If ``'immediate'``, the best solution vector is continuously updated
|
| 205 |
+
within a single generation [4]_. This can lead to faster convergence as
|
| 206 |
+
trial vectors can take advantage of continuous improvements in the best
|
| 207 |
+
solution.
|
| 208 |
+
With ``'deferred'``, the best solution vector is updated once per
|
| 209 |
+
generation. Only ``'deferred'`` is compatible with parallelization or
|
| 210 |
+
vectorization, and the `workers` and `vectorized` keywords can
|
| 211 |
+
over-ride this option.
|
| 212 |
+
|
| 213 |
+
.. versionadded:: 1.2.0
|
| 214 |
+
|
| 215 |
+
workers : int or map-like callable, optional
|
| 216 |
+
If `workers` is an int the population is subdivided into `workers`
|
| 217 |
+
sections and evaluated in parallel
|
| 218 |
+
(uses `multiprocessing.Pool <multiprocessing>`).
|
| 219 |
+
Supply -1 to use all available CPU cores.
|
| 220 |
+
Alternatively supply a map-like callable, such as
|
| 221 |
+
`multiprocessing.Pool.map` for evaluating the population in parallel.
|
| 222 |
+
This evaluation is carried out as ``workers(func, iterable)``.
|
| 223 |
+
This option will override the `updating` keyword to
|
| 224 |
+
``updating='deferred'`` if ``workers != 1``.
|
| 225 |
+
This option overrides the `vectorized` keyword if ``workers != 1``.
|
| 226 |
+
Requires that `func` be pickleable.
|
| 227 |
+
|
| 228 |
+
.. versionadded:: 1.2.0
|
| 229 |
+
|
| 230 |
+
constraints : {NonLinearConstraint, LinearConstraint, Bounds}
|
| 231 |
+
Constraints on the solver, over and above those applied by the `bounds`
|
| 232 |
+
kwd. Uses the approach by Lampinen [5]_.
|
| 233 |
+
|
| 234 |
+
.. versionadded:: 1.4.0
|
| 235 |
+
|
| 236 |
+
x0 : None or array-like, optional
|
| 237 |
+
Provides an initial guess to the minimization. Once the population has
|
| 238 |
+
been initialized this vector replaces the first (best) member. This
|
| 239 |
+
replacement is done even if `init` is given an initial population.
|
| 240 |
+
``x0.shape == (N,)``.
|
| 241 |
+
|
| 242 |
+
.. versionadded:: 1.7.0
|
| 243 |
+
|
| 244 |
+
integrality : 1-D array, optional
|
| 245 |
+
For each decision variable, a boolean value indicating whether the
|
| 246 |
+
decision variable is constrained to integer values. The array is
|
| 247 |
+
broadcast to ``(N,)``.
|
| 248 |
+
If any decision variables are constrained to be integral, they will not
|
| 249 |
+
be changed during polishing.
|
| 250 |
+
Only integer values lying between the lower and upper bounds are used.
|
| 251 |
+
If there are no integer values lying between the bounds then a
|
| 252 |
+
`ValueError` is raised.
|
| 253 |
+
|
| 254 |
+
.. versionadded:: 1.9.0
|
| 255 |
+
|
| 256 |
+
vectorized : bool, optional
|
| 257 |
+
If ``vectorized is True``, `func` is sent an `x` array with
|
| 258 |
+
``x.shape == (N, S)``, and is expected to return an array of shape
|
| 259 |
+
``(S,)``, where `S` is the number of solution vectors to be calculated.
|
| 260 |
+
If constraints are applied, each of the functions used to construct
|
| 261 |
+
a `Constraint` object should accept an `x` array with
|
| 262 |
+
``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
|
| 263 |
+
`M` is the number of constraint components.
|
| 264 |
+
This option is an alternative to the parallelization offered by
|
| 265 |
+
`workers`, and may help in optimization speed by reducing interpreter
|
| 266 |
+
overhead from multiple function calls. This keyword is ignored if
|
| 267 |
+
``workers != 1``.
|
| 268 |
+
This option will override the `updating` keyword to
|
| 269 |
+
``updating='deferred'``.
|
| 270 |
+
See the notes section for further discussion on when to use
|
| 271 |
+
``'vectorized'``, and when to use ``'workers'``.
|
| 272 |
+
|
| 273 |
+
.. versionadded:: 1.9.0
|
| 274 |
+
|
| 275 |
+
Returns
|
| 276 |
+
-------
|
| 277 |
+
res : OptimizeResult
|
| 278 |
+
The optimization result represented as a `OptimizeResult` object.
|
| 279 |
+
Important attributes are: ``x`` the solution array, ``success`` a
|
| 280 |
+
Boolean flag indicating if the optimizer exited successfully,
|
| 281 |
+
``message`` which describes the cause of the termination,
|
| 282 |
+
``population`` the solution vectors present in the population, and
|
| 283 |
+
``population_energies`` the value of the objective function for each
|
| 284 |
+
entry in ``population``.
|
| 285 |
+
See `OptimizeResult` for a description of other attributes. If `polish`
|
| 286 |
+
was employed, and a lower minimum was obtained by the polishing, then
|
| 287 |
+
OptimizeResult also contains the ``jac`` attribute.
|
| 288 |
+
If the eventual solution does not satisfy the applied constraints
|
| 289 |
+
``success`` will be `False`.
|
| 290 |
+
|
| 291 |
+
Notes
|
| 292 |
+
-----
|
| 293 |
+
Differential evolution is a stochastic population based method that is
|
| 294 |
+
useful for global optimization problems. At each pass through the
|
| 295 |
+
population the algorithm mutates each candidate solution by mixing with
|
| 296 |
+
other candidate solutions to create a trial candidate. There are several
|
| 297 |
+
strategies [3]_ for creating trial candidates, which suit some problems
|
| 298 |
+
more than others. The 'best1bin' strategy is a good starting point for
|
| 299 |
+
many systems. In this strategy two members of the population are randomly
|
| 300 |
+
chosen. Their difference is used to mutate the best member (the 'best' in
|
| 301 |
+
'best1bin'), :math:`x_0`, so far:
|
| 302 |
+
|
| 303 |
+
.. math::
|
| 304 |
+
|
| 305 |
+
b' = x_0 + mutation * (x_{r_0} - x_{r_1})
|
| 306 |
+
|
| 307 |
+
A trial vector is then constructed. Starting with a randomly chosen ith
|
| 308 |
+
parameter the trial is sequentially filled (in modulo) with parameters
|
| 309 |
+
from ``b'`` or the original candidate. The choice of whether to use ``b'``
|
| 310 |
+
or the original candidate is made with a binomial distribution (the 'bin'
|
| 311 |
+
in 'best1bin') - a random number in [0, 1) is generated. If this number is
|
| 312 |
+
less than the `recombination` constant then the parameter is loaded from
|
| 313 |
+
``b'``, otherwise it is loaded from the original candidate. The final
|
| 314 |
+
parameter is always loaded from ``b'``. Once the trial candidate is built
|
| 315 |
+
its fitness is assessed. If the trial is better than the original candidate
|
| 316 |
+
then it takes its place. If it is also better than the best overall
|
| 317 |
+
candidate it also replaces that.
|
| 318 |
+
|
| 319 |
+
The other strategies available are outlined in Qiang and
|
| 320 |
+
Mitchell (2014) [3]_.
|
| 321 |
+
|
| 322 |
+
.. math::
|
| 323 |
+
rand1* : b' = x_{r_0} + mutation*(x_{r_1} - x_{r_2})
|
| 324 |
+
|
| 325 |
+
rand2* : b' = x_{r_0} + mutation*(x_{r_1} + x_{r_2}
|
| 326 |
+
- x_{r_3} - x_{r_4})
|
| 327 |
+
|
| 328 |
+
best1* : b' = x_0 + mutation*(x_{r_0} - x_{r_1})
|
| 329 |
+
|
| 330 |
+
best2* : b' = x_0 + mutation*(x_{r_0} + x_{r_1}
|
| 331 |
+
- x_{r_2} - x_{r_3})
|
| 332 |
+
|
| 333 |
+
currenttobest1* : b' = x_i + mutation*(x_0 - x_i
|
| 334 |
+
+ x_{r_0} - x_{r_1})
|
| 335 |
+
|
| 336 |
+
randtobest1* : b' = x_{r_0} + mutation*(x_0 - x_{r_0}
|
| 337 |
+
+ x_{r_1} - x_{r_2})
|
| 338 |
+
|
| 339 |
+
where the integers :math:`r_0, r_1, r_2, r_3, r_4` are chosen randomly
|
| 340 |
+
from the interval [0, NP) with `NP` being the total population size and
|
| 341 |
+
the original candidate having index `i`. The user can fully customize the
|
| 342 |
+
generation of the trial candidates by supplying a callable to ``strategy``.
|
| 343 |
+
|
| 344 |
+
To improve your chances of finding a global minimum use higher `popsize`
|
| 345 |
+
values, with higher `mutation` and (dithering), but lower `recombination`
|
| 346 |
+
values. This has the effect of widening the search radius, but slowing
|
| 347 |
+
convergence.
|
| 348 |
+
|
| 349 |
+
By default the best solution vector is updated continuously within a single
|
| 350 |
+
iteration (``updating='immediate'``). This is a modification [4]_ of the
|
| 351 |
+
original differential evolution algorithm which can lead to faster
|
| 352 |
+
convergence as trial vectors can immediately benefit from improved
|
| 353 |
+
solutions. To use the original Storn and Price behaviour, updating the best
|
| 354 |
+
solution once per iteration, set ``updating='deferred'``.
|
| 355 |
+
The ``'deferred'`` approach is compatible with both parallelization and
|
| 356 |
+
vectorization (``'workers'`` and ``'vectorized'`` keywords). These may
|
| 357 |
+
improve minimization speed by using computer resources more efficiently.
|
| 358 |
+
The ``'workers'`` distribute calculations over multiple processors. By
|
| 359 |
+
default the Python `multiprocessing` module is used, but other approaches
|
| 360 |
+
are also possible, such as the Message Passing Interface (MPI) used on
|
| 361 |
+
clusters [6]_ [7]_. The overhead from these approaches (creating new
|
| 362 |
+
Processes, etc) may be significant, meaning that computational speed
|
| 363 |
+
doesn't necessarily scale with the number of processors used.
|
| 364 |
+
Parallelization is best suited to computationally expensive objective
|
| 365 |
+
functions. If the objective function is less expensive, then
|
| 366 |
+
``'vectorized'`` may aid by only calling the objective function once per
|
| 367 |
+
iteration, rather than multiple times for all the population members; the
|
| 368 |
+
interpreter overhead is reduced.
|
| 369 |
+
|
| 370 |
+
.. versionadded:: 0.15.0
|
| 371 |
+
|
| 372 |
+
References
|
| 373 |
+
----------
|
| 374 |
+
.. [1] Differential evolution, Wikipedia,
|
| 375 |
+
http://en.wikipedia.org/wiki/Differential_evolution
|
| 376 |
+
.. [2] Storn, R and Price, K, Differential Evolution - a Simple and
|
| 377 |
+
Efficient Heuristic for Global Optimization over Continuous Spaces,
|
| 378 |
+
Journal of Global Optimization, 1997, 11, 341 - 359.
|
| 379 |
+
.. [3] Qiang, J., Mitchell, C., A Unified Differential Evolution Algorithm
|
| 380 |
+
for Global Optimization, 2014, https://www.osti.gov/servlets/purl/1163659
|
| 381 |
+
.. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K., -
|
| 382 |
+
Characterization of structures from X-ray scattering data using
|
| 383 |
+
genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357,
|
| 384 |
+
2827-2848
|
| 385 |
+
.. [5] Lampinen, J., A constraint handling approach for the differential
|
| 386 |
+
evolution algorithm. Proceedings of the 2002 Congress on
|
| 387 |
+
Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. IEEE,
|
| 388 |
+
2002.
|
| 389 |
+
.. [6] https://mpi4py.readthedocs.io/en/stable/
|
| 390 |
+
.. [7] https://schwimmbad.readthedocs.io/en/latest/
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
Examples
|
| 394 |
+
--------
|
| 395 |
+
Let us consider the problem of minimizing the Rosenbrock function. This
|
| 396 |
+
function is implemented in `rosen` in `scipy.optimize`.
|
| 397 |
+
|
| 398 |
+
>>> import numpy as np
|
| 399 |
+
>>> from scipy.optimize import rosen, differential_evolution
|
| 400 |
+
>>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
|
| 401 |
+
>>> result = differential_evolution(rosen, bounds)
|
| 402 |
+
>>> result.x, result.fun
|
| 403 |
+
(array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
|
| 404 |
+
|
| 405 |
+
Now repeat, but with parallelization.
|
| 406 |
+
|
| 407 |
+
>>> result = differential_evolution(rosen, bounds, updating='deferred',
|
| 408 |
+
... workers=2)
|
| 409 |
+
>>> result.x, result.fun
|
| 410 |
+
(array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
|
| 411 |
+
|
| 412 |
+
Let's do a constrained minimization.
|
| 413 |
+
|
| 414 |
+
>>> from scipy.optimize import LinearConstraint, Bounds
|
| 415 |
+
|
| 416 |
+
We add the constraint that the sum of ``x[0]`` and ``x[1]`` must be less
|
| 417 |
+
than or equal to 1.9. This is a linear constraint, which may be written
|
| 418 |
+
``A @ x <= 1.9``, where ``A = array([[1, 1]])``. This can be encoded as
|
| 419 |
+
a `LinearConstraint` instance:
|
| 420 |
+
|
| 421 |
+
>>> lc = LinearConstraint([[1, 1]], -np.inf, 1.9)
|
| 422 |
+
|
| 423 |
+
Specify limits using a `Bounds` object.
|
| 424 |
+
|
| 425 |
+
>>> bounds = Bounds([0., 0.], [2., 2.])
|
| 426 |
+
>>> result = differential_evolution(rosen, bounds, constraints=lc,
|
| 427 |
+
... seed=1)
|
| 428 |
+
>>> result.x, result.fun
|
| 429 |
+
(array([0.96632622, 0.93367155]), 0.0011352416852625719)
|
| 430 |
+
|
| 431 |
+
Next find the minimum of the Ackley function
|
| 432 |
+
(https://en.wikipedia.org/wiki/Test_functions_for_optimization).
|
| 433 |
+
|
| 434 |
+
>>> def ackley(x):
|
| 435 |
+
... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
|
| 436 |
+
... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
|
| 437 |
+
... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
|
| 438 |
+
>>> bounds = [(-5, 5), (-5, 5)]
|
| 439 |
+
>>> result = differential_evolution(ackley, bounds, seed=1)
|
| 440 |
+
>>> result.x, result.fun
|
| 441 |
+
(array([0., 0.]), 4.440892098500626e-16)
|
| 442 |
+
|
| 443 |
+
The Ackley function is written in a vectorized manner, so the
|
| 444 |
+
``'vectorized'`` keyword can be employed. Note the reduced number of
|
| 445 |
+
function evaluations.
|
| 446 |
+
|
| 447 |
+
>>> result = differential_evolution(
|
| 448 |
+
... ackley, bounds, vectorized=True, updating='deferred', seed=1
|
| 449 |
+
... )
|
| 450 |
+
>>> result.x, result.fun
|
| 451 |
+
(array([0., 0.]), 4.440892098500626e-16)
|
| 452 |
+
|
| 453 |
+
The following custom strategy function mimics 'best1bin':
|
| 454 |
+
|
| 455 |
+
>>> def custom_strategy_fn(candidate, population, rng=None):
|
| 456 |
+
... parameter_count = population.shape(-1)
|
| 457 |
+
... mutation, recombination = 0.7, 0.9
|
| 458 |
+
... trial = np.copy(population[candidate])
|
| 459 |
+
... fill_point = rng.choice(parameter_count)
|
| 460 |
+
...
|
| 461 |
+
... pool = np.arange(len(population))
|
| 462 |
+
... rng.shuffle(pool)
|
| 463 |
+
...
|
| 464 |
+
... # two unique random numbers that aren't the same, and
|
| 465 |
+
... # aren't equal to candidate.
|
| 466 |
+
... idxs = []
|
| 467 |
+
... while len(idxs) < 2 and len(pool) > 0:
|
| 468 |
+
... idx = pool[0]
|
| 469 |
+
... pool = pool[1:]
|
| 470 |
+
... if idx != candidate:
|
| 471 |
+
... idxs.append(idx)
|
| 472 |
+
...
|
| 473 |
+
... r0, r1 = idxs[:2]
|
| 474 |
+
...
|
| 475 |
+
... bprime = (population[0] + mutation *
|
| 476 |
+
... (population[r0] - population[r1]))
|
| 477 |
+
...
|
| 478 |
+
... crossovers = rng.uniform(size=parameter_count)
|
| 479 |
+
... crossovers = crossovers < recombination
|
| 480 |
+
... crossovers[fill_point] = True
|
| 481 |
+
... trial = np.where(crossovers, bprime, trial)
|
| 482 |
+
... return trial
|
| 483 |
+
|
| 484 |
+
"""
|
| 485 |
+
|
| 486 |
+
# using a context manager means that any created Pool objects are
|
| 487 |
+
# cleared up.
|
| 488 |
+
with DifferentialEvolutionSolver(func, bounds, args=args,
|
| 489 |
+
strategy=strategy,
|
| 490 |
+
maxiter=maxiter,
|
| 491 |
+
popsize=popsize, tol=tol,
|
| 492 |
+
mutation=mutation,
|
| 493 |
+
recombination=recombination,
|
| 494 |
+
seed=seed, polish=polish,
|
| 495 |
+
callback=callback,
|
| 496 |
+
disp=disp, init=init, atol=atol,
|
| 497 |
+
updating=updating,
|
| 498 |
+
workers=workers,
|
| 499 |
+
constraints=constraints,
|
| 500 |
+
x0=x0,
|
| 501 |
+
integrality=integrality,
|
| 502 |
+
vectorized=vectorized) as solver:
|
| 503 |
+
ret = solver.solve()
|
| 504 |
+
|
| 505 |
+
return ret
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
class DifferentialEvolutionSolver:
|
| 509 |
+
|
| 510 |
+
"""This class implements the differential evolution solver
|
| 511 |
+
|
| 512 |
+
Parameters
|
| 513 |
+
----------
|
| 514 |
+
func : callable
|
| 515 |
+
The objective function to be minimized. Must be in the form
|
| 516 |
+
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
|
| 517 |
+
and ``args`` is a tuple of any additional fixed parameters needed to
|
| 518 |
+
completely specify the function. The number of parameters, N, is equal
|
| 519 |
+
to ``len(x)``.
|
| 520 |
+
bounds : sequence or `Bounds`
|
| 521 |
+
Bounds for variables. There are two ways to specify the bounds:
|
| 522 |
+
|
| 523 |
+
1. Instance of `Bounds` class.
|
| 524 |
+
2. ``(min, max)`` pairs for each element in ``x``, defining the
|
| 525 |
+
finite lower and upper bounds for the optimizing argument of
|
| 526 |
+
`func`.
|
| 527 |
+
|
| 528 |
+
The total number of bounds is used to determine the number of
|
| 529 |
+
parameters, N. If there are parameters whose bounds are equal the total
|
| 530 |
+
number of free parameters is ``N - N_equal``.
|
| 531 |
+
args : tuple, optional
|
| 532 |
+
Any additional fixed parameters needed to
|
| 533 |
+
completely specify the objective function.
|
| 534 |
+
strategy : {str, callable}, optional
|
| 535 |
+
The differential evolution strategy to use. Should be one of:
|
| 536 |
+
|
| 537 |
+
- 'best1bin'
|
| 538 |
+
- 'best1exp'
|
| 539 |
+
- 'rand1bin'
|
| 540 |
+
- 'rand1exp'
|
| 541 |
+
- 'rand2bin'
|
| 542 |
+
- 'rand2exp'
|
| 543 |
+
- 'randtobest1bin'
|
| 544 |
+
- 'randtobest1exp'
|
| 545 |
+
- 'currenttobest1bin'
|
| 546 |
+
- 'currenttobest1exp'
|
| 547 |
+
- 'best2exp'
|
| 548 |
+
- 'best2bin'
|
| 549 |
+
|
| 550 |
+
The default is 'best1bin'. Strategies that may be
|
| 551 |
+
implemented are outlined in 'Notes'.
|
| 552 |
+
|
| 553 |
+
Alternatively the differential evolution strategy can be customized
|
| 554 |
+
by providing a callable that constructs a trial vector. The callable
|
| 555 |
+
must have the form
|
| 556 |
+
``strategy(candidate: int, population: np.ndarray, rng=None)``,
|
| 557 |
+
where ``candidate`` is an integer specifying which entry of the
|
| 558 |
+
population is being evolved, ``population`` is an array of shape
|
| 559 |
+
``(S, N)`` containing all the population members (where S is the
|
| 560 |
+
total population size), and ``rng`` is the random number generator
|
| 561 |
+
being used within the solver.
|
| 562 |
+
``candidate`` will be in the range ``[0, S)``.
|
| 563 |
+
``strategy`` must return a trial vector with shape `(N,)`. The
|
| 564 |
+
fitness of this trial vector is compared against the fitness of
|
| 565 |
+
``population[candidate]``.
|
| 566 |
+
maxiter : int, optional
|
| 567 |
+
The maximum number of generations over which the entire population is
|
| 568 |
+
evolved. The maximum number of function evaluations (with no polishing)
|
| 569 |
+
is: ``(maxiter + 1) * popsize * (N - N_equal)``
|
| 570 |
+
popsize : int, optional
|
| 571 |
+
A multiplier for setting the total population size. The population has
|
| 572 |
+
``popsize * (N - N_equal)`` individuals. This keyword is overridden if
|
| 573 |
+
an initial population is supplied via the `init` keyword. When using
|
| 574 |
+
``init='sobol'`` the population size is calculated as the next power
|
| 575 |
+
of 2 after ``popsize * (N - N_equal)``.
|
| 576 |
+
tol : float, optional
|
| 577 |
+
Relative tolerance for convergence, the solving stops when
|
| 578 |
+
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
|
| 579 |
+
where and `atol` and `tol` are the absolute and relative tolerance
|
| 580 |
+
respectively.
|
| 581 |
+
mutation : float or tuple(float, float), optional
|
| 582 |
+
The mutation constant. In the literature this is also known as
|
| 583 |
+
differential weight, being denoted by F.
|
| 584 |
+
If specified as a float it should be in the range [0, 2].
|
| 585 |
+
If specified as a tuple ``(min, max)`` dithering is employed. Dithering
|
| 586 |
+
randomly changes the mutation constant on a generation by generation
|
| 587 |
+
basis. The mutation constant for that generation is taken from
|
| 588 |
+
U[min, max). Dithering can help speed convergence significantly.
|
| 589 |
+
Increasing the mutation constant increases the search radius, but will
|
| 590 |
+
slow down convergence.
|
| 591 |
+
recombination : float, optional
|
| 592 |
+
The recombination constant, should be in the range [0, 1]. In the
|
| 593 |
+
literature this is also known as the crossover probability, being
|
| 594 |
+
denoted by CR. Increasing this value allows a larger number of mutants
|
| 595 |
+
to progress into the next generation, but at the risk of population
|
| 596 |
+
stability.
|
| 597 |
+
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
|
| 598 |
+
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
|
| 599 |
+
singleton is used.
|
| 600 |
+
If `seed` is an int, a new ``RandomState`` instance is used,
|
| 601 |
+
seeded with `seed`.
|
| 602 |
+
If `seed` is already a ``Generator`` or ``RandomState`` instance then
|
| 603 |
+
that instance is used.
|
| 604 |
+
Specify `seed` for repeatable minimizations.
|
| 605 |
+
disp : bool, optional
|
| 606 |
+
Prints the evaluated `func` at every iteration.
|
| 607 |
+
callback : callable, optional
|
| 608 |
+
A callable called after each iteration. Has the signature:
|
| 609 |
+
|
| 610 |
+
``callback(intermediate_result: OptimizeResult)``
|
| 611 |
+
|
| 612 |
+
where ``intermediate_result`` is a keyword parameter containing an
|
| 613 |
+
`OptimizeResult` with attributes ``x`` and ``fun``, the best solution
|
| 614 |
+
found so far and the objective function. Note that the name
|
| 615 |
+
of the parameter must be ``intermediate_result`` for the callback
|
| 616 |
+
to be passed an `OptimizeResult`.
|
| 617 |
+
|
| 618 |
+
The callback also supports a signature like:
|
| 619 |
+
|
| 620 |
+
``callback(x, convergence: float=val)``
|
| 621 |
+
|
| 622 |
+
``val`` represents the fractional value of the population convergence.
|
| 623 |
+
When ``val`` is greater than ``1.0``, the function halts.
|
| 624 |
+
|
| 625 |
+
Introspection is used to determine which of the signatures is invoked.
|
| 626 |
+
|
| 627 |
+
Global minimization will halt if the callback raises ``StopIteration``
|
| 628 |
+
or returns ``True``; any polishing is still carried out.
|
| 629 |
+
|
| 630 |
+
.. versionchanged:: 1.12.0
|
| 631 |
+
callback accepts the ``intermediate_result`` keyword.
|
| 632 |
+
|
| 633 |
+
polish : bool, optional
|
| 634 |
+
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
|
| 635 |
+
method is used to polish the best population member at the end, which
|
| 636 |
+
can improve the minimization slightly. If a constrained problem is
|
| 637 |
+
being studied then the `trust-constr` method is used instead. For large
|
| 638 |
+
problems with many constraints, polishing can take a long time due to
|
| 639 |
+
the Jacobian computations.
|
| 640 |
+
maxfun : int, optional
|
| 641 |
+
Set the maximum number of function evaluations. However, it probably
|
| 642 |
+
makes more sense to set `maxiter` instead.
|
| 643 |
+
init : str or array-like, optional
|
| 644 |
+
Specify which type of population initialization is performed. Should be
|
| 645 |
+
one of:
|
| 646 |
+
|
| 647 |
+
- 'latinhypercube'
|
| 648 |
+
- 'sobol'
|
| 649 |
+
- 'halton'
|
| 650 |
+
- 'random'
|
| 651 |
+
- array specifying the initial population. The array should have
|
| 652 |
+
shape ``(S, N)``, where S is the total population size and
|
| 653 |
+
N is the number of parameters.
|
| 654 |
+
`init` is clipped to `bounds` before use.
|
| 655 |
+
|
| 656 |
+
The default is 'latinhypercube'. Latin Hypercube sampling tries to
|
| 657 |
+
maximize coverage of the available parameter space.
|
| 658 |
+
|
| 659 |
+
'sobol' and 'halton' are superior alternatives and maximize even more
|
| 660 |
+
the parameter space. 'sobol' will enforce an initial population
|
| 661 |
+
size which is calculated as the next power of 2 after
|
| 662 |
+
``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit
|
| 663 |
+
less efficient. See `scipy.stats.qmc` for more details.
|
| 664 |
+
|
| 665 |
+
'random' initializes the population randomly - this has the drawback
|
| 666 |
+
that clustering can occur, preventing the whole of parameter space
|
| 667 |
+
being covered. Use of an array to specify a population could be used,
|
| 668 |
+
for example, to create a tight bunch of initial guesses in an location
|
| 669 |
+
where the solution is known to exist, thereby reducing time for
|
| 670 |
+
convergence.
|
| 671 |
+
atol : float, optional
|
| 672 |
+
Absolute tolerance for convergence, the solving stops when
|
| 673 |
+
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
|
| 674 |
+
where and `atol` and `tol` are the absolute and relative tolerance
|
| 675 |
+
respectively.
|
| 676 |
+
updating : {'immediate', 'deferred'}, optional
|
| 677 |
+
If ``'immediate'``, the best solution vector is continuously updated
|
| 678 |
+
within a single generation [4]_. This can lead to faster convergence as
|
| 679 |
+
trial vectors can take advantage of continuous improvements in the best
|
| 680 |
+
solution.
|
| 681 |
+
With ``'deferred'``, the best solution vector is updated once per
|
| 682 |
+
generation. Only ``'deferred'`` is compatible with parallelization or
|
| 683 |
+
vectorization, and the `workers` and `vectorized` keywords can
|
| 684 |
+
over-ride this option.
|
| 685 |
+
workers : int or map-like callable, optional
|
| 686 |
+
If `workers` is an int the population is subdivided into `workers`
|
| 687 |
+
sections and evaluated in parallel
|
| 688 |
+
(uses `multiprocessing.Pool <multiprocessing>`).
|
| 689 |
+
Supply `-1` to use all cores available to the Process.
|
| 690 |
+
Alternatively supply a map-like callable, such as
|
| 691 |
+
`multiprocessing.Pool.map` for evaluating the population in parallel.
|
| 692 |
+
This evaluation is carried out as ``workers(func, iterable)``.
|
| 693 |
+
This option will override the `updating` keyword to
|
| 694 |
+
`updating='deferred'` if `workers != 1`.
|
| 695 |
+
Requires that `func` be pickleable.
|
| 696 |
+
constraints : {NonLinearConstraint, LinearConstraint, Bounds}
|
| 697 |
+
Constraints on the solver, over and above those applied by the `bounds`
|
| 698 |
+
kwd. Uses the approach by Lampinen.
|
| 699 |
+
x0 : None or array-like, optional
|
| 700 |
+
Provides an initial guess to the minimization. Once the population has
|
| 701 |
+
been initialized this vector replaces the first (best) member. This
|
| 702 |
+
replacement is done even if `init` is given an initial population.
|
| 703 |
+
``x0.shape == (N,)``.
|
| 704 |
+
integrality : 1-D array, optional
|
| 705 |
+
For each decision variable, a boolean value indicating whether the
|
| 706 |
+
decision variable is constrained to integer values. The array is
|
| 707 |
+
broadcast to ``(N,)``.
|
| 708 |
+
If any decision variables are constrained to be integral, they will not
|
| 709 |
+
be changed during polishing.
|
| 710 |
+
Only integer values lying between the lower and upper bounds are used.
|
| 711 |
+
If there are no integer values lying between the bounds then a
|
| 712 |
+
`ValueError` is raised.
|
| 713 |
+
vectorized : bool, optional
|
| 714 |
+
If ``vectorized is True``, `func` is sent an `x` array with
|
| 715 |
+
``x.shape == (N, S)``, and is expected to return an array of shape
|
| 716 |
+
``(S,)``, where `S` is the number of solution vectors to be calculated.
|
| 717 |
+
If constraints are applied, each of the functions used to construct
|
| 718 |
+
a `Constraint` object should accept an `x` array with
|
| 719 |
+
``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
|
| 720 |
+
`M` is the number of constraint components.
|
| 721 |
+
This option is an alternative to the parallelization offered by
|
| 722 |
+
`workers`, and may help in optimization speed. This keyword is
|
| 723 |
+
ignored if ``workers != 1``.
|
| 724 |
+
This option will override the `updating` keyword to
|
| 725 |
+
``updating='deferred'``.
|
| 726 |
+
"""
|
| 727 |
+
|
| 728 |
+
# Dispatch of mutation strategy method (binomial or exponential).
|
| 729 |
+
_binomial = {'best1bin': '_best1',
|
| 730 |
+
'randtobest1bin': '_randtobest1',
|
| 731 |
+
'currenttobest1bin': '_currenttobest1',
|
| 732 |
+
'best2bin': '_best2',
|
| 733 |
+
'rand2bin': '_rand2',
|
| 734 |
+
'rand1bin': '_rand1'}
|
| 735 |
+
_exponential = {'best1exp': '_best1',
|
| 736 |
+
'rand1exp': '_rand1',
|
| 737 |
+
'randtobest1exp': '_randtobest1',
|
| 738 |
+
'currenttobest1exp': '_currenttobest1',
|
| 739 |
+
'best2exp': '_best2',
|
| 740 |
+
'rand2exp': '_rand2'}
|
| 741 |
+
|
| 742 |
+
__init_error_msg = ("The population initialization method must be one of "
|
| 743 |
+
"'latinhypercube' or 'random', or an array of shape "
|
| 744 |
+
"(S, N) where N is the number of parameters and S>5")
|
| 745 |
+
|
| 746 |
+
def __init__(self, func, bounds, args=(),
|
| 747 |
+
strategy='best1bin', maxiter=1000, popsize=15,
|
| 748 |
+
tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
|
| 749 |
+
maxfun=np.inf, callback=None, disp=False, polish=True,
|
| 750 |
+
init='latinhypercube', atol=0, updating='immediate',
|
| 751 |
+
workers=1, constraints=(), x0=None, *, integrality=None,
|
| 752 |
+
vectorized=False):
|
| 753 |
+
|
| 754 |
+
if callable(strategy):
|
| 755 |
+
# a callable strategy is going to be stored in self.strategy anyway
|
| 756 |
+
pass
|
| 757 |
+
elif strategy in self._binomial:
|
| 758 |
+
self.mutation_func = getattr(self, self._binomial[strategy])
|
| 759 |
+
elif strategy in self._exponential:
|
| 760 |
+
self.mutation_func = getattr(self, self._exponential[strategy])
|
| 761 |
+
else:
|
| 762 |
+
raise ValueError("Please select a valid mutation strategy")
|
| 763 |
+
self.strategy = strategy
|
| 764 |
+
|
| 765 |
+
self.callback = _wrap_callback(callback, "differential_evolution")
|
| 766 |
+
self.polish = polish
|
| 767 |
+
|
| 768 |
+
# set the updating / parallelisation options
|
| 769 |
+
if updating in ['immediate', 'deferred']:
|
| 770 |
+
self._updating = updating
|
| 771 |
+
|
| 772 |
+
self.vectorized = vectorized
|
| 773 |
+
|
| 774 |
+
# want to use parallelisation, but updating is immediate
|
| 775 |
+
if workers != 1 and updating == 'immediate':
|
| 776 |
+
warnings.warn("differential_evolution: the 'workers' keyword has"
|
| 777 |
+
" overridden updating='immediate' to"
|
| 778 |
+
" updating='deferred'", UserWarning, stacklevel=2)
|
| 779 |
+
self._updating = 'deferred'
|
| 780 |
+
|
| 781 |
+
if vectorized and workers != 1:
|
| 782 |
+
warnings.warn("differential_evolution: the 'workers' keyword"
|
| 783 |
+
" overrides the 'vectorized' keyword", stacklevel=2)
|
| 784 |
+
self.vectorized = vectorized = False
|
| 785 |
+
|
| 786 |
+
if vectorized and updating == 'immediate':
|
| 787 |
+
warnings.warn("differential_evolution: the 'vectorized' keyword"
|
| 788 |
+
" has overridden updating='immediate' to updating"
|
| 789 |
+
"='deferred'", UserWarning, stacklevel=2)
|
| 790 |
+
self._updating = 'deferred'
|
| 791 |
+
|
| 792 |
+
# an object with a map method.
|
| 793 |
+
if vectorized:
|
| 794 |
+
def maplike_for_vectorized_func(func, x):
|
| 795 |
+
# send an array (N, S) to the user func,
|
| 796 |
+
# expect to receive (S,). Transposition is required because
|
| 797 |
+
# internally the population is held as (S, N)
|
| 798 |
+
return np.atleast_1d(func(x.T))
|
| 799 |
+
workers = maplike_for_vectorized_func
|
| 800 |
+
|
| 801 |
+
self._mapwrapper = MapWrapper(workers)
|
| 802 |
+
|
| 803 |
+
# relative and absolute tolerances for convergence
|
| 804 |
+
self.tol, self.atol = tol, atol
|
| 805 |
+
|
| 806 |
+
# Mutation constant should be in [0, 2). If specified as a sequence
|
| 807 |
+
# then dithering is performed.
|
| 808 |
+
self.scale = mutation
|
| 809 |
+
if (not np.all(np.isfinite(mutation)) or
|
| 810 |
+
np.any(np.array(mutation) >= 2) or
|
| 811 |
+
np.any(np.array(mutation) < 0)):
|
| 812 |
+
raise ValueError('The mutation constant must be a float in '
|
| 813 |
+
'U[0, 2), or specified as a tuple(min, max)'
|
| 814 |
+
' where min < max and min, max are in U[0, 2).')
|
| 815 |
+
|
| 816 |
+
self.dither = None
|
| 817 |
+
if hasattr(mutation, '__iter__') and len(mutation) > 1:
|
| 818 |
+
self.dither = [mutation[0], mutation[1]]
|
| 819 |
+
self.dither.sort()
|
| 820 |
+
|
| 821 |
+
self.cross_over_probability = recombination
|
| 822 |
+
|
| 823 |
+
# we create a wrapped function to allow the use of map (and Pool.map
|
| 824 |
+
# in the future)
|
| 825 |
+
self.func = _FunctionWrapper(func, args)
|
| 826 |
+
self.args = args
|
| 827 |
+
|
| 828 |
+
# convert tuple of lower and upper bounds to limits
|
| 829 |
+
# [(low_0, high_0), ..., (low_n, high_n]
|
| 830 |
+
# -> [[low_0, ..., low_n], [high_0, ..., high_n]]
|
| 831 |
+
if isinstance(bounds, Bounds):
|
| 832 |
+
self.limits = np.array(new_bounds_to_old(bounds.lb,
|
| 833 |
+
bounds.ub,
|
| 834 |
+
len(bounds.lb)),
|
| 835 |
+
dtype=float).T
|
| 836 |
+
else:
|
| 837 |
+
self.limits = np.array(bounds, dtype='float').T
|
| 838 |
+
|
| 839 |
+
if (np.size(self.limits, 0) != 2 or not
|
| 840 |
+
np.all(np.isfinite(self.limits))):
|
| 841 |
+
raise ValueError('bounds should be a sequence containing finite '
|
| 842 |
+
'real valued (min, max) pairs for each value'
|
| 843 |
+
' in x')
|
| 844 |
+
|
| 845 |
+
if maxiter is None: # the default used to be None
|
| 846 |
+
maxiter = 1000
|
| 847 |
+
self.maxiter = maxiter
|
| 848 |
+
if maxfun is None: # the default used to be None
|
| 849 |
+
maxfun = np.inf
|
| 850 |
+
self.maxfun = maxfun
|
| 851 |
+
|
| 852 |
+
# population is scaled to between [0, 1].
|
| 853 |
+
# We have to scale between parameter <-> population
|
| 854 |
+
# save these arguments for _scale_parameter and
|
| 855 |
+
# _unscale_parameter. This is an optimization
|
| 856 |
+
self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
|
| 857 |
+
self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
|
| 858 |
+
with np.errstate(divide='ignore'):
|
| 859 |
+
# if lb == ub then the following line will be 1/0, which is why
|
| 860 |
+
# we ignore the divide by zero warning. The result from 1/0 is
|
| 861 |
+
# inf, so replace those values by 0.
|
| 862 |
+
self.__recip_scale_arg2 = 1 / self.__scale_arg2
|
| 863 |
+
self.__recip_scale_arg2[~np.isfinite(self.__recip_scale_arg2)] = 0
|
| 864 |
+
|
| 865 |
+
self.parameter_count = np.size(self.limits, 1)
|
| 866 |
+
|
| 867 |
+
self.random_number_generator = check_random_state(seed)
|
| 868 |
+
|
| 869 |
+
# Which parameters are going to be integers?
|
| 870 |
+
if np.any(integrality):
|
| 871 |
+
# # user has provided a truth value for integer constraints
|
| 872 |
+
integrality = np.broadcast_to(
|
| 873 |
+
integrality,
|
| 874 |
+
self.parameter_count
|
| 875 |
+
)
|
| 876 |
+
integrality = np.asarray(integrality, bool)
|
| 877 |
+
# For integrality parameters change the limits to only allow
|
| 878 |
+
# integer values lying between the limits.
|
| 879 |
+
lb, ub = np.copy(self.limits)
|
| 880 |
+
|
| 881 |
+
lb = np.ceil(lb)
|
| 882 |
+
ub = np.floor(ub)
|
| 883 |
+
if not (lb[integrality] <= ub[integrality]).all():
|
| 884 |
+
# there's a parameter that doesn't have an integer value
|
| 885 |
+
# lying between the limits
|
| 886 |
+
raise ValueError("One of the integrality constraints does not"
|
| 887 |
+
" have any possible integer values between"
|
| 888 |
+
" the lower/upper bounds.")
|
| 889 |
+
nlb = np.nextafter(lb[integrality] - 0.5, np.inf)
|
| 890 |
+
nub = np.nextafter(ub[integrality] + 0.5, -np.inf)
|
| 891 |
+
|
| 892 |
+
self.integrality = integrality
|
| 893 |
+
self.limits[0, self.integrality] = nlb
|
| 894 |
+
self.limits[1, self.integrality] = nub
|
| 895 |
+
else:
|
| 896 |
+
self.integrality = False
|
| 897 |
+
|
| 898 |
+
# check for equal bounds
|
| 899 |
+
eb = self.limits[0] == self.limits[1]
|
| 900 |
+
eb_count = np.count_nonzero(eb)
|
| 901 |
+
|
| 902 |
+
# default population initialization is a latin hypercube design, but
|
| 903 |
+
# there are other population initializations possible.
|
| 904 |
+
# the minimum is 5 because 'best2bin' requires a population that's at
|
| 905 |
+
# least 5 long
|
| 906 |
+
# 202301 - reduced population size to account for parameters with
|
| 907 |
+
# equal bounds. If there are no varying parameters set N to at least 1
|
| 908 |
+
self.num_population_members = max(
|
| 909 |
+
5,
|
| 910 |
+
popsize * max(1, self.parameter_count - eb_count)
|
| 911 |
+
)
|
| 912 |
+
self.population_shape = (self.num_population_members,
|
| 913 |
+
self.parameter_count)
|
| 914 |
+
|
| 915 |
+
self._nfev = 0
|
| 916 |
+
# check first str otherwise will fail to compare str with array
|
| 917 |
+
if isinstance(init, str):
|
| 918 |
+
if init == 'latinhypercube':
|
| 919 |
+
self.init_population_lhs()
|
| 920 |
+
elif init == 'sobol':
|
| 921 |
+
# must be Ns = 2**m for Sobol'
|
| 922 |
+
n_s = int(2 ** np.ceil(np.log2(self.num_population_members)))
|
| 923 |
+
self.num_population_members = n_s
|
| 924 |
+
self.population_shape = (self.num_population_members,
|
| 925 |
+
self.parameter_count)
|
| 926 |
+
self.init_population_qmc(qmc_engine='sobol')
|
| 927 |
+
elif init == 'halton':
|
| 928 |
+
self.init_population_qmc(qmc_engine='halton')
|
| 929 |
+
elif init == 'random':
|
| 930 |
+
self.init_population_random()
|
| 931 |
+
else:
|
| 932 |
+
raise ValueError(self.__init_error_msg)
|
| 933 |
+
else:
|
| 934 |
+
self.init_population_array(init)
|
| 935 |
+
|
| 936 |
+
if x0 is not None:
|
| 937 |
+
# scale to within unit interval and
|
| 938 |
+
# ensure parameters are within bounds.
|
| 939 |
+
x0_scaled = self._unscale_parameters(np.asarray(x0))
|
| 940 |
+
if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any():
|
| 941 |
+
raise ValueError(
|
| 942 |
+
"Some entries in x0 lay outside the specified bounds"
|
| 943 |
+
)
|
| 944 |
+
self.population[0] = x0_scaled
|
| 945 |
+
|
| 946 |
+
# infrastructure for constraints
|
| 947 |
+
self.constraints = constraints
|
| 948 |
+
self._wrapped_constraints = []
|
| 949 |
+
|
| 950 |
+
if hasattr(constraints, '__len__'):
|
| 951 |
+
# sequence of constraints, this will also deal with default
|
| 952 |
+
# keyword parameter
|
| 953 |
+
for c in constraints:
|
| 954 |
+
self._wrapped_constraints.append(
|
| 955 |
+
_ConstraintWrapper(c, self.x)
|
| 956 |
+
)
|
| 957 |
+
else:
|
| 958 |
+
self._wrapped_constraints = [
|
| 959 |
+
_ConstraintWrapper(constraints, self.x)
|
| 960 |
+
]
|
| 961 |
+
self.total_constraints = np.sum(
|
| 962 |
+
[c.num_constr for c in self._wrapped_constraints]
|
| 963 |
+
)
|
| 964 |
+
self.constraint_violation = np.zeros((self.num_population_members, 1))
|
| 965 |
+
self.feasible = np.ones(self.num_population_members, bool)
|
| 966 |
+
|
| 967 |
+
# an array to shuffle when selecting candidates. Create it here
|
| 968 |
+
# rather than repeatedly creating it in _select_samples.
|
| 969 |
+
self._random_population_index = np.arange(self.num_population_members)
|
| 970 |
+
self.disp = disp
|
| 971 |
+
|
| 972 |
+
def init_population_lhs(self):
    """
    Seed the population using Latin Hypercube Sampling, which guarantees
    that every parameter is sampled uniformly across its whole range.
    """
    rng = self.random_number_generator

    # The scaled parameter range [0, 1) is divided into one equal-width
    # segment per population member.
    segsize = 1.0 / self.num_population_members

    # Draw a uniform sample inside each segment, then shift each segment
    # so that together they tile the whole of [0, 1).
    offsets = np.linspace(0., 1., self.num_population_members,
                          endpoint=False)[:, np.newaxis]
    samples = segsize * rng.uniform(size=self.population_shape) + offsets

    # Build the population by independently permuting each parameter's
    # stratified samples.
    self.population = np.zeros_like(samples)
    for j in range(self.parameter_count):
        order = rng.permutation(range(self.num_population_members))
        self.population[:, j] = samples[order, j]

    # energies are unknown until the objective has been evaluated
    self.population_energies = np.full(self.num_population_members,
                                       np.inf)

    # restart the function evaluation counter
    self._nfev = 0
def init_population_qmc(self, qmc_engine):
    """Seed the population from a Quasi-Monte Carlo sequence.

    QMC sequences cover each parameter's range uniformly with low
    discrepancy.

    Parameters
    ----------
    qmc_engine : str
        Which QMC generator to use for initialization. One of
        ``latinhypercube``, ``sobol`` or ``halton``.

    """
    from scipy.stats import qmc

    rng = self.random_number_generator

    # choose the scipy.stats.qmc sampler matching the requested engine
    if qmc_engine == 'latinhypercube':
        sampler_cls = qmc.LatinHypercube
    elif qmc_engine == 'sobol':
        sampler_cls = qmc.Sobol
    elif qmc_engine == 'halton':
        sampler_cls = qmc.Halton
    else:
        raise ValueError(self.__init_error_msg)

    sampler = sampler_cls(d=self.parameter_count, seed=rng)
    self.population = sampler.random(n=self.num_population_members)

    # energies are unknown until the objective has been evaluated
    self.population_energies = np.full(self.num_population_members,
                                       np.inf)

    # restart the function evaluation counter
    self._nfev = 0
def init_population_random(self):
    """
    Seed the population with purely random draws. Unlike Latin Hypercube
    sampling this offers no stratification guarantee, so clustering of
    candidates is possible.
    """
    rng = self.random_number_generator
    self.population = rng.uniform(size=self.population_shape)

    # energies are unknown until the objective has been evaluated
    self.population_energies = np.full(self.num_population_members,
                                       np.inf)

    # restart the function evaluation counter
    self._nfev = 0
def init_population_array(self, init):
    """
    Seed the population from a user supplied array.

    Parameters
    ----------
    init : np.ndarray
        Array holding the initial candidates, of shape (S, N) where N is
        the number of parameters and S >= 5. Entries are clipped to the
        lower and upper bounds.
    """
    # work on a float copy of whatever the user handed in
    popn = np.asarray(init, dtype=np.float64)

    # NOTE: the order of these tests is significant — shape[1] is probed
    # before the dimensionality check, matching the historical behaviour
    # for 1-D input.
    if (np.size(popn, 0) < 5 or
            popn.shape[1] != self.parameter_count or
            len(popn.shape) != 2):
        raise ValueError("The population supplied needs to have shape"
                         " (S, len(x)), where S > 4.")

    # normalise to the unit hypercube and clip into [0, 1]
    self.population = np.clip(self._unscale_parameters(popn), 0, 1)

    # the supplied array dictates the population size from here on
    self.num_population_members = np.size(self.population, 0)
    self.population_shape = (self.num_population_members,
                             self.parameter_count)

    # energies are unknown until the objective has been evaluated
    self.population_energies = np.full(self.num_population_members,
                                       np.inf)

    # restart the function evaluation counter
    self._nfev = 0
@property
def x(self):
    """
    The best solution from the solver.

    Population entry 0 always holds the current best candidate (see
    ``_promote_lowest_energy``); scale it from the unit hypercube back
    into real parameter space before returning.
    """
    return self._scale_parameters(self.population[0])
@property
def convergence(self):
    """
    The standard deviation of the population energies divided by their
    mean.
    """
    energies = self.population_energies
    # unevaluated members still carry np.inf: no meaningful statistic yet
    if np.any(np.isinf(energies)):
        return np.inf
    # _MACHEPS guards against division by a zero mean
    denominator = np.abs(np.mean(energies)) + _MACHEPS
    return np.std(energies) / denominator
def converged(self):
    """
    Return True if the solver has converged.

    Convergence means the spread of population energies has fallen below
    ``atol + tol * |mean(energies)|``.
    """
    energies = self.population_energies
    # members still carrying np.inf have not been evaluated yet
    if np.any(np.isinf(energies)):
        return False

    threshold = self.atol + self.tol * np.abs(np.mean(energies))
    return np.std(energies) <= threshold
def solve(self):
    """
    Runs the DifferentialEvolutionSolver.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a `OptimizeResult` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully,
        ``message`` which describes the cause of the termination,
        ``population`` the solution vectors present in the population, and
        ``population_energies`` the value of the objective function for
        each entry in ``population``.
        See `OptimizeResult` for a description of other attributes. If
        `polish` was employed, and a lower minimum was obtained by the
        polishing, then OptimizeResult also contains the ``jac`` attribute.
        If the eventual solution does not satisfy the applied constraints
        ``success`` will be `False`.
    """
    nit, warning_flag = 0, False
    status_message = _status_message['success']

    # The population may have just been initialized (all entries are
    # np.inf). If it has you have to calculate the initial energies.
    # Although this is also done in the evolve generator it's possible
    # that someone can set maxiter=0, at which point we still want the
    # initial energies to be calculated (the following loop isn't run).
    if np.all(np.isinf(self.population_energies)):
        self.feasible, self.constraint_violation = (
            self._calculate_population_feasibilities(self.population))

        # only work out population energies for feasible solutions
        self.population_energies[self.feasible] = (
            self._calculate_population_energies(
                self.population[self.feasible]))

        self._promote_lowest_energy()

    # do the optimization.
    for nit in range(1, self.maxiter + 1):
        # evolve the population by a generation
        try:
            next(self)
        except StopIteration:
            # __next__ raises StopIteration when the maxfun budget is
            # exhausted mid-generation
            warning_flag = True
            if self._nfev > self.maxfun:
                status_message = _status_message['maxfev']
            elif self._nfev == self.maxfun:
                status_message = ('Maximum number of function evaluations'
                                  ' has been reached.')
            break

        if self.disp:
            print(f"differential_evolution step {nit}: f(x)="
                  f" {self.population_energies[0]}"
                  )

        if self.callback:
            # hand the user a snapshot of progress; the callback may halt
            # the solve either by returning truthy or raising StopIteration
            c = self.tol / (self.convergence + _MACHEPS)
            res = self._result(nit=nit, message="in progress")
            res.convergence = c
            try:
                warning_flag = bool(self.callback(res))
            except StopIteration:
                warning_flag = True

            if warning_flag:
                status_message = 'callback function requested stop early'

        # should the solver terminate?
        if warning_flag or self.converged():
            break

    else:
        # loop completed without converging or being stopped
        status_message = _status_message['maxiter']
        warning_flag = True

    DE_result = self._result(
        nit=nit, message=status_message, warning_flag=warning_flag
    )

    if self.polish and not np.all(self.integrality):
        # can't polish if all the parameters are integers
        if np.any(self.integrality):
            # set the lower/upper bounds equal so that any integrality
            # constraints work.
            limits, integrality = self.limits, self.integrality
            limits[0, integrality] = DE_result.x[integrality]
            limits[1, integrality] = DE_result.x[integrality]

        polish_method = 'L-BFGS-B'

        if self._wrapped_constraints:
            polish_method = 'trust-constr'

            constr_violation = self._constraint_violation_fn(DE_result.x)
            if np.any(constr_violation > 0.):
                warnings.warn("differential evolution didn't find a "
                              "solution satisfying the constraints, "
                              "attempting to polish from the least "
                              "infeasible solution",
                              UserWarning, stacklevel=2)
        if self.disp:
            print(f"Polishing solution with '{polish_method}'")
        result = minimize(self.func,
                          np.copy(DE_result.x),
                          method=polish_method,
                          bounds=self.limits.T,
                          constraints=self.constraints)

        self._nfev += result.nfev
        DE_result.nfev = self._nfev

        # Polishing solution is only accepted if there is an improvement in
        # cost function, the polishing was successful and the solution lies
        # within the bounds.
        if (result.fun < DE_result.fun and
                result.success and
                np.all(result.x <= self.limits[1]) and
                np.all(self.limits[0] <= result.x)):
            DE_result.fun = result.fun
            DE_result.x = result.x
            DE_result.jac = result.jac
            # to keep internal state consistent
            self.population_energies[0] = result.fun
            self.population[0] = self._unscale_parameters(result.x)

    if self._wrapped_constraints:
        DE_result.constr = [c.violation(DE_result.x) for
                            c in self._wrapped_constraints]
        DE_result.constr_violation = np.max(
            np.concatenate(DE_result.constr))
        DE_result.maxcv = DE_result.constr_violation
        if DE_result.maxcv > 0:
            # if the result is infeasible then success must be False
            DE_result.success = False
            DE_result.message = ("The solution does not satisfy the "
                                 f"constraints, MAXCV = {DE_result.maxcv}")

    return DE_result
def _result(self, **kwds):
    """Assemble an intermediate OptimizeResult from the current state."""
    nit = kwds.get('nit', None)
    message = kwds.get('message', None)
    warning_flag = kwds.get('warning_flag', False)

    result = OptimizeResult(
        x=self.x,
        fun=self.population_energies[0],
        nfev=self._nfev,
        nit=nit,
        message=message,
        success=(warning_flag is not True),
        population=self._scale_parameters(self.population),
        population_energies=self.population_energies
    )

    if self._wrapped_constraints:
        # report per-constraint violations plus the worst single component
        result.constr = [c.violation(result.x)
                         for c in self._wrapped_constraints]
        result.constr_violation = np.max(np.concatenate(result.constr))
        result.maxcv = result.constr_violation
        if result.maxcv > 0:
            # an infeasible result can never be reported as a success
            result.success = False

    return result
def _calculate_population_energies(self, population):
    """
    Evaluate the objective for every member of a (sub)population.

    Parameters
    ----------
    population : ndarray
        An array of parameter vectors normalised to [0, 1] using lower
        and upper limits. Has shape ``(np.size(population, 0), N)``.

    Returns
    -------
    energies : ndarray
        An array of energies corresponding to each population member. If
        maxfun will be exceeded during this call, then the number of
        function evaluations will be reduced and energies will be
        right-padded with np.inf. Has shape ``(np.size(population, 0),)``
    """
    num_members = np.size(population, 0)
    # S: how many evaluations the remaining maxfun budget still allows
    S = min(num_members, self.maxfun - self._nfev)

    # anything beyond the budget keeps the np.inf placeholder
    energies = np.full(num_members, np.inf)

    parameters_pop = self._scale_parameters(population)
    try:
        calc_energies = list(
            self._mapwrapper(self.func, parameters_pop[0:S])
        )
        calc_energies = np.squeeze(calc_energies)
    except (TypeError, ValueError) as e:
        # wrong number of arguments for _mapwrapper
        # or wrong length returned from the mapper
        raise RuntimeError(
            "The map-like callable must be of the form f(func, iterable), "
            "returning a sequence of numbers the same length as 'iterable'"
        ) from e

    if calc_energies.size != S:
        if self.vectorized:
            raise RuntimeError("The vectorized function must return an"
                               " array of shape (S,) when given an array"
                               " of shape (len(x), S)")
        raise RuntimeError("func(x, *args) must return a scalar value")

    energies[0:S] = calc_energies

    # a vectorized call evaluates the whole batch but counts once
    self._nfev += 1 if self.vectorized else S

    return energies
def _promote_lowest_energy(self):
    # Move the best candidate into slot 0 of the population, swapping all
    # of the per-member bookkeeping arrays along with it.
    idx = np.arange(self.num_population_members)
    feasible_solutions = idx[self.feasible]
    if feasible_solutions.size:
        # the best solution is the feasible one with the lowest energy
        best = feasible_solutions[
            np.argmin(self.population_energies[feasible_solutions])
        ]
    else:
        # nothing is feasible; fall back to the member that violates the
        # constraints the least
        best = np.argmin(np.sum(self.constraint_violation, axis=1))

    # swap entry `best` with entry 0 in every state array
    self.population_energies[[0, best]] = (
        self.population_energies[[best, 0]])
    self.population[[0, best], :] = self.population[[best, 0], :]
    self.feasible[[0, best]] = self.feasible[[best, 0]]
    self.constraint_violation[[0, best], :] = (
        self.constraint_violation[[best, 0], :])
def _constraint_violation_fn(self, x):
    """
    Calculates total constraint violation for all the constraints, for a
    set of solutions.

    Parameters
    ----------
    x : ndarray
        Solution vector(s). Has shape (S, N), or (N,), where S is the
        number of solutions to investigate and N is the number of
        parameters.

    Returns
    -------
    cv : ndarray
        Total violation of constraints. Has shape ``(S, M)``, where M is
        the total number of constraint components (which is not
        necessarily equal to len(self._wrapped_constraints)).
    """
    # number of solution vectors packed into x
    S = np.size(x) // self.parameter_count
    cv = np.zeros((S, self.total_constraints))
    start = 0
    for con in self._wrapped_constraints:
        # Constraint functions consume {(N, S), (N,)} and emit (M, S),
        # whereas this function works with (S, N)/(N,) — transpose on the
        # way in, and transpose the result back for further use.
        viol = con.violation(x.T).T

        # A well-behaved constraint yields (M,), (1, M) or (S, M). Catch
        # anything else here so the user gets a meaningful error instead
        # of an opaque reshape failure below.
        if (viol.shape[-1] != con.num_constr or
                (S > 1 and viol.shape[0] != S)):
            raise RuntimeError("An array returned from a Constraint has"
                               " the wrong shape. If `vectorized is False`"
                               " the Constraint should return an array of"
                               " shape (M,). If `vectorized is True` then"
                               " the Constraint must return an array of"
                               " shape (M, S), where S is the number of"
                               " solution vectors and M is the number of"
                               " constraint components in a given"
                               " Constraint object.")

        # A 1-D result is ambiguous — one solution with M components, or
        # M=1 for S solutions; the reshape settles it either way.
        end = start + con.num_constr
        cv[:, start:end] = np.reshape(viol, (S, con.num_constr))
        start = end

    return cv
def _calculate_population_feasibilities(self, population):
    """
    Calculate the feasibilities of a population.

    Parameters
    ----------
    population : ndarray
        An array of parameter vectors normalised to [0, 1] using lower
        and upper limits. Has shape ``(np.size(population, 0), N)``.

    Returns
    -------
    feasible, constraint_violation : ndarray, ndarray
        Boolean array of feasibility for each population member, and an
        array of the constraint violation for each population member.
        constraint_violation has shape ``(np.size(population, 0), M)``,
        where M is the number of constraints.
    """
    num_members = np.size(population, 0)
    if not self._wrapped_constraints:
        # no constraints: every member is trivially feasible
        return np.ones(num_members, bool), np.zeros((num_members, 1))

    # (S, N) in real parameter space
    parameters_pop = self._scale_parameters(population)

    if self.vectorized:
        # a single vectorized call returns (S, M) directly
        constraint_violation = np.array(
            self._constraint_violation_fn(parameters_pop)
        )
    else:
        # evaluate one solution at a time; each call yields (1, M), so
        # stacking produces (S, 1, M) and the middle axis must be dropped
        # to match the vectorized path's (S, M)
        constraint_violation = np.array(
            [self._constraint_violation_fn(x) for x in parameters_pop]
        )[:, 0]

    # a member is feasible only when its total violation is zero
    feasible = ~(np.sum(constraint_violation, axis=1) > 0)

    return feasible, constraint_violation
def __iter__(self):
    # The solver doubles as its own iterator: each __next__ call evolves
    # the population by one generation.
    return self
def __enter__(self):
    # Context-manager entry: nothing further to acquire beyond what
    # construction already set up.
    return self
def __exit__(self, *args):
    # Delegate cleanup to the map wrapper so any worker pool created for
    # parallel evaluation is shut down.
    return self._mapwrapper.__exit__(*args)
def _accept_trial(self, energy_trial, feasible_trial, cv_trial,
                  energy_orig, feasible_orig, cv_orig):
    """
    Decide whether a trial solution replaces the original.

    The trial is accepted when:
    * both solutions are feasible and the trial's objective is no worse,
      - or -
    * the trial is feasible while the original is not,
      - or -
    * the trial is infeasible but violates every constraint component no
      more than the original does.

    This test corresponds to section III of Lampinen [1]_.

    Parameters
    ----------
    energy_trial : float
        Energy of the trial solution
    feasible_trial : float
        Feasibility of trial solution
    cv_trial : array-like
        Excess constraint violation for the trial solution
    energy_orig : float
        Energy of the original solution
    feasible_orig : float
        Feasibility of original solution
    cv_orig : array-like
        Excess constraint violation for the original solution

    Returns
    -------
    accepted : bool

    """
    if feasible_trial:
        if feasible_orig:
            # both feasible: compare objective values directly
            return energy_trial <= energy_orig
        # feasible trial always beats an infeasible original
        return True

    # trial is infeasible: accept only if it is no worse on every
    # constraint component (which implies the original is also infeasible)
    return bool((cv_trial <= cv_orig).all())
def __next__(self):
    """
    Evolve the population by a single generation

    Returns
    -------
    x : ndarray
        The best solution from the solver.
    fun : float
        Value of objective function obtained from the best solution.
    """
    # the population may have just been initialized (all entries are
    # np.inf). If it has you have to calculate the initial energies
    if np.all(np.isinf(self.population_energies)):
        self.feasible, self.constraint_violation = (
            self._calculate_population_feasibilities(self.population))

        # only need to work out population energies for those that are
        # feasible
        self.population_energies[self.feasible] = (
            self._calculate_population_energies(
                self.population[self.feasible]))

        self._promote_lowest_energy()

    if self.dither is not None:
        # dithering: a fresh mutation constant is drawn each generation
        self.scale = self.random_number_generator.uniform(self.dither[0],
                                                          self.dither[1])

    if self._updating == 'immediate':
        # update best solution immediately
        for candidate in range(self.num_population_members):
            if self._nfev > self.maxfun:
                raise StopIteration

            # create a trial solution
            trial = self._mutate(candidate)

            # ensuring that it's in the range [0, 1)
            self._ensure_constraint(trial)

            # scale from [0, 1) to the actual parameter value
            parameters = self._scale_parameters(trial)

            # determine the energy of the objective function
            if self._wrapped_constraints:
                cv = self._constraint_violation_fn(parameters)
                feasible = False
                energy = np.inf
                # objective is only evaluated for feasible trials
                if not np.sum(cv) > 0:
                    # solution is feasible
                    feasible = True
                    energy = self.func(parameters)
                    self._nfev += 1
            else:
                # no constraints: every trial is feasible by definition
                feasible = True
                cv = np.atleast_2d([0.])
                energy = self.func(parameters)
                self._nfev += 1

            # compare trial and population member
            if self._accept_trial(energy, feasible, cv,
                                  self.population_energies[candidate],
                                  self.feasible[candidate],
                                  self.constraint_violation[candidate]):
                self.population[candidate] = trial
                self.population_energies[candidate] = np.squeeze(energy)
                self.feasible[candidate] = feasible
                self.constraint_violation[candidate] = cv

                # if the trial candidate is also better than the best
                # solution then promote it.
                if self._accept_trial(energy, feasible, cv,
                                      self.population_energies[0],
                                      self.feasible[0],
                                      self.constraint_violation[0]):
                    self._promote_lowest_energy()

    elif self._updating == 'deferred':
        # update best solution once per generation
        if self._nfev >= self.maxfun:
            raise StopIteration

        # 'deferred' approach, vectorised form.
        # create trial solutions
        trial_pop = self._mutate_many(
            np.arange(self.num_population_members)
        )

        # enforce bounds
        self._ensure_constraint(trial_pop)

        # determine the energies of the objective function, but only for
        # feasible trials
        feasible, cv = self._calculate_population_feasibilities(trial_pop)
        trial_energies = np.full(self.num_population_members, np.inf)

        # only calculate for feasible entries
        trial_energies[feasible] = self._calculate_population_energies(
            trial_pop[feasible])

        # which solutions are 'improved'?
        loc = [self._accept_trial(*val) for val in
               zip(trial_energies, feasible, cv, self.population_energies,
                   self.feasible, self.constraint_violation)]
        loc = np.array(loc)
        # vectorised replacement of accepted members across all state arrays
        self.population = np.where(loc[:, np.newaxis],
                                   trial_pop,
                                   self.population)
        self.population_energies = np.where(loc,
                                            trial_energies,
                                            self.population_energies)
        self.feasible = np.where(loc,
                                 feasible,
                                 self.feasible)
        self.constraint_violation = np.where(loc[:, np.newaxis],
                                             cv,
                                             self.constraint_violation)

        # make sure the best solution is updated if updating='deferred'.
        # put the lowest energy into the best solution position.
        self._promote_lowest_energy()

    return self.x, self.population_energies[0]
|
| 1649 |
+
|
| 1650 |
+
def _scale_parameters(self, trial):
|
| 1651 |
+
"""Scale from a number between 0 and 1 to parameters."""
|
| 1652 |
+
# trial either has shape (N, ) or (L, N), where L is the number of
|
| 1653 |
+
# solutions being scaled
|
| 1654 |
+
scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
|
| 1655 |
+
if np.count_nonzero(self.integrality):
|
| 1656 |
+
i = np.broadcast_to(self.integrality, scaled.shape)
|
| 1657 |
+
scaled[i] = np.round(scaled[i])
|
| 1658 |
+
return scaled
|
| 1659 |
+
|
| 1660 |
+
def _unscale_parameters(self, parameters):
|
| 1661 |
+
"""Scale from parameters to a number between 0 and 1."""
|
| 1662 |
+
return (parameters - self.__scale_arg1) * self.__recip_scale_arg2 + 0.5
|
| 1663 |
+
|
| 1664 |
+
def _ensure_constraint(self, trial):
|
| 1665 |
+
"""Make sure the parameters lie between the limits."""
|
| 1666 |
+
mask = np.bitwise_or(trial > 1, trial < 0)
|
| 1667 |
+
if oob := np.count_nonzero(mask):
|
| 1668 |
+
trial[mask] = self.random_number_generator.uniform(size=oob)
|
| 1669 |
+
|
| 1670 |
+
def _mutate_custom(self, candidate):
    # Dispatch mutation to a user-supplied strategy callable. `candidate`
    # is either a scalar index (single trial) or an array of indices
    # (vectorised path via `_mutate_many`).
    rng = self.random_number_generator
    msg = (
        "strategy must have signature"
        " f(candidate: int, population: np.ndarray, rng=None) returning an"
        " array of shape (N,)"
    )
    # the callable sees the population in real parameter space, not the
    # internal [0, 1) representation
    _population = self._scale_parameters(self.population)
    if not len(np.shape(candidate)):
        # single entry in population
        trial = self.strategy(candidate, _population, rng=rng)
        if trial.shape != (self.parameter_count,):
            raise RuntimeError(msg)
    else:
        S = candidate.shape[0]
        trial = np.array(
            [self.strategy(c, _population, rng=rng) for c in candidate],
            dtype=float
        )
        if trial.shape != (S, self.parameter_count):
            raise RuntimeError(msg)
    # convert the trial(s) back to the internal [0, 1) representation
    return self._unscale_parameters(trial)
|
| 1692 |
+
|
| 1693 |
+
def _mutate_many(self, candidates):
    """Create trial vectors based on a mutation strategy."""
    # Vectorised counterpart of `_mutate`: produces one trial per entry
    # of `candidates`, shape (S, N).
    rng = self.random_number_generator

    S = len(candidates)
    if callable(self.strategy):
        return self._mutate_custom(candidates)

    trial = np.copy(self.population[candidates])
    # 5 distinct donor indices per candidate (enough for any strategy)
    samples = np.array([self._select_samples(c, 5) for c in candidates])

    if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
        # this family also needs the candidate's own index
        bprime = self.mutation_func(candidates, samples)
    else:
        bprime = self.mutation_func(samples)

    fill_point = rng_integers(rng, self.parameter_count, size=S)
    crossovers = rng.uniform(size=(S, self.parameter_count))
    crossovers = crossovers < self.cross_over_probability
    if self.strategy in self._binomial:
        # the last one is always from the bprime vector for binomial
        # If you fill in modulo with a loop you have to set the last one to
        # true. If you don't use a loop then you can have any random entry
        # be True.
        i = np.arange(S)
        crossovers[i, fill_point[i]] = True
        trial = np.where(crossovers, bprime, trial)
        return trial

    elif self.strategy in self._exponential:
        crossovers[..., 0] = True
        # for each trial copy a contiguous (modulo N) run of parameters
        # from bprime, stopping at the first False crossover draw
        for j in range(S):
            i = 0
            init_fill = fill_point[j]
            while (i < self.parameter_count and crossovers[j, i]):
                trial[j, init_fill] = bprime[j, init_fill]
                init_fill = (init_fill + 1) % self.parameter_count
                i += 1

        return trial
|
| 1733 |
+
|
| 1734 |
+
def _mutate(self, candidate):
    """Create a trial vector based on a mutation strategy."""
    rng = self.random_number_generator

    if callable(self.strategy):
        # user-supplied strategy takes precedence over the built-ins
        return self._mutate_custom(candidate)

    # index at which crossover is guaranteed (binomial) or where the
    # exponential fill starts
    fill_point = rng_integers(rng, self.parameter_count)
    samples = self._select_samples(candidate, 5)

    trial = np.copy(self.population[candidate])

    if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
        # this family also needs the candidate's own index
        bprime = self.mutation_func(candidate, samples)
    else:
        bprime = self.mutation_func(samples)

    crossovers = rng.uniform(size=self.parameter_count)
    crossovers = crossovers < self.cross_over_probability
    if self.strategy in self._binomial:
        # the last one is always from the bprime vector for binomial
        # If you fill in modulo with a loop you have to set the last one to
        # true. If you don't use a loop then you can have any random entry
        # be True.
        crossovers[fill_point] = True
        trial = np.where(crossovers, bprime, trial)
        return trial

    elif self.strategy in self._exponential:
        i = 0
        crossovers[0] = True
        # copy a contiguous (modulo N) run of parameters from bprime,
        # stopping at the first False crossover draw
        while i < self.parameter_count and crossovers[i]:
            trial[fill_point] = bprime[fill_point]
            fill_point = (fill_point + 1) % self.parameter_count
            i += 1

        return trial
|
| 1771 |
+
|
| 1772 |
+
def _best1(self, samples):
|
| 1773 |
+
"""best1bin, best1exp"""
|
| 1774 |
+
# samples.shape == (S, 5)
|
| 1775 |
+
# or
|
| 1776 |
+
# samples.shape(5,)
|
| 1777 |
+
r0, r1 = samples[..., :2].T
|
| 1778 |
+
return (self.population[0] + self.scale *
|
| 1779 |
+
(self.population[r0] - self.population[r1]))
|
| 1780 |
+
|
| 1781 |
+
def _rand1(self, samples):
|
| 1782 |
+
"""rand1bin, rand1exp"""
|
| 1783 |
+
r0, r1, r2 = samples[..., :3].T
|
| 1784 |
+
return (self.population[r0] + self.scale *
|
| 1785 |
+
(self.population[r1] - self.population[r2]))
|
| 1786 |
+
|
| 1787 |
+
def _randtobest1(self, samples):
|
| 1788 |
+
"""randtobest1bin, randtobest1exp"""
|
| 1789 |
+
r0, r1, r2 = samples[..., :3].T
|
| 1790 |
+
bprime = np.copy(self.population[r0])
|
| 1791 |
+
bprime += self.scale * (self.population[0] - bprime)
|
| 1792 |
+
bprime += self.scale * (self.population[r1] -
|
| 1793 |
+
self.population[r2])
|
| 1794 |
+
return bprime
|
| 1795 |
+
|
| 1796 |
+
def _currenttobest1(self, candidate, samples):
|
| 1797 |
+
"""currenttobest1bin, currenttobest1exp"""
|
| 1798 |
+
r0, r1 = samples[..., :2].T
|
| 1799 |
+
bprime = (self.population[candidate] + self.scale *
|
| 1800 |
+
(self.population[0] - self.population[candidate] +
|
| 1801 |
+
self.population[r0] - self.population[r1]))
|
| 1802 |
+
return bprime
|
| 1803 |
+
|
| 1804 |
+
def _best2(self, samples):
|
| 1805 |
+
"""best2bin, best2exp"""
|
| 1806 |
+
r0, r1, r2, r3 = samples[..., :4].T
|
| 1807 |
+
bprime = (self.population[0] + self.scale *
|
| 1808 |
+
(self.population[r0] + self.population[r1] -
|
| 1809 |
+
self.population[r2] - self.population[r3]))
|
| 1810 |
+
|
| 1811 |
+
return bprime
|
| 1812 |
+
|
| 1813 |
+
def _rand2(self, samples):
|
| 1814 |
+
"""rand2bin, rand2exp"""
|
| 1815 |
+
r0, r1, r2, r3, r4 = samples[..., :5].T
|
| 1816 |
+
bprime = (self.population[r0] + self.scale *
|
| 1817 |
+
(self.population[r1] + self.population[r2] -
|
| 1818 |
+
self.population[r3] - self.population[r4]))
|
| 1819 |
+
|
| 1820 |
+
return bprime
|
| 1821 |
+
|
| 1822 |
+
def _select_samples(self, candidate, number_samples):
|
| 1823 |
+
"""
|
| 1824 |
+
obtain random integers from range(self.num_population_members),
|
| 1825 |
+
without replacement. You can't have the original candidate either.
|
| 1826 |
+
"""
|
| 1827 |
+
self.random_number_generator.shuffle(self._random_population_index)
|
| 1828 |
+
idxs = self._random_population_index[:number_samples + 1]
|
| 1829 |
+
return idxs[idxs != candidate][:number_samples]
|
| 1830 |
+
|
| 1831 |
+
|
| 1832 |
+
class _ConstraintWrapper:
    """Object to wrap/evaluate user defined constraints.

    Very similar in practice to `PreparedConstraint`, except that no evaluation
    of jac/hess is performed (explicit or implicit).

    If created successfully, it will contain the attributes listed below.

    Parameters
    ----------
    constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`}
        Constraint to check and prepare.
    x0 : array_like
        Initial vector of independent variables, shape (N,)

    Attributes
    ----------
    fun : callable
        Function defining the constraint wrapped by one of the convenience
        classes.
    bounds : 2-tuple
        Contains lower and upper bounds for the constraints --- lb and ub.
        These are converted to ndarray and have a size equal to the number of
        the constraints.

    Notes
    -----
    _ConstraintWrapper.fun and _ConstraintWrapper.violation can get sent
    arrays of shape (N, S) or (N,), where S is the number of vectors of shape
    (N,) to consider constraints for.
    """
    def __init__(self, constraint, x0):
        self.constraint = constraint

        # build a uniform `fun(x)` evaluator for each supported constraint
        # flavour
        if isinstance(constraint, NonlinearConstraint):
            def fun(x):
                x = np.asarray(x)
                return np.atleast_1d(constraint.fun(x))
        elif isinstance(constraint, LinearConstraint):
            def fun(x):
                if issparse(constraint.A):
                    A = constraint.A
                else:
                    A = np.atleast_2d(constraint.A)

                res = A.dot(x)
                # x either has shape (N, S) or (N)
                # (M, N) x (N, S) --> (M, S)
                # (M, N) x (N,) --> (M,)
                # However, if (M, N) is a matrix then:
                # (M, N) * (N,) --> (M, 1), we need this to be (M,)
                if x.ndim == 1 and res.ndim == 2:
                    # deal with case that constraint.A is an np.matrix
                    # see gh20041
                    res = np.asarray(res)[:, 0]

                return res
        elif isinstance(constraint, Bounds):
            def fun(x):
                return np.asarray(x)
        else:
            raise ValueError("`constraint` of an unknown type is passed.")

        self.fun = fun

        lb = np.asarray(constraint.lb, dtype=float)
        ub = np.asarray(constraint.ub, dtype=float)

        x0 = np.asarray(x0)

        # find out the number of constraints
        f0 = fun(x0)
        self.num_constr = m = f0.size
        self.parameter_count = x0.size

        # scalar bounds are expanded to one entry per constraint component
        if lb.ndim == 0:
            lb = np.resize(lb, m)
        if ub.ndim == 0:
            ub = np.resize(ub, m)

        self.bounds = (lb, ub)

    def __call__(self, x):
        # evaluate the wrapped constraint function, always returning at
        # least a 1-D array
        return np.atleast_1d(self.fun(x))

    def violation(self, x):
        """How much the constraint is exceeded by.

        Parameters
        ----------
        x : array-like
            Vector of independent variables, (N, S), where N is number of
            parameters and S is the number of solutions to be investigated.

        Returns
        -------
        excess : array-like
            How much the constraint is exceeded by, for each of the
            constraints specified by `_ConstraintWrapper.fun`.
            Has shape (M, S) where M is the number of constraint components.
        """
        # expect ev to have shape (num_constr, S) or (num_constr,)
        ev = self.fun(np.asarray(x))

        try:
            # transpose so the (M,) bounds broadcast against (S, M)
            excess_lb = np.maximum(self.bounds[0] - ev.T, 0)
            excess_ub = np.maximum(ev.T - self.bounds[1], 0)
        except ValueError as e:
            raise RuntimeError("An array returned from a Constraint has"
                               " the wrong shape. If `vectorized is False`"
                               " the Constraint should return an array of"
                               " shape (M,). If `vectorized is True` then"
                               " the Constraint must return an array of"
                               " shape (M, S), where S is the number of"
                               " solution vectors and M is the number of"
                               " constraint components in a given"
                               " Constraint object.") from e

        v = (excess_lb + excess_ub).T
        return v
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_direct.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (43.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_dual_annealing.py
ADDED
|
@@ -0,0 +1,732 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dual Annealing implementation.
|
| 2 |
+
# Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>,
|
| 3 |
+
# Yang Xiang <yang.xiang@pmi.com>
|
| 4 |
+
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
A Dual Annealing global optimization algorithm
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
from scipy.optimize import OptimizeResult
|
| 12 |
+
from scipy.optimize import minimize, Bounds
|
| 13 |
+
from scipy.special import gammaln
|
| 14 |
+
from scipy._lib._util import check_random_state
|
| 15 |
+
from scipy.optimize._constraints import new_bounds_to_old
|
| 16 |
+
|
| 17 |
+
__all__ = ['dual_annealing']
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class VisitingDistribution:
    """
    Class used to generate new coordinates based on the distorted
    Cauchy-Lorentz distribution. Depending on the steps within the strategy
    chain, the class implements the strategy for generating new location
    changes.

    Parameters
    ----------
    lb : array_like
        A 1-D NumPy ndarray containing lower bounds of the generated
        components. Neither NaN or inf are allowed.
    ub : array_like
        A 1-D NumPy ndarray containing upper bounds for the generated
        components. Neither NaN or inf are allowed.
    visiting_param : float
        Parameter for visiting distribution. Default value is 2.62.
        Higher values give the visiting distribution a heavier tail, this
        makes the algorithm jump to a more distant region.
        The value range is (1, 3]. Its value is fixed for the life of the
        object.
    rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
        A `~numpy.random.RandomState`, `~numpy.random.Generator` object
        for using the current state of the created random generator container.

    """
    # visits drawn beyond +/- TAIL_LIMIT are replaced by a uniform draw
    # scaled to the limit, keeping steps bounded
    TAIL_LIMIT = 1.e8
    # minimum distance kept from the lower bound after wrapping
    MIN_VISIT_BOUND = 1.e-10

    def __init__(self, lb, ub, visiting_param, rand_gen):
        # if you wish to make _visiting_param adjustable during the life of
        # the object then _factor2, _factor3, _factor5, _d1, _factor6 will
        # have to be dynamically calculated in `visit_fn`. They're factored
        # out here so they don't need to be recalculated all the time.
        self._visiting_param = visiting_param
        self.rand_gen = rand_gen
        self.lower = lb
        self.upper = ub
        self.bound_range = ub - lb

        # these are invariant numbers unless visiting_param changes
        self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
            self._visiting_param - 1.0))
        self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
                               / (self._visiting_param - 1.0))
        self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
            3.0 - self._visiting_param))

        self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
        self._d1 = 2.0 - self._factor5
        self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
            np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))

    def visiting(self, x, step, temperature):
        """ Based on the step in the strategy chain, new coordinates are
        generated by changing all components is the same time or only
        one of them, the new values are computed with visit_fn method
        """
        dim = x.size
        if step < dim:
            # Changing all coordinates with a new visiting value
            visits = self.visit_fn(temperature, dim)
            upper_sample, lower_sample = self.rand_gen.uniform(size=2)
            # clip extreme tail draws to a bounded random magnitude
            visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
            visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
            x_visit = visits + x
            # wrap back into [lower, upper] (the double fmod handles
            # negative offsets)
            a = x_visit - self.lower
            b = np.fmod(a, self.bound_range) + self.bound_range
            x_visit = np.fmod(b, self.bound_range) + self.lower
            # keep a small margin from the lower bound
            x_visit[np.fabs(
                x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
        else:
            # Changing only one coordinate at a time based on strategy
            # chain step
            x_visit = np.copy(x)
            visit = self.visit_fn(temperature, 1)[0]
            if visit > self.TAIL_LIMIT:
                visit = self.TAIL_LIMIT * self.rand_gen.uniform()
            elif visit < -self.TAIL_LIMIT:
                visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
            index = step - dim
            x_visit[index] = visit + x[index]
            # same wrap-into-bounds scheme as above, for one component
            a = x_visit[index] - self.lower[index]
            b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
            x_visit[index] = np.fmod(b, self.bound_range[
                index]) + self.lower[index]
            if np.fabs(x_visit[index] - self.lower[
                    index]) < self.MIN_VISIT_BOUND:
                x_visit[index] += self.MIN_VISIT_BOUND
        return x_visit

    def visit_fn(self, temperature, dim):
        """ Formula Visita from p. 405 of reference [2] """
        x, y = self.rand_gen.normal(size=(dim, 2)).T

        factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
        factor4 = self._factor4_p * factor1

        # sigmax
        x *= np.exp(-(self._visiting_param - 1.0) * np.log(
            self._factor6 / factor4) / (3.0 - self._visiting_param))

        den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
                     (3.0 - self._visiting_param))

        return x / den
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class EnergyState:
    """
    Class used to record the energy state. At any time, it knows what is the
    currently used coordinates and the most recent best location.

    Parameters
    ----------
    lower : array_like
        A 1-D NumPy ndarray containing lower bounds for generating an initial
        random components in the `reset` method.
    upper : array_like
        A 1-D NumPy ndarray containing upper bounds for generating an initial
        random components in the `reset` method
        components. Neither NaN or inf are allowed.
    callback : callable, ``callback(x, f, context)``, optional
        A callback function which will be called for all minima found.
        ``x`` and ``f`` are the coordinates and function value of the
        latest minimum found, and `context` has value in [0, 1, 2]
    """
    # Maximum number of trials for generating a valid starting point
    MAX_REINIT_COUNT = 1000

    def __init__(self, lower, upper, callback=None):
        # best-so-far energy/location stay None until `reset` is called
        self.ebest = None
        self.current_energy = None
        self.current_location = None
        self.xbest = None
        self.lower = lower
        self.upper = upper
        self.callback = callback

    def reset(self, func_wrapper, rand_gen, x0=None):
        """
        Initialize current location is the search domain. If `x0` is not
        provided, a random location within the bounds is generated.
        """
        if x0 is None:
            self.current_location = rand_gen.uniform(self.lower, self.upper,
                                                     size=len(self.lower))
        else:
            self.current_location = np.copy(x0)
        init_error = True
        reinit_counter = 0
        while init_error:
            self.current_energy = func_wrapper.fun(self.current_location)
            if self.current_energy is None:
                raise ValueError('Objective function is returning None')
            # `np.isfinite` is already False for NaN as well as +/-inf, so
            # a single check covers both failure modes (the previous
            # additional `np.isnan` test was redundant).
            if not np.isfinite(self.current_energy):
                if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
                    init_error = False
                    message = (
                        'Stopping algorithm because function '
                        'create NaN or (+/-) infinity values even with '
                        'trying new random parameters'
                    )
                    raise ValueError(message)
                # try again from a fresh random location within the bounds
                self.current_location = rand_gen.uniform(self.lower,
                                                         self.upper,
                                                         size=self.lower.size)
                reinit_counter += 1
            else:
                init_error = False
            # If first time reset, initialize ebest and xbest
            if self.ebest is None and self.xbest is None:
                self.ebest = self.current_energy
                self.xbest = np.copy(self.current_location)
            # Otherwise, we keep them in case of reannealing reset

    def update_best(self, e, x, context):
        """Record a new overall best energy/location and invoke the user
        callback (if any).

        Returns a message string if the callback requests early stopping,
        otherwise None.
        """
        self.ebest = e
        self.xbest = np.copy(x)
        if self.callback is not None:
            val = self.callback(x, e, context)
            # any truthy return value from the callback stops the run
            if val is not None and val:
                return ('Callback function requested to stop early by '
                        'returning True')

    def update_current(self, e, x):
        """Set the current energy and location of the annealing chain."""
        self.current_energy = e
        self.current_location = np.copy(x)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class StrategyChain:
    """
    Class that implements within a Markov chain the strategy for location
    acceptance and local search decision making.

    Parameters
    ----------
    acceptance_param : float
        Parameter for acceptance distribution. It is used to control the
        probability of acceptance. The lower the acceptance parameter, the
        smaller the probability of acceptance. Default value is -5.0 with
        a range (-1e4, -5].
    visit_dist : VisitingDistribution
        Instance of `VisitingDistribution` class.
    func_wrapper : ObjectiveFunWrapper
        Instance of `ObjectiveFunWrapper` class.
    minimizer_wrapper: LocalSearchWrapper
        Instance of `LocalSearchWrapper` class.
    rand_gen : {None, int, `numpy.random.Generator`,
                `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
    energy_state: EnergyState
        Instance of `EnergyState` class.

    """

    def __init__(self, acceptance_param, visit_dist, func_wrapper,
                 minimizer_wrapper, rand_gen, energy_state):
        # Local strategy chain minimum energy and location
        self.emin = energy_state.current_energy
        self.xmin = np.array(energy_state.current_location)
        # Global optimizer state
        self.energy_state = energy_state
        # Acceptance parameter
        self.acceptance_param = acceptance_param
        # Visiting distribution instance
        self.visit_dist = visit_dist
        # Wrapper to objective function
        self.func_wrapper = func_wrapper
        # Wrapper to the local minimizer
        self.minimizer_wrapper = minimizer_wrapper
        # Counter of chain steps without improvement; once it reaches
        # not_improved_max_idx a local search is forced (see local_search).
        self.not_improved_idx = 0
        self.not_improved_max_idx = 1000
        self._rand_gen = rand_gen
        # Effective temperature of the current annealing step; set by run().
        self.temperature_step = 0
        # Threshold used in the probabilistic local-search decision.
        self.K = 100 * len(energy_state.current_location)

    def accept_reject(self, j, e, x_visit):
        """Metropolis-like acceptance test for a non-improving visit.

        Accepts the candidate ``x_visit`` with energy ``e`` with probability
        derived from the generalized acceptance distribution controlled by
        ``acceptance_param``; ``j`` is the index within the chain.
        """
        r = self._rand_gen.uniform()
        pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *
            (e - self.energy_state.current_energy) / self.temperature_step)
        if pqv_temp <= 0.:
            # Outside the support of the acceptance distribution:
            # zero acceptance probability.
            pqv = 0.
        else:
            # pqv = pqv_temp ** (1 / (1 - acceptance_param)), computed in
            # log space.
            pqv = np.exp(np.log(pqv_temp) / (
                1. - self.acceptance_param))

        if r <= pqv:
            # We accept the new location and update state
            self.energy_state.update_current(e, x_visit)
            self.xmin = np.copy(self.energy_state.current_location)

        # No improvement for a long time
        if self.not_improved_idx >= self.not_improved_max_idx:
            if j == 0 or self.energy_state.current_energy < self.emin:
                self.emin = self.energy_state.current_energy
                self.xmin = np.copy(self.energy_state.current_location)

    def run(self, step, temperature):
        """Run one full strategy chain at the given annealing ``step`` and
        ``temperature``.

        Returns a termination message string when the budget is exhausted or
        the callback asked to stop, otherwise ``None``.
        """
        self.temperature_step = temperature / float(step + 1)
        self.not_improved_idx += 1
        # Two chain iterations per dimension of the search space.
        for j in range(self.energy_state.current_location.size * 2):
            if j == 0:
                # Flag consumed later by local_search(); on the very first
                # step it is seeded True so an initial local search runs.
                if step == 0:
                    self.energy_state_improved = True
                else:
                    self.energy_state_improved = False
            x_visit = self.visit_dist.visiting(
                self.energy_state.current_location, j, temperature)
            # Calling the objective function
            e = self.func_wrapper.fun(x_visit)
            if e < self.energy_state.current_energy:
                # We have got a better energy value
                self.energy_state.update_current(e, x_visit)
                if e < self.energy_state.ebest:
                    val = self.energy_state.update_best(e, x_visit, 0)
                    if val is not None:
                        if val:
                            # Callback requested early termination.
                            return val
                    self.energy_state_improved = True
                    self.not_improved_idx = 0
            else:
                # We have not improved but do we accept the new location?
                self.accept_reject(j, e, x_visit)
            if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function call reached '
                        'during annealing')
        # End of StrategyChain loop

    def local_search(self):
        """Decide whether to run the local minimizer and run it if so.

        Returns a termination message string (budget exhausted or callback
        stop request) or ``None`` to continue annealing.
        """
        # Decision making for performing a local search
        # based on strategy chain results
        # If energy has been improved or no improvement since too long,
        # performing a local search with the best strategy chain location
        if self.energy_state_improved:
            # Global energy has improved, let's see if LS improves further
            e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
                                                       self.energy_state.ebest)
            if e < self.energy_state.ebest:
                self.not_improved_idx = 0
                val = self.energy_state.update_best(e, x, 1)
                if val is not None:
                    if val:
                        return val
                self.energy_state.update_current(e, x)
            if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function call reached '
                        'during local search')
        # Check probability of a need to perform a LS even if no improvement
        do_ls = False
        if self.K < 90 * len(self.energy_state.current_location):
            pls = np.exp(self.K * (
                self.energy_state.ebest - self.energy_state.current_energy) /
                self.temperature_step)
            if pls >= self._rand_gen.uniform():
                do_ls = True
        # Global energy not improved, let's see what LS gives
        # on the best strategy chain location
        if self.not_improved_idx >= self.not_improved_max_idx:
            do_ls = True
        if do_ls:
            e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
            self.xmin = np.copy(x)
            self.emin = e
            self.not_improved_idx = 0
            # After the first forced LS, tighten the no-improvement window
            # to the problem dimensionality.
            self.not_improved_max_idx = self.energy_state.current_location.size
            if e < self.energy_state.ebest:
                val = self.energy_state.update_best(
                    self.emin, self.xmin, 2)
                if val is not None:
                    if val:
                        return val
                self.energy_state.update_current(e, x)
            if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function call reached '
                        'during dual annealing')
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
class ObjectiveFunWrapper:
    """Wrap the user objective function and count evaluations.

    Keeps running counters of objective (``nfev``), gradient (``ngev``) and
    hessian (``nhev``) evaluations, plus the soft evaluation budget
    ``maxfun`` checked by the annealing loops.
    """

    def __init__(self, func, maxfun=1e7, *args):
        self.func = func
        self.args = args
        # Evaluation counters: objective / gradient / hessian calls.
        self.nfev = self.ngev = self.nhev = 0
        self.maxfun = maxfun

    def fun(self, x):
        """Evaluate the objective at ``x``, counting the call."""
        self.nfev += 1
        return self.func(x, *self.args)
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
class LocalSearchWrapper:
    """
    Class used to wrap around the minimizer used for local search
    Default local minimizer is SciPy minimizer L-BFGS-B
    """

    # Heuristic bounds for the local-search iteration budget:
    # LS_MAXITER_RATIO iterations per dimension, clamped to
    # [LS_MAXITER_MIN, LS_MAXITER_MAX].
    LS_MAXITER_RATIO = 6
    LS_MAXITER_MIN = 100
    LS_MAXITER_MAX = 1000

    def __init__(self, search_bounds, func_wrapper, *args, **kwargs):
        """Configure the local minimizer.

        ``search_bounds`` is a sequence of (min, max) pairs; ``kwargs`` are
        forwarded to `scipy.optimize.minimize`. When no kwargs are supplied,
        a bounded L-BFGS-B setup is used by default.
        """
        self.func_wrapper = func_wrapper
        self.kwargs = kwargs
        self.jac = self.kwargs.get('jac', None)
        self.hess = self.kwargs.get('hess', None)
        self.hessp = self.kwargs.get('hessp', None)
        # `args` is passed separately (already bound in func_wrapper), so a
        # user-supplied 'args' entry must not reach `minimize`.
        self.kwargs.pop("args", None)
        self.minimizer = minimize
        # Transpose [(lo, hi), ...] into separate lower/upper arrays.
        bounds_list = list(zip(*search_bounds))
        self.lower = np.array(bounds_list[0])
        self.upper = np.array(bounds_list[1])

        # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
        if not self.kwargs:
            n = len(self.lower)
            ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
                                  self.LS_MAXITER_MIN),
                              self.LS_MAXITER_MAX)
            self.kwargs['method'] = 'L-BFGS-B'
            self.kwargs['options'] = {
                'maxiter': ls_max_iter,
            }
            self.kwargs['bounds'] = list(zip(self.lower, self.upper))
        else:
            # Re-wrap user derivatives so that the fixed extra `args` are
            # applied, matching how the objective itself is wrapped.
            if callable(self.jac):
                def wrapped_jac(x):
                    return self.jac(x, *args)
                self.kwargs['jac'] = wrapped_jac
            if callable(self.hess):
                def wrapped_hess(x):
                    return self.hess(x, *args)
                self.kwargs['hess'] = wrapped_hess
            if callable(self.hessp):
                def wrapped_hessp(x, p):
                    return self.hessp(x, p, *args)
                self.kwargs['hessp'] = wrapped_hessp

    def local_search(self, x, e):
        """Run a local minimization starting from ``x`` (energy ``e``).

        Returns ``(energy, location)``: the minimizer's result when it is
        finite, within bounds and improves on ``e``; otherwise the original
        ``(e, x)`` unchanged.
        """
        # Run local search from the given x location where energy value is e
        x_tmp = np.copy(x)
        mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
        # Propagate derivative-evaluation counts to the global counters.
        if 'njev' in mres:
            self.func_wrapper.ngev += mres.njev
        if 'nhev' in mres:
            self.func_wrapper.nhev += mres.nhev
        # Check if is valid value
        is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
        in_bounds = np.all(mres.x >= self.lower) and np.all(
            mres.x <= self.upper)
        is_valid = is_finite and in_bounds

        # Use the new point only if it is valid and returns a better result
        if is_valid and mres.fun < e:
            return mres.fun, mres.x
        else:
            return e, x_tmp
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def dual_annealing(func, bounds, args=(), maxiter=1000,
                   minimizer_kwargs=None, initial_temp=5230.,
                   restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
                   maxfun=1e7, seed=None, no_local_search=False,
                   callback=None, x0=None):
    """
    Find the global minimum of a function using Dual Annealing.

    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the form
        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
        and ``args`` is a tuple of any additional fixed parameters needed to
        completely specify the function.
    bounds : sequence or `Bounds`
        Bounds for variables. There are two ways to specify the bounds:

        1. Instance of `Bounds` class.
        2. Sequence of ``(min, max)`` pairs for each element in `x`.

    args : tuple, optional
        Any additional fixed parameters needed to completely specify the
        objective function.
    maxiter : int, optional
        The maximum number of global search iterations. Default value is 1000.
    minimizer_kwargs : dict, optional
        Keyword arguments to be passed to the local minimizer
        (`minimize`). An important option could be ``method`` for the minimizer
        method to use.
        If no keyword arguments are provided, the local minimizer defaults to
        'L-BFGS-B' and uses the already supplied bounds. If `minimizer_kwargs`
        is specified, then the dict must contain all parameters required to
        control the local minimization. `args` is ignored in this dict, as it is
        passed automatically. `bounds` is not automatically passed on to the
        local minimizer as the method may not support them.
    initial_temp : float, optional
        The initial temperature, use higher values to facilitates a wider
        search of the energy landscape, allowing dual_annealing to escape
        local minima that it is trapped in. Default value is 5230. Range is
        (0.01, 5.e4].
    restart_temp_ratio : float, optional
        During the annealing process, temperature is decreasing, when it
        reaches ``initial_temp * restart_temp_ratio``, the reannealing process
        is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
    visit : float, optional
        Parameter for visiting distribution. Default value is 2.62. Higher
        values give the visiting distribution a heavier tail, this makes
        the algorithm jump to a more distant region. The value range is (1, 3].
    accept : float, optional
        Parameter for acceptance distribution. It is used to control the
        probability of acceptance. The lower the acceptance parameter, the
        smaller the probability of acceptance. Default value is -5.0 with
        a range (-1e4, -5].
    maxfun : int, optional
        Soft limit for the number of objective function calls. If the
        algorithm is in the middle of a local search, this number will be
        exceeded, the algorithm will stop just after the local search is
        done. Default value is 1e7.
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        Specify `seed` for repeatable minimizations. The random numbers
        generated with this seed only affect the visiting distribution function
        and new coordinates generation.
    no_local_search : bool, optional
        If `no_local_search` is set to True, a traditional Generalized
        Simulated Annealing will be performed with no local search
        strategy applied.
    callback : callable, optional
        A callback function with signature ``callback(x, f, context)``,
        which will be called for all minima found.
        ``x`` and ``f`` are the coordinates and function value of the
        latest minimum found, and ``context`` has value in [0, 1, 2], with the
        following meaning:

        - 0: minimum detected in the annealing process.
        - 1: detection occurred in the local search process.
        - 2: detection done in the dual annealing process.

        If the callback implementation returns True, the algorithm will stop.
    x0 : ndarray, shape(n,), optional
        Coordinates of a single N-D starting point.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a `OptimizeResult` object.
        Important attributes are: ``x`` the solution array, ``fun`` the value
        of the function at the solution, and ``message`` which describes the
        cause of the termination.
        See `OptimizeResult` for a description of other attributes.

    Notes
    -----
    This function implements the Dual Annealing optimization. This stochastic
    approach derived from [3]_ combines the generalization of CSA (Classical
    Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
    to a strategy for applying a local search on accepted locations [4]_.
    An alternative implementation of this same algorithm is described in [5]_
    and benchmarks are presented in [6]_. This approach introduces an advanced
    method to refine the solution found by the generalized annealing
    process. This algorithm uses a distorted Cauchy-Lorentz visiting
    distribution, with its shape controlled by the parameter :math:`q_{v}`

    .. math::

        g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
        \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
        \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
        \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
        \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}

    Where :math:`t` is the artificial time. This visiting distribution is used
    to generate a trial jump distance :math:`\\Delta x(t)` of variable
    :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.

    From the starting point, after calling the visiting distribution
    function, the acceptance probability is computed as follows:

    .. math::

        p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
        \\frac{1}{1-q_{a}}}\\}}

    Where :math:`q_{a}` is a acceptance parameter. For :math:`q_{a}<1`, zero
    acceptance probability is assigned to the cases where

    .. math::

        [1-(1-q_{a}) \\beta \\Delta E] < 0

    The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to

    .. math::

        T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
        1 + t\\right)^{q_{v}-1}-1}

    Where :math:`q_{v}` is the visiting parameter.

    .. versionadded:: 1.2.0

    References
    ----------
    .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
        statistics. Journal of Statistical Physics, 52, 479-487 (1998).
    .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
        Physica A, 233, 395-406 (1996).
    .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
        Annealing Algorithm and Its Application to the Thomson Model.
        Physics Letters A, 233, 216-220 (1997).
    .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
        Annealing. Physical Review E, 62, 4473 (2000).
    .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
        Simulated Annealing for Efficient Global Optimization: the GenSA
        Package for R. The R Journal, Volume 5/1 (2013).
    .. [6] Mullen, K. Continuous Global Optimization in R. Journal of
        Statistical Software, 60(6), 1 - 45, (2014).
        :doi:`10.18637/jss.v060.i06`

    Examples
    --------
    The following example is a 10-D problem, with many local minima.
    The function involved is called Rastrigin
    (https://en.wikipedia.org/wiki/Rastrigin_function)

    >>> import numpy as np
    >>> from scipy.optimize import dual_annealing
    >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
    >>> lw = [-5.12] * 10
    >>> up = [5.12] * 10
    >>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
    >>> ret.x
    array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
           -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
           -6.05775280e-09, -5.00668935e-09]) # random
    >>> ret.fun
    0.000000

    """

    # Normalize a `Bounds` instance to the legacy [(lo, hi), ...] form.
    if isinstance(bounds, Bounds):
        bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))

    if x0 is not None and not len(x0) == len(bounds):
        raise ValueError('Bounds size does not match x0')

    # Transpose bounds into separate lower/upper arrays.
    lu = list(zip(*bounds))
    lower = np.array(lu[0])
    upper = np.array(lu[1])
    # Check that restart temperature ratio is correct
    if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
        raise ValueError('Restart temperature ratio has to be in range (0, 1)')
    # Checking bounds are valid
    if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
            np.isnan(lower)) or np.any(np.isnan(upper))):
        raise ValueError('Some bounds values are inf values or nan values')
    # Checking that bounds are consistent
    if not np.all(lower < upper):
        raise ValueError('Bounds are not consistent min < max')
    # Checking that bounds are the same length
    if not len(lower) == len(upper):
        raise ValueError('Bounds do not have the same dimensions')

    # Wrapper for the objective function
    func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)

    # minimizer_kwargs has to be a dict, not None
    minimizer_kwargs = minimizer_kwargs or {}

    minimizer_wrapper = LocalSearchWrapper(
        bounds, func_wrapper, *args, **minimizer_kwargs)

    # Initialization of random Generator for reproducible runs if seed provided
    rand_state = check_random_state(seed)
    # Initialization of the energy state
    energy_state = EnergyState(lower, upper, callback)
    energy_state.reset(func_wrapper, rand_state, x0)
    # Minimum value of annealing temperature reached to perform
    # re-annealing
    temperature_restart = initial_temp * restart_temp_ratio
    # VisitingDistribution instance
    visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
    # Strategy chain instance
    strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
                                   minimizer_wrapper, rand_state, energy_state)
    need_to_stop = False
    iteration = 0
    message = []
    # OptimizeResult object to be returned
    optimize_res = OptimizeResult()
    optimize_res.success = True
    optimize_res.status = 0

    # t1 is the constant numerator of the temperature decay schedule
    # T(i) = initial_temp * t1 / t2(i) (see Notes above).
    t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
    # Run the search loop; the outer while restarts the inner loop after
    # each re-annealing reset.
    while not need_to_stop:
        for i in range(maxiter):
            # Compute temperature for this step
            s = float(i) + 2.0
            t2 = np.exp((visit - 1) * np.log(s)) - 1.0
            temperature = initial_temp * t1 / t2
            # `iteration` counts across restarts, so the budget is global.
            if iteration >= maxiter:
                message.append("Maximum number of iteration reached")
                need_to_stop = True
                break
            # Need a re-annealing process?
            if temperature < temperature_restart:
                energy_state.reset(func_wrapper, rand_state)
                break
            # starting strategy chain
            val = strategy_chain.run(i, temperature)
            if val is not None:
                # Non-None value is a termination message.
                message.append(val)
                need_to_stop = True
                optimize_res.success = False
                break
            # Possible local search at the end of the strategy chain
            if not no_local_search:
                val = strategy_chain.local_search()
                if val is not None:
                    message.append(val)
                    need_to_stop = True
                    optimize_res.success = False
                    break
            iteration += 1

    # Setting the OptimizeResult values
    optimize_res.x = energy_state.xbest
    optimize_res.fun = energy_state.ebest
    optimize_res.nit = iteration
    optimize_res.nfev = func_wrapper.nfev
    optimize_res.njev = func_wrapper.ngev
    optimize_res.nhev = func_wrapper.nhev
    optimize_res.message = message
    return optimize_res
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_isotonic.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
from typing import TYPE_CHECKING
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from ._optimize import OptimizeResult
|
| 7 |
+
from ._pava_pybind import pava
|
| 8 |
+
|
| 9 |
+
if TYPE_CHECKING:
|
| 10 |
+
import numpy.typing as npt
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = ["isotonic_regression"]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def isotonic_regression(
    y: npt.ArrayLike,
    *,
    weights: npt.ArrayLike | None = None,
    increasing: bool = True,
) -> OptimizeResult:
    r"""Nonparametric isotonic regression.

    A (not strictly) monotonically increasing array `x` with the same length
    as `y` is calculated by the pool adjacent violators algorithm (PAVA), see
    [1]_. See the Notes section for more details.

    Parameters
    ----------
    y : (N,) array_like
        Response variable.
    weights : (N,) array_like or None
        Case weights.
    increasing : bool
        If True, fit monotonic increasing, i.e. isotonic, regression.
        If False, fit a monotonic decreasing, i.e. antitonic, regression.
        Default is True.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are:

        - ``x``: The isotonic regression solution, i.e. an increasing (or
          decreasing) array of the same length than y, with elements in the
          range from min(y) to max(y).
        - ``weights`` : Array with the sum of case weights for each block
          (or pool) B.
        - ``blocks``: Array of length B+1 with the indices of the start
          positions of each block (or pool) B. The j-th block is given by
          ``x[blocks[j]:blocks[j+1]]`` for which all values are the same.

    Notes
    -----
    Given data :math:`y` and case weights :math:`w`, the isotonic regression
    solves the following optimization problem:

    .. math::

        \operatorname{argmin}_{x_i} \sum_i w_i (y_i - x_i)^2 \quad
        \text{subject to } x_i \leq x_j \text{ whenever } i \leq j \,.

    For every input value :math:`y_i`, it generates a value :math:`x_i` such
    that :math:`x` is increasing (but not strictly), i.e.
    :math:`x_i \leq x_{i+1}`. This is accomplished by the PAVA.
    The solution consists of pools or blocks, i.e. neighboring elements of
    :math:`x`, e.g. :math:`x_i` and :math:`x_{i+1}`, that all have the same
    value.

    Most interestingly, the solution stays the same if the squared loss is
    replaced by the wide class of Bregman functions which are the unique
    class of strictly consistent scoring functions for the mean, see [2]_
    and references therein.

    The implemented version of PAVA according to [1]_ has a computational
    complexity of O(N) with input size N.

    References
    ----------
    .. [1] Busing, F. M. T. A. (2022).
           Monotone Regression: A Simple and Fast O(n) PAVA Implementation.
           Journal of Statistical Software, Code Snippets, 102(1), 1-25.
           :doi:`10.18637/jss.v102.c01`
    .. [2] Jordan, A.I., Mühlemann, A. & Ziegel, J.F.
           Characterizing the optimal solutions to the isotonic regression
           problem for identifiable functionals.
           Ann Inst Stat Math 74, 489-514 (2022).
           :doi:`10.1007/s10463-021-00808-0`

    Examples
    --------
    This example demonstrates that ``isotonic_regression`` really solves a
    constrained optimization problem.

    >>> import numpy as np
    >>> from scipy.optimize import isotonic_regression, minimize
    >>> y = [1.5, 1.0, 4.0, 6.0, 5.7, 5.0, 7.8, 9.0, 7.5, 9.5, 9.0]
    >>> def objective(yhat, y):
    ...     return np.sum((yhat - y)**2)
    >>> def constraint(yhat, y):
    ...     # This is for a monotonically increasing regression.
    ...     return np.diff(yhat)
    >>> result = minimize(objective, x0=y, args=(y,),
    ...                   constraints=[{'type': 'ineq',
    ...                                 'fun': lambda x: constraint(x, y)}])
    >>> result.x
    array([1.25      , 1.25      , 4.        , 5.56666667, 5.56666667,
           5.56666667, 7.8       , 8.25      , 8.25      , 9.25      ,
           9.25      ])
    >>> result = isotonic_regression(y)
    >>> result.x
    array([1.25      , 1.25      , 4.        , 5.56666667, 5.56666667,
           5.56666667, 7.8       , 8.25      , 8.25      , 9.25      ,
           9.25      ])

    The big advantage of ``isotonic_regression`` compared to calling
    ``minimize`` is that it is more user friendly, i.e. one does not need to
    define objective and constraint functions, and that it is orders of
    magnitudes faster. On commodity hardware (in 2023), for normal distributed
    input y of length 1000, the minimizer takes about 4 seconds, while
    ``isotonic_regression`` takes about 200 microseconds.
    """
    yarr = np.atleast_1d(y)  # Check yarr.ndim == 1 is implicit (pybind11) in pava.
    # Antitonic regression is handled by reversing the input, fitting an
    # isotonic regression and reversing the result back at the end.
    order = slice(None) if increasing else slice(None, None, -1)
    # pava works in place, so hand it a fresh C-contiguous float64 copy.
    x = np.array(yarr[order], order="C", dtype=np.float64, copy=True)
    if weights is None:
        # Unweighted case: unit weight per observation.
        wx = np.ones_like(yarr, dtype=np.float64)
    else:
        warr = np.atleast_1d(weights)

        if not (yarr.ndim == warr.ndim == 1 and yarr.shape[0] == warr.shape[0]):
            raise ValueError(
                "Input arrays y and w must have one dimension of equal length."
            )
        if np.any(warr <= 0):
            raise ValueError("Weights w must be strictly positive.")

        wx = np.array(warr[order], order="C", dtype=np.float64, copy=True)
    n = x.shape[0]
    # Block start indices buffer for pava; -1 marks unused slots.
    r = np.full(shape=n + 1, fill_value=-1, dtype=np.intp)
    x, wx, r, b = pava(x, wx, r)
    # Now that we know the number of blocks b, we only keep the relevant part
    # of r and wx.
    # As information: Due to the pava implementation, after the last block
    # index, there might be smaller numbers appended to r, e.g.
    # r = [0, 10, 8, 7] which in the end should be r = [0, 10].
    r = r[:b + 1]
    wx = wx[:b]
    if not increasing:
        # Undo the initial reversal; block boundaries must be re-expressed
        # relative to the un-reversed array.
        x = x[::-1]
        wx = wx[::-1]
        r = r[-1] - r[::-1]
    return OptimizeResult(
        x=x,
        weights=wx,
        blocks=r,
    )
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lbfgsb_py.py
ADDED
|
@@ -0,0 +1,543 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Functions
|
| 3 |
+
---------
|
| 4 |
+
.. autosummary::
|
| 5 |
+
:toctree: generated/
|
| 6 |
+
|
| 7 |
+
fmin_l_bfgs_b
|
| 8 |
+
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
## License for the Python wrapper
|
| 12 |
+
## ==============================
|
| 13 |
+
|
| 14 |
+
## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca>
|
| 15 |
+
|
| 16 |
+
## Permission is hereby granted, free of charge, to any person obtaining a
|
| 17 |
+
## copy of this software and associated documentation files (the "Software"),
|
| 18 |
+
## to deal in the Software without restriction, including without limitation
|
| 19 |
+
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
| 20 |
+
## and/or sell copies of the Software, and to permit persons to whom the
|
| 21 |
+
## Software is furnished to do so, subject to the following conditions:
|
| 22 |
+
|
| 23 |
+
## The above copyright notice and this permission notice shall be included in
|
| 24 |
+
## all copies or substantial portions of the Software.
|
| 25 |
+
|
| 26 |
+
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 27 |
+
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 28 |
+
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 29 |
+
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 30 |
+
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
| 31 |
+
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
| 32 |
+
## DEALINGS IN THE SOFTWARE.
|
| 33 |
+
|
| 34 |
+
## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
|
| 35 |
+
|
| 36 |
+
import numpy as np
|
| 37 |
+
from numpy import array, asarray, float64, zeros
|
| 38 |
+
from . import _lbfgsb
|
| 39 |
+
from ._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt,
|
| 40 |
+
_wrap_callback, _check_unknown_options,
|
| 41 |
+
_prepare_scalar_function)
|
| 42 |
+
from ._constraints import old_bound_to_new
|
| 43 |
+
|
| 44 |
+
from scipy.sparse.linalg import LinearOperator
|
| 45 |
+
|
| 46 |
+
__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
                  approx_grad=0,
                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
                  epsilon=1e-8,
                  iprint=-1, maxfun=15000, maxiter=15000, disp=None,
                  callback=None, maxls=20):
    """
    Minimize a function func using the L-BFGS-B algorithm.

    Parameters
    ----------
    func : callable f(x,*args)
        Function to minimize.
    x0 : ndarray
        Initial guess.
    fprime : callable fprime(x,*args), optional
        Gradient of `func`.  If None, `func` is expected to return both the
        function value and the gradient (``f, g = func(x, *args)``) --
        unless `approx_grad` is True, in which case `func` returns only
        ``f`` and the gradient is estimated numerically.
    args : sequence, optional
        Extra arguments passed to `func` and `fprime`.
    approx_grad : bool, optional
        If True, approximate the gradient numerically (`func` then returns
        only the function value).
    bounds : list, optional
        ``(min, max)`` pairs for each element of ``x``, defining the bounds
        on that parameter.  Use None or +-inf for an unbounded direction.
    m : int, optional
        Number of variable-metric corrections kept by the limited-memory
        approximation of the Hessian.
    factr : float, optional
        Termination tolerance on the relative reduction of the objective:
        iteration stops when
        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where
        ``eps`` is the machine precision.  Typical values: 1e12 for low
        accuracy, 1e7 for moderate accuracy, 10.0 for extremely high
        accuracy.  See Notes for the relationship to `ftol` as exposed by
        `scipy.optimize.minimize`.
    pgtol : float, optional
        Iteration stops when ``max{|proj g_i | i = 1, ..., n} <= pgtol``,
        where ``proj g_i`` is the i-th component of the projected gradient.
    epsilon : float, optional
        Step size used for the numerical gradient when `approx_grad` is
        True.
    iprint : int, optional
        Controls the frequency of output.  ``iprint < 0`` means no output;
        larger values print progressively more detail per iteration.
    maxfun : int, optional
        Maximum number of function evaluations.  May be exceeded slightly
        when gradients are evaluated by numerical differentiation.
    maxiter : int, optional
        Maximum number of iterations.
    disp : int, optional
        If not None, overrides `iprint` (0 silences all output).
    callback : callable, optional
        Called after each iteration as ``callback(xk)``, where ``xk`` is
        the current parameter vector.
    maxls : int, optional
        Maximum number of line search steps (per iteration).  Default 20.

    Returns
    -------
    x : array_like
        Estimated position of the minimum.
    f : float
        Value of `func` at the minimum.
    d : dict
        Information dictionary:

        * d['warnflag'] is 0 if converged, 1 if too many function
          evaluations or too many iterations, 2 if stopped for another
          reason, given in d['task']
        * d['grad'] is the gradient at the minimum (should be 0 ish)
        * d['funcalls'] is the number of function calls made.
        * d['nit'] is the number of iterations.

    See also
    --------
    minimize : interface to this solver via ``method='L-BFGS-B'``; it
        exposes ``ftol`` instead of `factr`, with
        ``ftol = factr * numpy.finfo(float).eps``.

    Notes
    -----
    Thin legacy wrapper around `_minimize_lbfgsb`: it translates the
    historical keyword names (`m`, `factr`, `pgtol`, `epsilon`) to the
    modern option names and repackages the `OptimizeResult` as the
    classic ``(x, f, d)`` triple.
    """
    # Resolve how the objective and gradient are supplied.  Three cases,
    # mirroring the documented contract of `func`/`fprime`/`approx_grad`:
    if approx_grad:
        # func returns only f; the gradient is finite-differenced later.
        fun, jac = func, None
    elif fprime is not None:
        # separate gradient callable supplied by the caller
        fun, jac = func, fprime
    else:
        # func returns (f, g); memoize so each is served from one call
        fun = MemoizeJac(func)
        jac = fun.derivative

    # Translate legacy option names and delegate to the modern driver.
    # Note ftol = factr * eps, the documented relationship.
    res = _minimize_lbfgsb(
        fun, x0, args=args, jac=jac, bounds=bounds,
        disp=disp,
        iprint=iprint,
        maxcor=m,
        ftol=factr * np.finfo(float).eps,
        gtol=pgtol,
        eps=epsilon,
        maxfun=maxfun,
        maxiter=maxiter,
        callback=_wrap_callback(callback),
        maxls=maxls,
    )

    # Repackage the OptimizeResult into the historical info dict.
    info = {
        'grad': res['jac'],
        'task': res['message'],
        'funcalls': res['nfev'],
        'nit': res['nit'],
        'warnflag': res['status'],
    }
    return res['x'], res['fun'], info
|
| 250 |
+
def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
                     iprint=-1, callback=None, maxls=20,
                     finite_diff_rel_step=None, **unknown_options):
    """
    Minimize a scalar function of one or more variables using the L-BFGS-B
    algorithm.

    Options
    -------
    disp : None or int
        If `disp is None` (the default), then the supplied version of `iprint`
        is used. If `disp is not None`, then it overrides the supplied version
        of `iprint` with the behaviour you outlined.
    maxcor : int
        The maximum number of variable metric corrections used to
        define the limited memory matrix. (The limited memory BFGS
        method does not store the full hessian but uses this many terms
        in an approximation to it.)
    ftol : float
        The iteration stops when ``(f^k -
        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
    gtol : float
        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
        <= gtol`` where ``proj g_i`` is the i-th component of the
        projected gradient.
    eps : float or ndarray
        If `jac is None` the absolute step size used for numerical
        approximation of the jacobian via forward differences.
    maxfun : int
        Maximum number of function evaluations. Note that this function
        may violate the limit because of evaluating gradients by numerical
        differentiation.
    maxiter : int
        Maximum number of iterations.
    iprint : int, optional
        Controls the frequency of output. ``iprint < 0`` means no output;
        ``iprint = 0`` print only one line at the last iteration;
        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
        ``iprint = 99`` print details of every iteration except n-vectors;
        ``iprint = 100`` print also the changes of active set and final x;
        ``iprint > 100`` print details of every iteration including x and g.
    maxls : int, optional
        Maximum number of line search steps (per iteration). Default is 20.
    finite_diff_rel_step : None or array_like, optional
        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
        use for numerical approximation of the jacobian. The absolute step
        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
        possibly adjusted to fit into the bounds. For ``method='3-point'``
        the sign of `h` is ignored. If None (default) then step is selected
        automatically.

    Notes
    -----
    The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
    but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
    relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
    I.e., `factr` multiplies the default machine floating-point precision to
    arrive at `ftol`.

    """
    _check_unknown_options(unknown_options)
    # Translate the modern option names back to the names the Fortran
    # routine expects: m (corrections), pgtol (projected-gradient tol),
    # factr (ftol expressed in units of machine epsilon).
    m = maxcor
    pgtol = gtol
    factr = ftol / np.finfo(float).eps

    x0 = asarray(x0).ravel()
    n, = x0.shape

    # historically old-style bounds were/are expected by lbfgsb.
    # That's still the case but we'll deal with new-style from here on,
    # it's easier
    if bounds is None:
        pass
    elif len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    else:
        bounds = np.array(old_bound_to_new(bounds))

        # check bounds
        if (bounds[0] > bounds[1]).any():
            raise ValueError(
                "LBFGSB - one of the lower bounds is greater than an upper bound."
            )

        # initial vector must lie within the bounds. Otherwise ScalarFunction and
        # approx_derivative will cause problems
        x0 = np.clip(x0, bounds[0], bounds[1])

    # disp (if given) overrides iprint; disp == 0 means fully silent.
    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp

    # _prepare_scalar_function can use bounds=None to represent no bounds
    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
                                  bounds=bounds,
                                  finite_diff_rel_step=finite_diff_rel_step)

    func_and_grad = sf.fun_and_grad

    # Integer dtype matching the compiled Fortran extension's INTEGER kind.
    fortran_int = _lbfgsb.types.intvar.dtype

    # nbd[i] encodes the bound type of variable i for the Fortran code:
    # 0 = unbounded, 1 = lower only, 2 = both, 3 = upper only.
    nbd = zeros(n, fortran_int)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
    bounds_map = {(-np.inf, np.inf): 0,
                  (1, np.inf): 1,
                  (1, 1): 2,
                  (-np.inf, 1): 3}

    if bounds is not None:
        for i in range(0, n):
            l, u = bounds[0, i], bounds[1, i]
            # Finite bounds are stored; l/u are then replaced by the
            # sentinel 1 so (l, u) indexes bounds_map correctly.
            if not np.isinf(l):
                low_bnd[i] = l
                l = 1
            if not np.isinf(u):
                upper_bnd[i] = u
                u = 1
            nbd[i] = bounds_map[l, u]

    if not maxls > 0:
        raise ValueError('maxls must be positive.')

    # Workspace arrays required by the Fortran routine setulb; sizes are
    # dictated by the L-BFGS-B 3.0 documentation.
    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
    iwa = zeros(3*n, fortran_int)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, fortran_int)
    isave = zeros(44, fortran_int)
    dsave = zeros(29, float64)

    task[:] = 'START'

    n_iterations = 0

    # Reverse-communication loop: setulb mutates `task` to request work
    # ('FG' = evaluate f and g, 'NEW_X' = iteration finished) until a
    # terminal state is reached.
    while 1:
        # g may become float32 if a user provides a function that calculates
        # the Jacobian in float32 (see gh-18730). The underlying Fortran code
        # expects float64, so upcast it
        g = g.astype(np.float64)
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave, maxls)
        task_str = task.tobytes()
        if task_str.startswith(b'FG'):
            # The minimization routine wants f and g at the current x.
            # Note that interruptions due to maxfun are postponed
            # until the completion of the current minimization iteration.
            # Overwrite f and g:
            f, g = func_and_grad(x)
        elif task_str.startswith(b'NEW_X'):
            # new iteration
            n_iterations += 1

            intermediate_result = OptimizeResult(x=x, fun=f)
            if _call_callback_maybe_halt(callback, intermediate_result):
                task[:] = 'STOP: CALLBACK REQUESTED HALT'
            if n_iterations >= maxiter:
                task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
            elif sf.nfev > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
                           'EXCEEDS LIMIT')
        else:
            break

    # Map the final task string onto the classic warnflag convention:
    # 0 = converged, 1 = hit an evaluation/iteration limit, 2 = other.
    task_str = task.tobytes().strip(b'\x00').strip()
    if task_str.startswith(b'CONV'):
        warnflag = 0
    elif sf.nfev > maxfun or n_iterations >= maxiter:
        warnflag = 1
    else:
        warnflag = 2

    # These two portions of the workspace are described in the mainlb
    # subroutine in lbfgsb.f. See line 363.
    s = wa[0: m*n].reshape(m, n)
    y = wa[m*n: 2*m*n].reshape(m, n)

    # See lbfgsb.f line 160 for this portion of the workspace.
    # isave(31) = the total number of BFGS updates prior the current iteration;
    n_bfgs_updates = isave[30]

    n_corrs = min(n_bfgs_updates, maxcor)
    hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])

    task_str = task_str.decode()
    return OptimizeResult(fun=f, jac=g, nfev=sf.nfev,
                          njev=sf.ngev,
                          nit=n_iterations, status=warnflag, message=task_str,
                          x=x, success=(warnflag == 0), hess_inv=hess_inv)
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
class LbfgsInvHessProduct(LinearOperator):
    """Linear operator for the L-BFGS approximate inverse Hessian.

    Applies the limited-memory BFGS approximation of the inverse Hessian
    -- built from the correction pairs accumulated during an optimization
    run -- to arbitrary vectors, via the standard
    ``scipy.sparse.linalg.LinearOperator`` interface.

    Parameters
    ----------
    sk : array_like, shape=(n_corr, n)
        The `n_corr` most recent updates to the solution vector (see [1]).
    yk : array_like, shape=(n_corr, n)
        The `n_corr` most recent updates to the gradient (see [1]).

    References
    ----------
    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
       storage." Mathematics of computation 35.151 (1980): 773-782.

    """

    def __init__(self, sk, yk):
        """Construct the operator from correction pairs."""
        if sk.shape != yk.shape or sk.ndim != 2:
            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
        n_corrs, n = sk.shape

        # The operator represents an (n x n) matrix acting on R^n.
        super().__init__(dtype=np.float64, shape=(n, n))

        self.sk = sk
        self.yk = yk
        self.n_corrs = n_corrs
        # rho[i] = 1 / (s_i . y_i), the curvature scalars of the update.
        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)

    def _matvec(self, x):
        """Apply the approximate inverse Hessian to a vector.

        Implements the two-loop recursion of Section (4) of [1].

        Parameters
        ----------
        x : ndarray
            An array with shape (n,) or (n,1).

        Returns
        -------
        y : ndarray
            The matrix-vector product.

        """
        sk, yk, rho = self.sk, self.yk, self.rho
        q = np.array(x, dtype=self.dtype, copy=True)
        # Accept a column vector and flatten it for the recursion.
        if q.ndim == 2 and q.shape[1] == 1:
            q = q.reshape(-1)

        alpha = np.empty(self.n_corrs)

        # First (backward) loop: peel off the most recent corrections.
        for idx in reversed(range(self.n_corrs)):
            alpha[idx] = rho[idx] * np.dot(sk[idx], q)
            q = q - alpha[idx] * yk[idx]

        # Second (forward) loop: reapply the corrections.
        result = q
        for idx in range(self.n_corrs):
            beta = rho[idx] * np.dot(yk[idx], result)
            result = result + sk[idx] * (alpha[idx] - beta)

        return result

    def todense(self):
        """Return a dense array representation of this operator.

        Returns
        -------
        arr : ndarray, shape=(n, n)
            An array with the same shape and containing
            the same data represented by this `LinearOperator`.

        """
        identity = np.eye(*self.shape, dtype=self.dtype)
        Hk = identity

        # Apply each BFGS update H <- V' H V + rho s s' explicitly,
        # where V = I - rho y s'.
        for i in range(self.n_corrs):
            si = self.sk[i][:, np.newaxis]
            yi = self.yk[i][:, np.newaxis]
            left = identity - si * yi.T * self.rho[i]
            right = identity - yi * si.T * self.rho[i]

            Hk = np.dot(left, np.dot(Hk, right)) + (self.rho[i] * si *
                                                    si.T)
        return Hk
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_linesearch.py
ADDED
|
@@ -0,0 +1,896 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Functions
|
| 3 |
+
---------
|
| 4 |
+
.. autosummary::
|
| 5 |
+
:toctree: generated/
|
| 6 |
+
|
| 7 |
+
line_search_armijo
|
| 8 |
+
line_search_wolfe1
|
| 9 |
+
line_search_wolfe2
|
| 10 |
+
scalar_search_wolfe1
|
| 11 |
+
scalar_search_wolfe2
|
| 12 |
+
|
| 13 |
+
"""
|
| 14 |
+
from warnings import warn
|
| 15 |
+
|
| 16 |
+
from ._dcsrch import DCSRCH
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',
|
| 20 |
+
'scalar_search_wolfe1', 'scalar_search_wolfe2',
|
| 21 |
+
'line_search_armijo']
|
| 22 |
+
|
| 23 |
+
class LineSearchWarning(RuntimeWarning):
    """Emitted when a line search fails to converge or hits a bound."""
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _check_c1_c2(c1, c2):
|
| 28 |
+
if not (0 < c1 < c2 < 1):
|
| 29 |
+
raise ValueError("'c1' and 'c2' do not satisfy"
|
| 30 |
+
"'0 < c1 < c2 < 1'.")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
#------------------------------------------------------------------------------
|
| 34 |
+
# Minpack's Wolfe line and scalar searches
|
| 35 |
+
#------------------------------------------------------------------------------
|
| 36 |
+
|
| 37 |
+
def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
                       old_fval=None, old_old_fval=None,
                       args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
                       xtol=1e-14):
    """
    As `scalar_search_wolfe1` but do a line search to direction `pk`.

    Parameters
    ----------
    f : callable
        Function ``f(x)``.
    fprime : callable
        Gradient of `f`.
    xk : array_like
        Current point.
    pk : array_like
        Search direction.
    gfk : array_like, optional
        Gradient of `f` at `xk`; computed here if omitted.
    old_fval : float, optional
        Value of `f` at `xk`.
    old_old_fval : float, optional
        Value of `f` at the point preceding `xk`.

    The remaining parameters are as for `scalar_search_wolfe1`.

    Returns
    -------
    stp, f_count, g_count, fval, old_fval
        As in `scalar_search_wolfe1`.
    gval : array
        Gradient of `f` at the final point.

    Notes
    -----
    Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1``.
    """
    if gfk is None:
        gfk = fprime(xk, *args)

    # Mutable cells so the closures can update shared state.
    n_fev = [0]
    n_gev = [0]
    last_grad = [gfk]

    def phi(alpha):
        n_fev[0] += 1
        return f(xk + alpha * pk, *args)

    def derphi(alpha):
        # Cache the gradient so the caller gets it without re-evaluation.
        last_grad[0] = fprime(xk + alpha * pk, *args)
        n_gev[0] += 1
        return np.dot(last_grad[0], pk)

    derphi0 = np.dot(gfk, pk)

    stp, fval, old_fval = scalar_search_wolfe1(
        phi, derphi, old_fval, old_old_fval, derphi0,
        c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)

    return stp, n_fev[0], n_gev[0], fval, old_fval, last_grad[0]
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
                         c1=1e-4, c2=0.9,
                         amax=50, amin=1e-8, xtol=1e-14):
    """
    Scalar function search for alpha that satisfies strong Wolfe conditions.

    alpha > 0 is assumed to be a descent direction.

    Parameters
    ----------
    phi : callable phi(alpha)
        Function at point `alpha`.
    derphi : callable phi'(alpha)
        Objective function derivative. Returns a scalar.
    phi0 : float, optional
        Value of phi at 0.
    old_phi0 : float, optional
        Value of phi at the previous point.
    derphi0 : float, optional
        Value of derphi at 0.
    c1 : float, optional
        Parameter for the Armijo condition rule.
    c2 : float, optional
        Parameter for the curvature condition rule.
    amax, amin : float, optional
        Maximum and minimum step size.
    xtol : float, optional
        Relative tolerance for an acceptable step.

    Returns
    -------
    alpha : float
        Step size, or None if no suitable step was found.
    phi : float
        Value of `phi` at the new point `alpha`.
    phi0 : float
        Value of `phi` at ``alpha=0``.

    Notes
    -----
    Uses routine DCSRCH from MINPACK.

    Parameters `c1` and `c2` must satisfy ``0 < c1 < c2 < 1`` as described
    in [1]_.

    References
    ----------
    .. [1] Nocedal, J., & Wright, S. J. (2006). Numerical optimization.
       Springer Series in Operations Research and Financial Engineering.
       Springer Nature.
    """
    _check_c1_c2(c1, c2)

    if phi0 is None:
        phi0 = phi(0.)
    if derphi0 is None:
        derphi0 = derphi(0.)

    # Initial step guess: reuse the previous decrease to extrapolate,
    # falling back to a unit step when that guess is unusable.
    alpha1 = 1.0
    if old_phi0 is not None and derphi0 != 0:
        guess = min(1.0, 1.01 * 2 * (phi0 - old_phi0) / derphi0)
        if guess >= 0:
            alpha1 = guess

    maxiter = 100

    searcher = DCSRCH(phi, derphi, c1, c2, xtol, amin, amax)
    stp, phi1, phi0, task = searcher(
        alpha1, phi0=phi0, derphi0=derphi0, maxiter=maxiter
    )

    return stp, phi1, phi0
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
# Backwards-compatible module-level alias for the MINPACK-based Wolfe search.
# (The public `scipy.optimize.line_search` is `line_search_wolfe2` — see the
# note further down in this file.)
line_search = line_search_wolfe1
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
#------------------------------------------------------------------------------
|
| 181 |
+
# Pure-Python Wolfe line and scalar searches
|
| 182 |
+
#------------------------------------------------------------------------------
|
| 183 |
+
|
| 184 |
+
# Note: `line_search_wolfe2` is the public `scipy.optimize.line_search`
|
| 185 |
+
|
| 186 |
+
def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
                       old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None,
                       extra_condition=None, maxiter=10):
    """Find alpha that satisfies strong Wolfe conditions.

    Parameters
    ----------
    f : callable f(x,*args)
        Objective function.
    myfprime : callable f'(x,*args)
        Objective function gradient.
    xk : ndarray
        Starting point.
    pk : ndarray
        Search direction. Must be a descent direction for the algorithm
        to converge.
    gfk : ndarray, optional
        Gradient at ``x=xk``; recomputed if omitted.
    old_fval : float, optional
        Function value at ``x=xk``; recomputed if omitted.
    old_old_fval : float, optional
        Function value at the point preceding ``x=xk``.
    args : tuple, optional
        Additional arguments passed to the objective function.
    c1 : float, optional
        Parameter for the Armijo condition rule.
    c2 : float, optional
        Parameter for the curvature condition rule.
    amax : float, optional
        Maximum step size.
    extra_condition : callable, optional
        ``extra_condition(alpha, x, f, g) -> bool``.  A step ``alpha`` is
        accepted only if this returns True; otherwise the search continues
        with new iterates.  Only called for iterates already satisfying
        the strong Wolfe conditions.
    maxiter : int, optional
        Maximum number of iterations to perform.

    Returns
    -------
    alpha : float or None
        Alpha for which ``x_new = x0 + alpha * pk``, or None if the line
        search did not converge.
    fc : int
        Number of function evaluations made.
    gc : int
        Number of gradient evaluations made.
    new_fval : float or None
        New function value ``f(x_new)``, or None on failure.
    old_fval : float
        Old function value ``f(x0)``.
    new_slope : float or None
        Local slope ``<myfprime(x_new), pk>`` at the new value, or None
        on failure.

    Notes
    -----
    Enforces the strong Wolfe conditions; see Wright and Nocedal,
    'Numerical Optimization', 1999, pp. 59-61.  If `pk` is not a descent
    direction, `alpha`, `new_fval` and `new_slope` will be None.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import line_search
    >>> def obj_func(x):
    ...     return (x[0])**2+(x[1])**2
    >>> def obj_grad(x):
    ...     return [2*x[0], 2*x[1]]
    >>> start_point = np.array([1.8, 1.7])
    >>> search_gradient = np.array([-1.0, -1.0])
    >>> line_search(obj_func, obj_grad, start_point, search_gradient)
    (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4])
    """
    # Evaluation counters and a one-slot gradient cache shared by closures.
    n_fev = [0]
    n_gev = [0]
    latest_grad = [None]
    latest_grad_alpha = [None]

    def phi(alpha):
        n_fev[0] += 1
        return f(xk + alpha * pk, *args)

    def derphi(alpha):
        n_gev[0] += 1
        # Cache the full gradient so it can be returned without another call.
        latest_grad[0] = myfprime(xk + alpha * pk, *args)
        latest_grad_alpha[0] = alpha
        return np.dot(latest_grad[0], pk)

    if gfk is None:
        gfk = myfprime(xk, *args)
    derphi0 = np.dot(gfk, pk)

    wrapped_condition = None
    if extra_condition is not None:
        def wrapped_condition(alpha, phi_value):
            # Supply the cached gradient, recomputing only when the cache
            # belongs to a different alpha.
            if latest_grad_alpha[0] != alpha:
                derphi(alpha)
            return extra_condition(alpha, xk + alpha * pk, phi_value,
                                   latest_grad[0])

    alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(
        phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,
        wrapped_condition, maxiter=maxiter)

    if derphi_star is None:
        warn('The line search algorithm did not converge',
             LineSearchWarning, stacklevel=2)
    else:
        # Hand back the cached gradient at the accepted step so the caller
        # need not recompute it in the outer loop.
        derphi_star = latest_grad[0]

    return alpha_star, n_fev[0], n_gev[0], phi_star, old_fval, derphi_star
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
def scalar_search_wolfe2(phi, derphi, phi0=None,
                         old_phi0=None, derphi0=None,
                         c1=1e-4, c2=0.9, amax=None,
                         extra_condition=None, maxiter=10):
    """Find alpha that satisfies strong Wolfe conditions.

    alpha > 0 is assumed to be a descent direction.

    Parameters
    ----------
    phi : callable phi(alpha)
        Objective scalar function.
    derphi : callable phi'(alpha)
        Objective function derivative. Returns a scalar.
    phi0 : float, optional
        Value of phi at 0.
    old_phi0 : float, optional
        Value of phi at previous point.
    derphi0 : float, optional
        Value of derphi at 0
    c1 : float, optional
        Parameter for Armijo condition rule.
    c2 : float, optional
        Parameter for curvature condition rule.
    amax : float, optional
        Maximum step size.
    extra_condition : callable, optional
        A callable of the form ``extra_condition(alpha, phi_value)``
        returning a boolean. The line search accepts the value
        of ``alpha`` only if this callable returns ``True``.
        If the callable returns ``False`` for the step length,
        the algorithm will continue with new iterates.
        The callable is only called for iterates satisfying
        the strong Wolfe conditions.
    maxiter : int, optional
        Maximum number of iterations to perform.

    Returns
    -------
    alpha_star : float or None
        Best alpha, or None if the line search algorithm did not converge.
    phi_star : float
        phi at alpha_star.
    phi0 : float
        phi at 0.
    derphi_star : float or None
        derphi at alpha_star, or None if the line search algorithm
        did not converge.

    Notes
    -----
    Uses the line search algorithm to enforce strong Wolfe
    conditions. See Wright and Nocedal, 'Numerical Optimization',
    1999, pp. 59-61.

    """
    _check_c1_c2(c1, c2)

    if phi0 is None:
        phi0 = phi(0.)

    if derphi0 is None:
        derphi0 = derphi(0.)

    alpha0 = 0
    # Initial step guess extrapolated from the previous function decrease;
    # negative guesses are replaced by a unit step.
    if old_phi0 is not None and derphi0 != 0:
        alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
    else:
        alpha1 = 1.0

    if alpha1 < 0:
        alpha1 = 1.0

    if amax is not None:
        alpha1 = min(alpha1, amax)

    phi_a1 = phi(alpha1)
    #derphi_a1 = derphi(alpha1) evaluated below

    phi_a0 = phi0
    derphi_a0 = derphi0

    if extra_condition is None:
        def extra_condition(alpha, phi):
            return True

    # Bracketing loop (Algorithm 3.5 in Wright & Nocedal): grow the step
    # until the Wolfe interval is bracketed, then hand off to `_zoom`.
    for i in range(maxiter):
        if alpha1 == 0 or (amax is not None and alpha0 > amax):
            # alpha1 == 0: This shouldn't happen. Perhaps the increment has
            # slipped below machine precision?
            alpha_star = None
            phi_star = phi0
            phi0 = old_phi0
            derphi_star = None

            if alpha1 == 0:
                msg = 'Rounding errors prevent the line search from converging'
            else:
                msg = "The line search algorithm could not find a solution " + \
                      "less than or equal to amax: %s" % amax

            warn(msg, LineSearchWarning, stacklevel=2)
            break

        not_first_iteration = i > 0
        # Armijo violated, or phi stopped decreasing: minimum is bracketed
        # in [alpha0, alpha1].
        if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
           ((phi_a1 >= phi_a0) and not_first_iteration):
            alpha_star, phi_star, derphi_star = \
                        _zoom(alpha0, alpha1, phi_a0,
                              phi_a1, derphi_a0, phi, derphi,
                              phi0, derphi0, c1, c2, extra_condition)
            break

        derphi_a1 = derphi(alpha1)
        # Curvature condition satisfied: alpha1 is acceptable.
        if (abs(derphi_a1) <= -c2*derphi0):
            if extra_condition(alpha1, phi_a1):
                alpha_star = alpha1
                phi_star = phi_a1
                derphi_star = derphi_a1
                break

        # Positive slope: the interval [alpha1, alpha0] brackets a minimum.
        if (derphi_a1 >= 0):
            alpha_star, phi_star, derphi_star = \
                        _zoom(alpha1, alpha0, phi_a1,
                              phi_a0, derphi_a1, phi, derphi,
                              phi0, derphi0, c1, c2, extra_condition)
            break

        alpha2 = 2 * alpha1  # increase by factor of two on each iteration
        if amax is not None:
            alpha2 = min(alpha2, amax)
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi(alpha1)
        derphi_a0 = derphi_a1

    else:
        # stopping test maxiter reached
        alpha_star = alpha1
        phi_star = phi_a1
        derphi_star = None
        warn('The line search algorithm did not converge',
             LineSearchWarning, stacklevel=2)

    return alpha_star, phi_star, phi0, derphi_star
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
def _cubicmin(a, fa, fpa, b, fb, c, fc):
|
| 478 |
+
"""
|
| 479 |
+
Finds the minimizer for a cubic polynomial that goes through the
|
| 480 |
+
points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
|
| 481 |
+
|
| 482 |
+
If no minimizer can be found, return None.
|
| 483 |
+
|
| 484 |
+
"""
|
| 485 |
+
# f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
|
| 486 |
+
|
| 487 |
+
with np.errstate(divide='raise', over='raise', invalid='raise'):
|
| 488 |
+
try:
|
| 489 |
+
C = fpa
|
| 490 |
+
db = b - a
|
| 491 |
+
dc = c - a
|
| 492 |
+
denom = (db * dc) ** 2 * (db - dc)
|
| 493 |
+
d1 = np.empty((2, 2))
|
| 494 |
+
d1[0, 0] = dc ** 2
|
| 495 |
+
d1[0, 1] = -db ** 2
|
| 496 |
+
d1[1, 0] = -dc ** 3
|
| 497 |
+
d1[1, 1] = db ** 3
|
| 498 |
+
[A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
|
| 499 |
+
fc - fa - C * dc]).flatten())
|
| 500 |
+
A /= denom
|
| 501 |
+
B /= denom
|
| 502 |
+
radical = B * B - 3 * A * C
|
| 503 |
+
xmin = a + (-B + np.sqrt(radical)) / (3 * A)
|
| 504 |
+
except ArithmeticError:
|
| 505 |
+
return None
|
| 506 |
+
if not np.isfinite(xmin):
|
| 507 |
+
return None
|
| 508 |
+
return xmin
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
def _quadmin(a, fa, fpa, b, fb):
|
| 512 |
+
"""
|
| 513 |
+
Finds the minimizer for a quadratic polynomial that goes through
|
| 514 |
+
the points (a,fa), (b,fb) with derivative at a of fpa.
|
| 515 |
+
|
| 516 |
+
"""
|
| 517 |
+
# f(x) = B*(x-a)^2 + C*(x-a) + D
|
| 518 |
+
with np.errstate(divide='raise', over='raise', invalid='raise'):
|
| 519 |
+
try:
|
| 520 |
+
D = fa
|
| 521 |
+
C = fpa
|
| 522 |
+
db = b - a * 1.0
|
| 523 |
+
B = (fb - D - C * db) / (db * db)
|
| 524 |
+
xmin = a - C / (2.0 * B)
|
| 525 |
+
except ArithmeticError:
|
| 526 |
+
return None
|
| 527 |
+
if not np.isfinite(xmin):
|
| 528 |
+
return None
|
| 529 |
+
return xmin
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
          phi, derphi, phi0, derphi0, c1, c2, extra_condition):
    """Zoom stage of approximate linesearch satisfying strong Wolfe conditions.

    Part of the optimization algorithm in `scalar_search_wolfe2`.

    Returns ``(a_star, val_star, valprime_star)``; all three are None when
    no conforming step is found within the internal iteration limit.

    Notes
    -----
    Implements Algorithm 3.6 (zoom) in Wright and Nocedal,
    'Numerical Optimization', 1999, pp. 61.

    """

    maxiter = 10
    i = 0
    delta1 = 0.2  # cubic interpolant check
    delta2 = 0.1  # quadratic interpolant check
    phi_rec = phi0
    a_rec = 0
    while True:
        # interpolate to find a trial step length between a_lo and
        # a_hi Need to choose interpolation here. Use cubic
        # interpolation and then if the result is within delta *
        # dalpha or outside of the interval bounded by a_lo or a_hi
        # then use quadratic interpolation, if the result is still too
        # close, then use bisection

        dalpha = a_hi - a_lo
        # The bracket may be ordered either way; (a, b) is the sorted pair.
        if dalpha < 0:
            a, b = a_hi, a_lo
        else:
            a, b = a_lo, a_hi

        # minimizer of cubic interpolant
        # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
        #
        # if the result is too close to the end points (or out of the
        # interval), then use quadratic interpolation with phi_lo,
        # derphi_lo and phi_hi if the result is still too close to the
        # end points (or out of the interval) then use bisection

        if (i > 0):
            cchk = delta1 * dalpha
            a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
                            a_rec, phi_rec)
        if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
            qchk = delta2 * dalpha
            a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
            if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
                # Last resort: bisect the bracket.
                a_j = a_lo + 0.5*dalpha

        # Check new value of a_j

        phi_aj = phi(a_j)
        if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
            # Sufficient decrease failed: shrink the bracket from the high end.
            phi_rec = phi_hi
            a_rec = a_hi
            a_hi = a_j
            phi_hi = phi_aj
        else:
            derphi_aj = derphi(a_j)
            # Curvature condition plus caller's extra acceptance test.
            if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):
                a_star = a_j
                val_star = phi_aj
                valprime_star = derphi_aj
                break
            if derphi_aj*(a_hi - a_lo) >= 0:
                # Slope points out of the bracket: swap the high end to a_lo.
                phi_rec = phi_hi
                a_rec = a_hi
                a_hi = a_lo
                phi_hi = phi_lo
            else:
                phi_rec = phi_lo
                a_rec = a_lo
            a_lo = a_j
            phi_lo = phi_aj
            derphi_lo = derphi_aj
        i += 1
        if (i > maxiter):
            # Failed to find a conforming step size
            a_star = None
            val_star = None
            valprime_star = None
            break
    return a_star, val_star, valprime_star
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
#------------------------------------------------------------------------------
|
| 620 |
+
# Armijo line and scalar searches
|
| 621 |
+
#------------------------------------------------------------------------------
|
| 622 |
+
|
| 623 |
+
def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
    """Minimize over alpha, the function ``f(xk+alpha pk)``.

    Parameters
    ----------
    f : callable
        Function to be minimized.
    xk : array_like
        Current point.
    pk : array_like
        Search direction.
    gfk : array_like
        Gradient of `f` at point `xk`.
    old_fval : float
        Value of `f` at point `xk`; recomputed here when None.
    args : tuple, optional
        Optional arguments.
    c1 : float, optional
        Value to control stopping criterion.
    alpha0 : scalar, optional
        Value of `alpha` at start of the optimization.

    Returns
    -------
    alpha
    f_count
    f_val_at_alpha

    Notes
    -----
    Uses the interpolation algorithm (Armijo backtracking) as suggested by
    Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57.
    """
    xk = np.atleast_1d(xk)
    n_fev = [0]

    def phi(step):
        n_fev[0] += 1
        return f(xk + step * pk, *args)

    # f(xk) was computed in a past loop iteration when old_fval is given.
    phi0 = phi(0.) if old_fval is None else old_fval

    derphi0 = np.dot(gfk, pk)
    alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
                                       alpha0=alpha0)
    return alpha, n_fev[0], phi1
|
| 673 |
+
|
| 674 |
+
|
| 675 |
+
def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
    """
    Compatibility wrapper for `line_search_armijo`.

    Returns the armijo result with a zero gradient-evaluation count
    inserted as the third element.
    """
    alpha, n_fev, fval = line_search_armijo(f, xk, pk, gfk, old_fval,
                                            args=args, c1=c1, alpha0=alpha0)
    return alpha, n_fev, 0, fval
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
    """Minimize over alpha, the function ``phi(alpha)``.

    Uses the interpolation algorithm (Armijo backtracking) as suggested by
    Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57.

    alpha > 0 is assumed to be a descent direction.

    Returns
    -------
    alpha : float or None
        Accepted step, or None if no suitable step was found.
    phi1 : float
        Value of ``phi`` at the last evaluated step.
    """
    def sufficient(alpha, value):
        # Armijo (sufficient decrease) condition.
        return value <= phi0 + c1 * alpha * derphi0

    phi_a0 = phi(alpha0)
    if sufficient(alpha0, phi_a0):
        return alpha0, phi_a0

    # Otherwise, compute the minimizer of a quadratic interpolant.
    alpha1 = -derphi0 * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = phi(alpha1)

    if sufficient(alpha1, phi_a1):
        return alpha1, phi_a1

    # Loop with cubic interpolation until an alpha satisfying the first
    # Wolfe condition is found (backtracking makes the step small enough
    # that the second condition is assumed to hold).
    while alpha1 > amin:       # we are assuming alpha>0 is a descent direction
        factor = alpha0**2 * alpha1**2 * (alpha1 - alpha0)
        r1 = phi_a1 - phi0 - derphi0 * alpha1
        r0 = phi_a0 - phi0 - derphi0 * alpha0
        a = (alpha0**2 * r1 - alpha1**2 * r0) / factor
        b = (-alpha0**3 * r1 + alpha1**3 * r0) / factor

        alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0 * a)
        phi_a2 = phi(alpha2)

        if sufficient(alpha2, phi_a2):
            return alpha2, phi_a2

        # Safeguard: fall back to halving when the cubic step is too small
        # a reduction or too close to the previous step.
        if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2 / alpha1) < 0.96:
            alpha2 = alpha1 / 2.0

        alpha0, alpha1 = alpha1, alpha2
        phi_a0, phi_a1 = phi_a1, phi_a2

    # Failed to find a suitable step length
    return None, phi_a1
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
#------------------------------------------------------------------------------
|
| 743 |
+
# Non-monotone line search for DF-SANE
|
| 744 |
+
#------------------------------------------------------------------------------
|
| 745 |
+
|
| 746 |
+
def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
|
| 747 |
+
gamma=1e-4, tau_min=0.1, tau_max=0.5):
|
| 748 |
+
"""
|
| 749 |
+
Nonmonotone backtracking line search as described in [1]_
|
| 750 |
+
|
| 751 |
+
Parameters
|
| 752 |
+
----------
|
| 753 |
+
f : callable
|
| 754 |
+
Function returning a tuple ``(f, F)`` where ``f`` is the value
|
| 755 |
+
of a merit function and ``F`` the residual.
|
| 756 |
+
x_k : ndarray
|
| 757 |
+
Initial position.
|
| 758 |
+
d : ndarray
|
| 759 |
+
Search direction.
|
| 760 |
+
prev_fs : float
|
| 761 |
+
List of previous merit function values. Should have ``len(prev_fs) <= M``
|
| 762 |
+
where ``M`` is the nonmonotonicity window parameter.
|
| 763 |
+
eta : float
|
| 764 |
+
Allowed merit function increase, see [1]_
|
| 765 |
+
gamma, tau_min, tau_max : float, optional
|
| 766 |
+
Search parameters, see [1]_
|
| 767 |
+
|
| 768 |
+
Returns
|
| 769 |
+
-------
|
| 770 |
+
alpha : float
|
| 771 |
+
Step length
|
| 772 |
+
xp : ndarray
|
| 773 |
+
Next position
|
| 774 |
+
fp : float
|
| 775 |
+
Merit function value at next position
|
| 776 |
+
Fp : ndarray
|
| 777 |
+
Residual at next position
|
| 778 |
+
|
| 779 |
+
References
|
| 780 |
+
----------
|
| 781 |
+
[1] "Spectral residual method without gradient information for solving
|
| 782 |
+
large-scale nonlinear systems of equations." W. La Cruz,
|
| 783 |
+
J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
|
| 784 |
+
|
| 785 |
+
"""
|
| 786 |
+
f_k = prev_fs[-1]
|
| 787 |
+
f_bar = max(prev_fs)
|
| 788 |
+
|
| 789 |
+
alpha_p = 1
|
| 790 |
+
alpha_m = 1
|
| 791 |
+
alpha = 1
|
| 792 |
+
|
| 793 |
+
while True:
|
| 794 |
+
xp = x_k + alpha_p * d
|
| 795 |
+
fp, Fp = f(xp)
|
| 796 |
+
|
| 797 |
+
if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:
|
| 798 |
+
alpha = alpha_p
|
| 799 |
+
break
|
| 800 |
+
|
| 801 |
+
alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
|
| 802 |
+
|
| 803 |
+
xp = x_k - alpha_m * d
|
| 804 |
+
fp, Fp = f(xp)
|
| 805 |
+
|
| 806 |
+
if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:
|
| 807 |
+
alpha = -alpha_m
|
| 808 |
+
break
|
| 809 |
+
|
| 810 |
+
alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
|
| 811 |
+
|
| 812 |
+
alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
|
| 813 |
+
alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
|
| 814 |
+
|
| 815 |
+
return alpha, xp, fp, Fp
|
| 816 |
+
|
| 817 |
+
|
| 818 |
+
def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
|
| 819 |
+
gamma=1e-4, tau_min=0.1, tau_max=0.5,
|
| 820 |
+
nu=0.85):
|
| 821 |
+
"""
|
| 822 |
+
Nonmonotone line search from [1]
|
| 823 |
+
|
| 824 |
+
Parameters
|
| 825 |
+
----------
|
| 826 |
+
f : callable
|
| 827 |
+
Function returning a tuple ``(f, F)`` where ``f`` is the value
|
| 828 |
+
of a merit function and ``F`` the residual.
|
| 829 |
+
x_k : ndarray
|
| 830 |
+
Initial position.
|
| 831 |
+
d : ndarray
|
| 832 |
+
Search direction.
|
| 833 |
+
f_k : float
|
| 834 |
+
Initial merit function value.
|
| 835 |
+
C, Q : float
|
| 836 |
+
Control parameters. On the first iteration, give values
|
| 837 |
+
Q=1.0, C=f_k
|
| 838 |
+
eta : float
|
| 839 |
+
Allowed merit function increase, see [1]_
|
| 840 |
+
nu, gamma, tau_min, tau_max : float, optional
|
| 841 |
+
Search parameters, see [1]_
|
| 842 |
+
|
| 843 |
+
Returns
|
| 844 |
+
-------
|
| 845 |
+
alpha : float
|
| 846 |
+
Step length
|
| 847 |
+
xp : ndarray
|
| 848 |
+
Next position
|
| 849 |
+
fp : float
|
| 850 |
+
Merit function value at next position
|
| 851 |
+
Fp : ndarray
|
| 852 |
+
Residual at next position
|
| 853 |
+
C : float
|
| 854 |
+
New value for the control parameter C
|
| 855 |
+
Q : float
|
| 856 |
+
New value for the control parameter Q
|
| 857 |
+
|
| 858 |
+
References
|
| 859 |
+
----------
|
| 860 |
+
.. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
|
| 861 |
+
search and its application to the spectral residual
|
| 862 |
+
method'', IMA J. Numer. Anal. 29, 814 (2009).
|
| 863 |
+
|
| 864 |
+
"""
|
| 865 |
+
alpha_p = 1
|
| 866 |
+
alpha_m = 1
|
| 867 |
+
alpha = 1
|
| 868 |
+
|
| 869 |
+
while True:
|
| 870 |
+
xp = x_k + alpha_p * d
|
| 871 |
+
fp, Fp = f(xp)
|
| 872 |
+
|
| 873 |
+
if fp <= C + eta - gamma * alpha_p**2 * f_k:
|
| 874 |
+
alpha = alpha_p
|
| 875 |
+
break
|
| 876 |
+
|
| 877 |
+
alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
|
| 878 |
+
|
| 879 |
+
xp = x_k - alpha_m * d
|
| 880 |
+
fp, Fp = f(xp)
|
| 881 |
+
|
| 882 |
+
if fp <= C + eta - gamma * alpha_m**2 * f_k:
|
| 883 |
+
alpha = -alpha_m
|
| 884 |
+
break
|
| 885 |
+
|
| 886 |
+
alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
|
| 887 |
+
|
| 888 |
+
alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
|
| 889 |
+
alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
|
| 890 |
+
|
| 891 |
+
# Update C and Q
|
| 892 |
+
Q_next = nu * Q + 1
|
| 893 |
+
C = (nu * Q * (C + eta) + fp) / Q_next
|
| 894 |
+
Q = Q_next
|
| 895 |
+
|
| 896 |
+
return alpha, xp, fp, Fp, C, Q
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_doc.py
ADDED
|
@@ -0,0 +1,1434 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Created on Sat Aug 22 19:49:17 2020
|
| 3 |
+
|
| 4 |
+
@author: matth
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def _linprog_highs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
                       bounds=None, method='highs', callback=None,
                       maxiter=None, disp=False, presolve=True,
                       time_limit=None,
                       dual_feasibility_tolerance=None,
                       primal_feasibility_tolerance=None,
                       ipm_optimality_tolerance=None,
                       simplex_dual_edge_weight_strategy=None,
                       mip_rel_gap=None,
                       **unknown_options):
    r"""
    Linear programming: minimize a linear objective function subject to linear
    equality and inequality constraints using one of the HiGHS solvers.

    Linear programming solves problems of the following form:

    .. math::

        \min_x \ & c^T x \\
        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
        & A_{eq} x = b_{eq},\\
        & l \leq x \leq u ,

    where :math:`x` is a vector of decision variables; :math:`c`,
    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
    :math:`A_{ub}` and :math:`A_{eq}` are matrices.

    Alternatively, that's:

    minimize::

        c @ x

    such that::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub

    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
    ``bounds``.

    Parameters
    ----------
    c : 1-D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2-D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1-D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2-D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1-D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must equal
        the corresponding element of ``b_eq``.
    bounds : sequence, optional
        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
        the minimum and maximum values of that decision variable. Use ``None``
        to indicate that there is no bound. By default, bounds are
        ``(0, None)`` (all decision variables are non-negative).
        If a single tuple ``(min, max)`` is provided, then ``min`` and
        ``max`` will serve as bounds for all decision variables.
    method : str

        This is the method-specific documentation for 'highs', which chooses
        automatically between
        :ref:`'highs-ds' <optimize.linprog-highs-ds>` and
        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
        :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
        :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
        :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
        are also available.
    integrality : 1-D array or int, optional
        Indicates the type of integrality constraint on each decision variable.

        ``0`` : Continuous variable; no integrality constraint.

        ``1`` : Integer variable; decision variable must be an integer
        within `bounds`.

        ``2`` : Semi-continuous variable; decision variable must be within
        `bounds` or take value ``0``.

        ``3`` : Semi-integer variable; decision variable must be an integer
        within `bounds` or take value ``0``.

        By default, all variables are continuous.

        For mixed integrality constraints, supply an array of shape `c.shape`.
        To infer a constraint on each decision variable from shorter inputs,
        the argument will be broadcasted to `c.shape` using `np.broadcast_to`.

        This argument is currently used only by the ``'highs'`` method and
        ignored otherwise.

    Options
    -------
    maxiter : int
        The maximum number of iterations to perform in either phase.
        For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
        include the number of crossover iterations. Default is the largest
        possible value for an ``int`` on the platform.
    disp : bool (default: ``False``)
        Set to ``True`` if indicators of optimization status are to be
        printed to the console during optimization.
    presolve : bool (default: ``True``)
        Presolve attempts to identify trivial infeasibilities,
        identify trivial unboundedness, and simplify the problem before
        sending it to the main solver. It is generally recommended
        to keep the default setting ``True``; set to ``False`` if
        presolve is to be disabled.
    time_limit : float
        The maximum time in seconds allotted to solve the problem;
        default is the largest possible value for a ``double`` on the
        platform.
    dual_feasibility_tolerance : double (default: 1e-07)
        Dual feasibility tolerance for
        :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
        The minimum of this and ``primal_feasibility_tolerance``
        is used for the feasibility tolerance of
        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
    primal_feasibility_tolerance : double (default: 1e-07)
        Primal feasibility tolerance for
        :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
        The minimum of this and ``dual_feasibility_tolerance``
        is used for the feasibility tolerance of
        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
    ipm_optimality_tolerance : double (default: ``1e-08``)
        Optimality tolerance for
        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
        Minimum allowable value is 1e-12.
    simplex_dual_edge_weight_strategy : str (default: None)
        Strategy for simplex dual edge weights. The default, ``None``,
        automatically selects one of the following.

        ``'dantzig'`` uses Dantzig's original strategy of choosing the most
        negative reduced cost.

        ``'devex'`` uses the strategy described in [15]_.

        ``'steepest'`` uses the exact steepest edge strategy as described in
        [16]_.

        ``'steepest-devex'`` begins with the exact steepest edge strategy
        until the computation is too costly or inexact and then switches to
        the devex method.

        Currently, ``None`` always selects ``'steepest-devex'``, but this
        may change as new options become available.
    mip_rel_gap : double (default: None)
        Termination criterion for MIP solver: solver will terminate when the
        gap between the primal objective value and the dual objective bound,
        scaled by the primal objective value, is <= mip_rel_gap.
    unknown_options : dict
        Optional arguments not used by this particular solver. If
        ``unknown_options`` is non-empty, a warning is issued listing
        all unused options.

    Returns
    -------
    res : OptimizeResult
        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:

        x : 1D array
            The values of the decision variables that minimizes the
            objective function while satisfying the constraints.
        fun : float
            The optimal value of the objective function ``c @ x``.
        slack : 1D array
            The (nominally positive) values of the slack,
            ``b_ub - A_ub @ x``.
        con : 1D array
            The (nominally zero) residuals of the equality constraints,
            ``b_eq - A_eq @ x``.
        success : bool
            ``True`` when the algorithm succeeds in finding an optimal
            solution.
        status : int
            An integer representing the exit status of the algorithm.

            ``0`` : Optimization terminated successfully.

            ``1`` : Iteration or time limit reached.

            ``2`` : Problem appears to be infeasible.

            ``3`` : Problem appears to be unbounded.

            ``4`` : The HiGHS solver ran into a problem.

        message : str
            A string descriptor of the exit status of the algorithm.
        nit : int
            The total number of iterations performed.
            For the HiGHS simplex method, this includes iterations in all
            phases. For the HiGHS interior-point method, this does not include
            crossover iterations.
        crossover_nit : int
            The number of primal/dual pushes performed during the
            crossover routine for the HiGHS interior-point method.
            This is ``0`` for the HiGHS simplex method.
        ineqlin : OptimizeResult
            Solution and sensitivity information corresponding to the
            inequality constraints, `b_ub`. A dictionary consisting of the
            fields:

            residual : np.ndarray
                The (nominally positive) values of the slack variables,
                ``b_ub - A_ub @ x``. This quantity is also commonly
                referred to as "slack".

            marginals : np.ndarray
                The sensitivity (partial derivative) of the objective
                function with respect to the right-hand side of the
                inequality constraints, `b_ub`.

        eqlin : OptimizeResult
            Solution and sensitivity information corresponding to the
            equality constraints, `b_eq`. A dictionary consisting of the
            fields:

            residual : np.ndarray
                The (nominally zero) residuals of the equality constraints,
                ``b_eq - A_eq @ x``.

            marginals : np.ndarray
                The sensitivity (partial derivative) of the objective
                function with respect to the right-hand side of the
                equality constraints, `b_eq`.

        lower, upper : OptimizeResult
            Solution and sensitivity information corresponding to the
            lower and upper bounds on decision variables, `bounds`.

            residual : np.ndarray
                The (nominally positive) values of the quantity
                ``x - lb`` (lower) or ``ub - x`` (upper).

            marginals : np.ndarray
                The sensitivity (partial derivative) of the objective
                function with respect to the lower and upper
                `bounds`.

    Notes
    -----

    Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
    of the C++ high performance dual revised simplex implementation (HSOL)
    [13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
    is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
    **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
    as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
    between the two automatically. For new code involving `linprog`, we
    recommend explicitly choosing one of these three method values instead of
    :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
    :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
    :ref:`'simplex' <optimize.linprog-simplex>` (legacy).

    The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
    `marginals`, or partial derivatives of the objective function with respect
    to the right-hand side of each constraint. These partial derivatives are
    also referred to as "Lagrange multipliers", "dual values", and
    "shadow prices". The sign convention of `marginals` is opposite that
    of Lagrange multipliers produced by many nonlinear solvers.

    References
    ----------
    .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
            "HiGHS - high performance software for linear optimization."
            https://highs.dev/
    .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
            simplex method." Mathematical Programming Computation, 10 (1),
            119-142, 2018. DOI: 10.1007/s12532-017-0130-5
    .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
            Mathematical programming 5.1 (1973): 1-28.
    .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
            simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
    """
    # Documentation stub only: this function is never called. Its docstring is
    # harvested elsewhere (presumably by linprog's doc machinery — the caller
    # is outside this file) to render method-specific help for method='highs'.
    pass
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
|
| 293 |
+
bounds=None, method='highs-ds', callback=None,
|
| 294 |
+
maxiter=None, disp=False, presolve=True,
|
| 295 |
+
time_limit=None,
|
| 296 |
+
dual_feasibility_tolerance=None,
|
| 297 |
+
primal_feasibility_tolerance=None,
|
| 298 |
+
simplex_dual_edge_weight_strategy=None,
|
| 299 |
+
**unknown_options):
|
| 300 |
+
r"""
|
| 301 |
+
Linear programming: minimize a linear objective function subject to linear
|
| 302 |
+
equality and inequality constraints using the HiGHS dual simplex solver.
|
| 303 |
+
|
| 304 |
+
Linear programming solves problems of the following form:
|
| 305 |
+
|
| 306 |
+
.. math::
|
| 307 |
+
|
| 308 |
+
\min_x \ & c^T x \\
|
| 309 |
+
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
|
| 310 |
+
& A_{eq} x = b_{eq},\\
|
| 311 |
+
& l \leq x \leq u ,
|
| 312 |
+
|
| 313 |
+
where :math:`x` is a vector of decision variables; :math:`c`,
|
| 314 |
+
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
|
| 315 |
+
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
|
| 316 |
+
|
| 317 |
+
Alternatively, that's:
|
| 318 |
+
|
| 319 |
+
minimize::
|
| 320 |
+
|
| 321 |
+
c @ x
|
| 322 |
+
|
| 323 |
+
such that::
|
| 324 |
+
|
| 325 |
+
A_ub @ x <= b_ub
|
| 326 |
+
A_eq @ x == b_eq
|
| 327 |
+
lb <= x <= ub
|
| 328 |
+
|
| 329 |
+
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
|
| 330 |
+
``bounds``.
|
| 331 |
+
|
| 332 |
+
Parameters
|
| 333 |
+
----------
|
| 334 |
+
c : 1-D array
|
| 335 |
+
The coefficients of the linear objective function to be minimized.
|
| 336 |
+
A_ub : 2-D array, optional
|
| 337 |
+
The inequality constraint matrix. Each row of ``A_ub`` specifies the
|
| 338 |
+
coefficients of a linear inequality constraint on ``x``.
|
| 339 |
+
b_ub : 1-D array, optional
|
| 340 |
+
The inequality constraint vector. Each element represents an
|
| 341 |
+
upper bound on the corresponding value of ``A_ub @ x``.
|
| 342 |
+
A_eq : 2-D array, optional
|
| 343 |
+
The equality constraint matrix. Each row of ``A_eq`` specifies the
|
| 344 |
+
coefficients of a linear equality constraint on ``x``.
|
| 345 |
+
b_eq : 1-D array, optional
|
| 346 |
+
The equality constraint vector. Each element of ``A_eq @ x`` must equal
|
| 347 |
+
the corresponding element of ``b_eq``.
|
| 348 |
+
bounds : sequence, optional
|
| 349 |
+
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
|
| 350 |
+
the minimum and maximum values of that decision variable. Use ``None``
|
| 351 |
+
to indicate that there is no bound. By default, bounds are
|
| 352 |
+
``(0, None)`` (all decision variables are non-negative).
|
| 353 |
+
If a single tuple ``(min, max)`` is provided, then ``min`` and
|
| 354 |
+
``max`` will serve as bounds for all decision variables.
|
| 355 |
+
method : str
|
| 356 |
+
|
| 357 |
+
This is the method-specific documentation for 'highs-ds'.
|
| 358 |
+
:ref:`'highs' <optimize.linprog-highs>`,
|
| 359 |
+
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
|
| 360 |
+
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
|
| 361 |
+
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
|
| 362 |
+
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
|
| 363 |
+
are also available.
|
| 364 |
+
|
| 365 |
+
Options
|
| 366 |
+
-------
|
| 367 |
+
maxiter : int
|
| 368 |
+
The maximum number of iterations to perform in either phase.
|
| 369 |
+
Default is the largest possible value for an ``int`` on the platform.
|
| 370 |
+
disp : bool (default: ``False``)
|
| 371 |
+
Set to ``True`` if indicators of optimization status are to be
|
| 372 |
+
printed to the console during optimization.
|
| 373 |
+
presolve : bool (default: ``True``)
|
| 374 |
+
Presolve attempts to identify trivial infeasibilities,
|
| 375 |
+
identify trivial unboundedness, and simplify the problem before
|
| 376 |
+
sending it to the main solver. It is generally recommended
|
| 377 |
+
to keep the default setting ``True``; set to ``False`` if
|
| 378 |
+
presolve is to be disabled.
|
| 379 |
+
time_limit : float
|
| 380 |
+
The maximum time in seconds allotted to solve the problem;
|
| 381 |
+
default is the largest possible value for a ``double`` on the
|
| 382 |
+
platform.
|
| 383 |
+
dual_feasibility_tolerance : double (default: 1e-07)
|
| 384 |
+
Dual feasibility tolerance for
|
| 385 |
+
:ref:`'highs-ds' <optimize.linprog-highs-ds>`.
|
| 386 |
+
primal_feasibility_tolerance : double (default: 1e-07)
|
| 387 |
+
Primal feasibility tolerance for
|
| 388 |
+
:ref:`'highs-ds' <optimize.linprog-highs-ds>`.
|
| 389 |
+
simplex_dual_edge_weight_strategy : str (default: None)
|
| 390 |
+
Strategy for simplex dual edge weights. The default, ``None``,
|
| 391 |
+
automatically selects one of the following.
|
| 392 |
+
|
| 393 |
+
``'dantzig'`` uses Dantzig's original strategy of choosing the most
|
| 394 |
+
negative reduced cost.
|
| 395 |
+
|
| 396 |
+
``'devex'`` uses the strategy described in [15]_.
|
| 397 |
+
|
| 398 |
+
``steepest`` uses the exact steepest edge strategy as described in
|
| 399 |
+
[16]_.
|
| 400 |
+
|
| 401 |
+
``'steepest-devex'`` begins with the exact steepest edge strategy
|
| 402 |
+
until the computation is too costly or inexact and then switches to
|
| 403 |
+
the devex method.
|
| 404 |
+
|
| 405 |
+
Currently, ``None`` always selects ``'steepest-devex'``, but this
|
| 406 |
+
may change as new options become available.
|
| 407 |
+
unknown_options : dict
|
| 408 |
+
Optional arguments not used by this particular solver. If
|
| 409 |
+
``unknown_options`` is non-empty, a warning is issued listing
|
| 410 |
+
all unused options.
|
| 411 |
+
|
| 412 |
+
Returns
|
| 413 |
+
-------
|
| 414 |
+
res : OptimizeResult
|
| 415 |
+
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
|
| 416 |
+
|
| 417 |
+
x : 1D array
|
| 418 |
+
The values of the decision variables that minimizes the
|
| 419 |
+
objective function while satisfying the constraints.
|
| 420 |
+
fun : float
|
| 421 |
+
The optimal value of the objective function ``c @ x``.
|
| 422 |
+
slack : 1D array
|
| 423 |
+
The (nominally positive) values of the slack,
|
| 424 |
+
``b_ub - A_ub @ x``.
|
| 425 |
+
con : 1D array
|
| 426 |
+
The (nominally zero) residuals of the equality constraints,
|
| 427 |
+
``b_eq - A_eq @ x``.
|
| 428 |
+
success : bool
|
| 429 |
+
``True`` when the algorithm succeeds in finding an optimal
|
| 430 |
+
solution.
|
| 431 |
+
status : int
|
| 432 |
+
An integer representing the exit status of the algorithm.
|
| 433 |
+
|
| 434 |
+
``0`` : Optimization terminated successfully.
|
| 435 |
+
|
| 436 |
+
``1`` : Iteration or time limit reached.
|
| 437 |
+
|
| 438 |
+
``2`` : Problem appears to be infeasible.
|
| 439 |
+
|
| 440 |
+
``3`` : Problem appears to be unbounded.
|
| 441 |
+
|
| 442 |
+
``4`` : The HiGHS solver ran into a problem.
|
| 443 |
+
|
| 444 |
+
message : str
|
| 445 |
+
A string descriptor of the exit status of the algorithm.
|
| 446 |
+
nit : int
|
| 447 |
+
The total number of iterations performed. This includes iterations
|
| 448 |
+
in all phases.
|
| 449 |
+
crossover_nit : int
|
| 450 |
+
This is always ``0`` for the HiGHS simplex method.
|
| 451 |
+
For the HiGHS interior-point method, this is the number of
|
| 452 |
+
primal/dual pushes performed during the crossover routine.
|
| 453 |
+
ineqlin : OptimizeResult
|
| 454 |
+
Solution and sensitivity information corresponding to the
|
| 455 |
+
inequality constraints, `b_ub`. A dictionary consisting of the
|
| 456 |
+
fields:
|
| 457 |
+
|
| 458 |
+
residual : np.ndnarray
|
| 459 |
+
The (nominally positive) values of the slack variables,
|
| 460 |
+
``b_ub - A_ub @ x``. This quantity is also commonly
|
| 461 |
+
referred to as "slack".
|
| 462 |
+
|
| 463 |
+
marginals : np.ndarray
|
| 464 |
+
The sensitivity (partial derivative) of the objective
|
| 465 |
+
function with respect to the right-hand side of the
|
| 466 |
+
inequality constraints, `b_ub`.
|
| 467 |
+
|
| 468 |
+
eqlin : OptimizeResult
|
| 469 |
+
Solution and sensitivity information corresponding to the
|
| 470 |
+
equality constraints, `b_eq`. A dictionary consisting of the
|
| 471 |
+
fields:
|
| 472 |
+
|
| 473 |
+
residual : np.ndarray
|
| 474 |
+
The (nominally zero) residuals of the equality constraints,
|
| 475 |
+
``b_eq - A_eq @ x``.
|
| 476 |
+
|
| 477 |
+
marginals : np.ndarray
|
| 478 |
+
The sensitivity (partial derivative) of the objective
|
| 479 |
+
function with respect to the right-hand side of the
|
| 480 |
+
equality constraints, `b_eq`.
|
| 481 |
+
|
| 482 |
+
lower, upper : OptimizeResult
|
| 483 |
+
Solution and sensitivity information corresponding to the
|
| 484 |
+
lower and upper bounds on decision variables, `bounds`.
|
| 485 |
+
|
| 486 |
+
residual : np.ndarray
|
| 487 |
+
The (nominally positive) values of the quantity
|
| 488 |
+
``x - lb`` (lower) or ``ub - x`` (upper).
|
| 489 |
+
|
| 490 |
+
marginals : np.ndarray
|
| 491 |
+
The sensitivity (partial derivative) of the objective
|
| 492 |
+
function with respect to the lower and upper
|
| 493 |
+
`bounds`.
|
| 494 |
+
|
| 495 |
+
Notes
|
| 496 |
+
-----
|
| 497 |
+
|
| 498 |
+
Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
|
| 499 |
+
of the C++ high performance dual revised simplex implementation (HSOL)
|
| 500 |
+
[13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
|
| 501 |
+
is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
|
| 502 |
+
**m**\ ethod [13]_; it features a crossover routine, so it is as accurate
|
| 503 |
+
as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
|
| 504 |
+
between the two automatically. For new code involving `linprog`, we
|
| 505 |
+
recommend explicitly choosing one of these three method values instead of
|
| 506 |
+
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
|
| 507 |
+
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
|
| 508 |
+
:ref:`'simplex' <optimize.linprog-simplex>` (legacy).
|
| 509 |
+
|
| 510 |
+
The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
|
| 511 |
+
`marginals`, or partial derivatives of the objective function with respect
|
| 512 |
+
to the right-hand side of each constraint. These partial derivatives are
|
| 513 |
+
also referred to as "Lagrange multipliers", "dual values", and
|
| 514 |
+
"shadow prices". The sign convention of `marginals` is opposite that
|
| 515 |
+
of Lagrange multipliers produced by many nonlinear solvers.
|
| 516 |
+
|
| 517 |
+
References
|
| 518 |
+
----------
|
| 519 |
+
.. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
|
| 520 |
+
"HiGHS - high performance software for linear optimization."
|
| 521 |
+
https://highs.dev/
|
| 522 |
+
.. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
|
| 523 |
+
simplex method." Mathematical Programming Computation, 10 (1),
|
| 524 |
+
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
|
| 525 |
+
.. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
|
| 526 |
+
Mathematical programming 5.1 (1973): 1-28.
|
| 527 |
+
.. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
|
| 528 |
+
simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
|
| 529 |
+
"""
|
| 530 |
+
pass
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
|
| 534 |
+
bounds=None, method='highs-ipm', callback=None,
|
| 535 |
+
maxiter=None, disp=False, presolve=True,
|
| 536 |
+
time_limit=None,
|
| 537 |
+
dual_feasibility_tolerance=None,
|
| 538 |
+
primal_feasibility_tolerance=None,
|
| 539 |
+
ipm_optimality_tolerance=None,
|
| 540 |
+
**unknown_options):
|
| 541 |
+
r"""
|
| 542 |
+
Linear programming: minimize a linear objective function subject to linear
|
| 543 |
+
equality and inequality constraints using the HiGHS interior point solver.
|
| 544 |
+
|
| 545 |
+
Linear programming solves problems of the following form:
|
| 546 |
+
|
| 547 |
+
.. math::
|
| 548 |
+
|
| 549 |
+
\min_x \ & c^T x \\
|
| 550 |
+
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
|
| 551 |
+
& A_{eq} x = b_{eq},\\
|
| 552 |
+
& l \leq x \leq u ,
|
| 553 |
+
|
| 554 |
+
where :math:`x` is a vector of decision variables; :math:`c`,
|
| 555 |
+
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
|
| 556 |
+
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
|
| 557 |
+
|
| 558 |
+
Alternatively, that's:
|
| 559 |
+
|
| 560 |
+
minimize::
|
| 561 |
+
|
| 562 |
+
c @ x
|
| 563 |
+
|
| 564 |
+
such that::
|
| 565 |
+
|
| 566 |
+
A_ub @ x <= b_ub
|
| 567 |
+
A_eq @ x == b_eq
|
| 568 |
+
lb <= x <= ub
|
| 569 |
+
|
| 570 |
+
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
|
| 571 |
+
``bounds``.
|
| 572 |
+
|
| 573 |
+
Parameters
|
| 574 |
+
----------
|
| 575 |
+
c : 1-D array
|
| 576 |
+
The coefficients of the linear objective function to be minimized.
|
| 577 |
+
A_ub : 2-D array, optional
|
| 578 |
+
The inequality constraint matrix. Each row of ``A_ub`` specifies the
|
| 579 |
+
coefficients of a linear inequality constraint on ``x``.
|
| 580 |
+
b_ub : 1-D array, optional
|
| 581 |
+
The inequality constraint vector. Each element represents an
|
| 582 |
+
upper bound on the corresponding value of ``A_ub @ x``.
|
| 583 |
+
A_eq : 2-D array, optional
|
| 584 |
+
The equality constraint matrix. Each row of ``A_eq`` specifies the
|
| 585 |
+
coefficients of a linear equality constraint on ``x``.
|
| 586 |
+
b_eq : 1-D array, optional
|
| 587 |
+
The equality constraint vector. Each element of ``A_eq @ x`` must equal
|
| 588 |
+
the corresponding element of ``b_eq``.
|
| 589 |
+
bounds : sequence, optional
|
| 590 |
+
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
|
| 591 |
+
the minimum and maximum values of that decision variable. Use ``None``
|
| 592 |
+
to indicate that there is no bound. By default, bounds are
|
| 593 |
+
``(0, None)`` (all decision variables are non-negative).
|
| 594 |
+
If a single tuple ``(min, max)`` is provided, then ``min`` and
|
| 595 |
+
``max`` will serve as bounds for all decision variables.
|
| 596 |
+
method : str
|
| 597 |
+
|
| 598 |
+
This is the method-specific documentation for 'highs-ipm'.
|
| 599 |
+
:ref:`'highs-ipm' <optimize.linprog-highs>`,
|
| 600 |
+
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
|
| 601 |
+
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
|
| 602 |
+
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
|
| 603 |
+
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
|
| 604 |
+
are also available.
|
| 605 |
+
|
| 606 |
+
Options
|
| 607 |
+
-------
|
| 608 |
+
maxiter : int
|
| 609 |
+
The maximum number of iterations to perform in either phase.
|
| 610 |
+
For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
|
| 611 |
+
include the number of crossover iterations. Default is the largest
|
| 612 |
+
possible value for an ``int`` on the platform.
|
| 613 |
+
disp : bool (default: ``False``)
|
| 614 |
+
Set to ``True`` if indicators of optimization status are to be
|
| 615 |
+
printed to the console during optimization.
|
| 616 |
+
presolve : bool (default: ``True``)
|
| 617 |
+
Presolve attempts to identify trivial infeasibilities,
|
| 618 |
+
identify trivial unboundedness, and simplify the problem before
|
| 619 |
+
sending it to the main solver. It is generally recommended
|
| 620 |
+
to keep the default setting ``True``; set to ``False`` if
|
| 621 |
+
presolve is to be disabled.
|
| 622 |
+
time_limit : float
|
| 623 |
+
The maximum time in seconds allotted to solve the problem;
|
| 624 |
+
default is the largest possible value for a ``double`` on the
|
| 625 |
+
platform.
|
| 626 |
+
dual_feasibility_tolerance : double (default: 1e-07)
|
| 627 |
+
The minimum of this and ``primal_feasibility_tolerance``
|
| 628 |
+
is used for the feasibility tolerance of
|
| 629 |
+
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
|
| 630 |
+
primal_feasibility_tolerance : double (default: 1e-07)
|
| 631 |
+
The minimum of this and ``dual_feasibility_tolerance``
|
| 632 |
+
is used for the feasibility tolerance of
|
| 633 |
+
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
|
| 634 |
+
ipm_optimality_tolerance : double (default: ``1e-08``)
|
| 635 |
+
Optimality tolerance for
|
| 636 |
+
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
|
| 637 |
+
Minimum allowable value is 1e-12.
|
| 638 |
+
unknown_options : dict
|
| 639 |
+
Optional arguments not used by this particular solver. If
|
| 640 |
+
``unknown_options`` is non-empty, a warning is issued listing
|
| 641 |
+
all unused options.
|
| 642 |
+
|
| 643 |
+
Returns
|
| 644 |
+
-------
|
| 645 |
+
res : OptimizeResult
|
| 646 |
+
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
|
| 647 |
+
|
| 648 |
+
x : 1D array
|
| 649 |
+
The values of the decision variables that minimizes the
|
| 650 |
+
objective function while satisfying the constraints.
|
| 651 |
+
fun : float
|
| 652 |
+
The optimal value of the objective function ``c @ x``.
|
| 653 |
+
slack : 1D array
|
| 654 |
+
The (nominally positive) values of the slack,
|
| 655 |
+
``b_ub - A_ub @ x``.
|
| 656 |
+
con : 1D array
|
| 657 |
+
The (nominally zero) residuals of the equality constraints,
|
| 658 |
+
``b_eq - A_eq @ x``.
|
| 659 |
+
success : bool
|
| 660 |
+
``True`` when the algorithm succeeds in finding an optimal
|
| 661 |
+
solution.
|
| 662 |
+
status : int
|
| 663 |
+
An integer representing the exit status of the algorithm.
|
| 664 |
+
|
| 665 |
+
``0`` : Optimization terminated successfully.
|
| 666 |
+
|
| 667 |
+
``1`` : Iteration or time limit reached.
|
| 668 |
+
|
| 669 |
+
``2`` : Problem appears to be infeasible.
|
| 670 |
+
|
| 671 |
+
``3`` : Problem appears to be unbounded.
|
| 672 |
+
|
| 673 |
+
``4`` : The HiGHS solver ran into a problem.
|
| 674 |
+
|
| 675 |
+
message : str
|
| 676 |
+
A string descriptor of the exit status of the algorithm.
|
| 677 |
+
nit : int
|
| 678 |
+
The total number of iterations performed.
|
| 679 |
+
For the HiGHS interior-point method, this does not include
|
| 680 |
+
crossover iterations.
|
| 681 |
+
crossover_nit : int
|
| 682 |
+
The number of primal/dual pushes performed during the
|
| 683 |
+
crossover routine for the HiGHS interior-point method.
|
| 684 |
+
ineqlin : OptimizeResult
|
| 685 |
+
Solution and sensitivity information corresponding to the
|
| 686 |
+
inequality constraints, `b_ub`. A dictionary consisting of the
|
| 687 |
+
fields:
|
| 688 |
+
|
| 689 |
+
residual : np.ndarray
|
| 690 |
+
The (nominally positive) values of the slack variables,
|
| 691 |
+
``b_ub - A_ub @ x``. This quantity is also commonly
|
| 692 |
+
referred to as "slack".
|
| 693 |
+
|
| 694 |
+
marginals : np.ndarray
|
| 695 |
+
The sensitivity (partial derivative) of the objective
|
| 696 |
+
function with respect to the right-hand side of the
|
| 697 |
+
inequality constraints, `b_ub`.
|
| 698 |
+
|
| 699 |
+
eqlin : OptimizeResult
|
| 700 |
+
Solution and sensitivity information corresponding to the
|
| 701 |
+
equality constraints, `b_eq`. A dictionary consisting of the
|
| 702 |
+
fields:
|
| 703 |
+
|
| 704 |
+
residual : np.ndarray
|
| 705 |
+
The (nominally zero) residuals of the equality constraints,
|
| 706 |
+
``b_eq - A_eq @ x``.
|
| 707 |
+
|
| 708 |
+
marginals : np.ndarray
|
| 709 |
+
The sensitivity (partial derivative) of the objective
|
| 710 |
+
function with respect to the right-hand side of the
|
| 711 |
+
equality constraints, `b_eq`.
|
| 712 |
+
|
| 713 |
+
lower, upper : OptimizeResult
|
| 714 |
+
Solution and sensitivity information corresponding to the
|
| 715 |
+
lower and upper bounds on decision variables, `bounds`.
|
| 716 |
+
|
| 717 |
+
residual : np.ndarray
|
| 718 |
+
The (nominally positive) values of the quantity
|
| 719 |
+
``x - lb`` (lower) or ``ub - x`` (upper).
|
| 720 |
+
|
| 721 |
+
marginals : np.ndarray
|
| 722 |
+
The sensitivity (partial derivative) of the objective
|
| 723 |
+
function with respect to the lower and upper
|
| 724 |
+
`bounds`.
|
| 725 |
+
|
| 726 |
+
Notes
|
| 727 |
+
-----
|
| 728 |
+
|
| 729 |
+
Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
|
| 730 |
+
is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
|
| 731 |
+
**m**\ ethod [13]_; it features a crossover routine, so it is as accurate
|
| 732 |
+
as a simplex solver.
|
| 733 |
+
Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
|
| 734 |
+
of the C++ high performance dual revised simplex implementation (HSOL)
|
| 735 |
+
[13]_, [14]_. Method :ref:`'highs' <optimize.linprog-highs>` chooses
|
| 736 |
+
between the two automatically. For new code involving `linprog`, we
|
| 737 |
+
recommend explicitly choosing one of these three method values instead of
|
| 738 |
+
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
|
| 739 |
+
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
|
| 740 |
+
:ref:`'simplex' <optimize.linprog-simplex>` (legacy).
|
| 741 |
+
|
| 742 |
+
The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
|
| 743 |
+
`marginals`, or partial derivatives of the objective function with respect
|
| 744 |
+
to the right-hand side of each constraint. These partial derivatives are
|
| 745 |
+
also referred to as "Lagrange multipliers", "dual values", and
|
| 746 |
+
"shadow prices". The sign convention of `marginals` is opposite that
|
| 747 |
+
of Lagrange multipliers produced by many nonlinear solvers.
|
| 748 |
+
|
| 749 |
+
References
|
| 750 |
+
----------
|
| 751 |
+
.. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
|
| 752 |
+
"HiGHS - high performance software for linear optimization."
|
| 753 |
+
https://highs.dev/
|
| 754 |
+
.. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
|
| 755 |
+
simplex method." Mathematical Programming Computation, 10 (1),
|
| 756 |
+
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
|
| 757 |
+
"""
|
| 758 |
+
pass
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
def _linprog_ip_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
|
| 762 |
+
bounds=None, method='interior-point', callback=None,
|
| 763 |
+
maxiter=1000, disp=False, presolve=True,
|
| 764 |
+
tol=1e-8, autoscale=False, rr=True,
|
| 765 |
+
alpha0=.99995, beta=0.1, sparse=False,
|
| 766 |
+
lstsq=False, sym_pos=True, cholesky=True, pc=True,
|
| 767 |
+
ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options):
|
| 768 |
+
r"""
|
| 769 |
+
Linear programming: minimize a linear objective function subject to linear
|
| 770 |
+
equality and inequality constraints using the interior-point method of
|
| 771 |
+
[4]_.
|
| 772 |
+
|
| 773 |
+
.. deprecated:: 1.9.0
|
| 774 |
+
`method='interior-point'` will be removed in SciPy 1.11.0.
|
| 775 |
+
It is replaced by `method='highs'` because the latter is
|
| 776 |
+
faster and more robust.
|
| 777 |
+
|
| 778 |
+
Linear programming solves problems of the following form:
|
| 779 |
+
|
| 780 |
+
.. math::
|
| 781 |
+
|
| 782 |
+
\min_x \ & c^T x \\
|
| 783 |
+
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
|
| 784 |
+
& A_{eq} x = b_{eq},\\
|
| 785 |
+
& l \leq x \leq u ,
|
| 786 |
+
|
| 787 |
+
where :math:`x` is a vector of decision variables; :math:`c`,
|
| 788 |
+
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
|
| 789 |
+
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
|
| 790 |
+
|
| 791 |
+
Alternatively, that's:
|
| 792 |
+
|
| 793 |
+
minimize::
|
| 794 |
+
|
| 795 |
+
c @ x
|
| 796 |
+
|
| 797 |
+
such that::
|
| 798 |
+
|
| 799 |
+
A_ub @ x <= b_ub
|
| 800 |
+
A_eq @ x == b_eq
|
| 801 |
+
lb <= x <= ub
|
| 802 |
+
|
| 803 |
+
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
|
| 804 |
+
``bounds``.
|
| 805 |
+
|
| 806 |
+
Parameters
|
| 807 |
+
----------
|
| 808 |
+
c : 1-D array
|
| 809 |
+
The coefficients of the linear objective function to be minimized.
|
| 810 |
+
A_ub : 2-D array, optional
|
| 811 |
+
The inequality constraint matrix. Each row of ``A_ub`` specifies the
|
| 812 |
+
coefficients of a linear inequality constraint on ``x``.
|
| 813 |
+
b_ub : 1-D array, optional
|
| 814 |
+
The inequality constraint vector. Each element represents an
|
| 815 |
+
upper bound on the corresponding value of ``A_ub @ x``.
|
| 816 |
+
A_eq : 2-D array, optional
|
| 817 |
+
The equality constraint matrix. Each row of ``A_eq`` specifies the
|
| 818 |
+
coefficients of a linear equality constraint on ``x``.
|
| 819 |
+
b_eq : 1-D array, optional
|
| 820 |
+
The equality constraint vector. Each element of ``A_eq @ x`` must equal
|
| 821 |
+
the corresponding element of ``b_eq``.
|
| 822 |
+
bounds : sequence, optional
|
| 823 |
+
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
|
| 824 |
+
the minimum and maximum values of that decision variable. Use ``None``
|
| 825 |
+
to indicate that there is no bound. By default, bounds are
|
| 826 |
+
``(0, None)`` (all decision variables are non-negative).
|
| 827 |
+
If a single tuple ``(min, max)`` is provided, then ``min`` and
|
| 828 |
+
``max`` will serve as bounds for all decision variables.
|
| 829 |
+
method : str
|
| 830 |
+
This is the method-specific documentation for 'interior-point'.
|
| 831 |
+
:ref:`'highs' <optimize.linprog-highs>`,
|
| 832 |
+
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
|
| 833 |
+
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
|
| 834 |
+
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
|
| 835 |
+
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
|
| 836 |
+
are also available.
|
| 837 |
+
callback : callable, optional
|
| 838 |
+
Callback function to be executed once per iteration.
|
| 839 |
+
|
| 840 |
+
Options
|
| 841 |
+
-------
|
| 842 |
+
maxiter : int (default: 1000)
|
| 843 |
+
The maximum number of iterations of the algorithm.
|
| 844 |
+
disp : bool (default: False)
|
| 845 |
+
Set to ``True`` if indicators of optimization status are to be printed
|
| 846 |
+
to the console each iteration.
|
| 847 |
+
presolve : bool (default: True)
|
| 848 |
+
Presolve attempts to identify trivial infeasibilities,
|
| 849 |
+
identify trivial unboundedness, and simplify the problem before
|
| 850 |
+
sending it to the main solver. It is generally recommended
|
| 851 |
+
to keep the default setting ``True``; set to ``False`` if
|
| 852 |
+
presolve is to be disabled.
|
| 853 |
+
tol : float (default: 1e-8)
|
| 854 |
+
Termination tolerance to be used for all termination criteria;
|
| 855 |
+
see [4]_ Section 4.5.
|
| 856 |
+
autoscale : bool (default: False)
|
| 857 |
+
Set to ``True`` to automatically perform equilibration.
|
| 858 |
+
Consider using this option if the numerical values in the
|
| 859 |
+
constraints are separated by several orders of magnitude.
|
| 860 |
+
rr : bool (default: True)
|
| 861 |
+
Set to ``False`` to disable automatic redundancy removal.
|
| 862 |
+
alpha0 : float (default: 0.99995)
|
| 863 |
+
The maximal step size for Mehrotra's predictor-corrector search
|
| 864 |
+
direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
|
| 865 |
+
beta : float (default: 0.1)
|
| 866 |
+
The desired reduction of the path parameter :math:`\mu` (see [6]_)
|
| 867 |
+
when Mehrotra's predictor-corrector is not in use (uncommon).
|
| 868 |
+
sparse : bool (default: False)
|
| 869 |
+
Set to ``True`` if the problem is to be treated as sparse after
|
| 870 |
+
presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
|
| 871 |
+
this option will automatically be set ``True``, and the problem
|
| 872 |
+
will be treated as sparse even during presolve. If your constraint
|
| 873 |
+
matrices contain mostly zeros and the problem is not very small (less
|
| 874 |
+
than about 100 constraints or variables), consider setting ``True``
|
| 875 |
+
or providing ``A_eq`` and ``A_ub`` as sparse matrices.
|
| 876 |
+
lstsq : bool (default: ``False``)
|
| 877 |
+
Set to ``True`` if the problem is expected to be very poorly
|
| 878 |
+
conditioned. This should always be left ``False`` unless severe
|
| 879 |
+
numerical difficulties are encountered. Leave this at the default
|
| 880 |
+
unless you receive a warning message suggesting otherwise.
|
| 881 |
+
sym_pos : bool (default: True)
|
| 882 |
+
Leave ``True`` if the problem is expected to yield a well conditioned
|
| 883 |
+
symmetric positive definite normal equation matrix
|
| 884 |
+
(almost always). Leave this at the default unless you receive
|
| 885 |
+
a warning message suggesting otherwise.
|
| 886 |
+
cholesky : bool (default: True)
|
| 887 |
+
Set to ``True`` if the normal equations are to be solved by explicit
|
| 888 |
+
Cholesky decomposition followed by explicit forward/backward
|
| 889 |
+
substitution. This is typically faster for problems
|
| 890 |
+
that are numerically well-behaved.
|
| 891 |
+
pc : bool (default: True)
|
| 892 |
+
Leave ``True`` if the predictor-corrector method of Mehrotra is to be
|
| 893 |
+
used. This is almost always (if not always) beneficial.
|
| 894 |
+
ip : bool (default: False)
|
| 895 |
+
Set to ``True`` if the improved initial point suggestion due to [4]_
|
| 896 |
+
Section 4.3 is desired. Whether this is beneficial or not
|
| 897 |
+
depends on the problem.
|
| 898 |
+
permc_spec : str (default: 'MMD_AT_PLUS_A')
|
| 899 |
+
(Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
|
| 900 |
+
True``, and no SuiteSparse.)
|
| 901 |
+
A matrix is factorized in each iteration of the algorithm.
|
| 902 |
+
This option specifies how to permute the columns of the matrix for
|
| 903 |
+
sparsity preservation. Acceptable values are:
|
| 904 |
+
|
| 905 |
+
- ``NATURAL``: natural ordering.
|
| 906 |
+
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
|
| 907 |
+
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
|
| 908 |
+
- ``COLAMD``: approximate minimum degree column ordering.
|
| 909 |
+
|
| 910 |
+
This option can impact the convergence of the
|
| 911 |
+
interior point algorithm; test different values to determine which
|
| 912 |
+
performs best for your problem. For more information, refer to
|
| 913 |
+
``scipy.sparse.linalg.splu``.
|
| 914 |
+
unknown_options : dict
|
| 915 |
+
Optional arguments not used by this particular solver. If
|
| 916 |
+
`unknown_options` is non-empty a warning is issued listing all
|
| 917 |
+
unused options.
|
| 918 |
+
|
| 919 |
+
Returns
|
| 920 |
+
-------
|
| 921 |
+
res : OptimizeResult
|
| 922 |
+
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
|
| 923 |
+
|
| 924 |
+
x : 1-D array
|
| 925 |
+
The values of the decision variables that minimizes the
|
| 926 |
+
objective function while satisfying the constraints.
|
| 927 |
+
fun : float
|
| 928 |
+
The optimal value of the objective function ``c @ x``.
|
| 929 |
+
slack : 1-D array
|
| 930 |
+
The (nominally positive) values of the slack variables,
|
| 931 |
+
``b_ub - A_ub @ x``.
|
| 932 |
+
con : 1-D array
|
| 933 |
+
The (nominally zero) residuals of the equality constraints,
|
| 934 |
+
``b_eq - A_eq @ x``.
|
| 935 |
+
success : bool
|
| 936 |
+
``True`` when the algorithm succeeds in finding an optimal
|
| 937 |
+
solution.
|
| 938 |
+
status : int
|
| 939 |
+
An integer representing the exit status of the algorithm.
|
| 940 |
+
|
| 941 |
+
``0`` : Optimization terminated successfully.
|
| 942 |
+
|
| 943 |
+
``1`` : Iteration limit reached.
|
| 944 |
+
|
| 945 |
+
``2`` : Problem appears to be infeasible.
|
| 946 |
+
|
| 947 |
+
``3`` : Problem appears to be unbounded.
|
| 948 |
+
|
| 949 |
+
``4`` : Numerical difficulties encountered.
|
| 950 |
+
|
| 951 |
+
message : str
|
| 952 |
+
A string descriptor of the exit status of the algorithm.
|
| 953 |
+
nit : int
|
| 954 |
+
The total number of iterations performed in all phases.
|
| 955 |
+
|
| 956 |
+
|
| 957 |
+
Notes
|
| 958 |
+
-----
|
| 959 |
+
This method implements the algorithm outlined in [4]_ with ideas from [8]_
|
| 960 |
+
and a structure inspired by the simpler methods of [6]_.
|
| 961 |
+
|
| 962 |
+
The primal-dual path following method begins with initial 'guesses' of
|
| 963 |
+
the primal and dual variables of the standard form problem and iteratively
|
| 964 |
+
attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
|
| 965 |
+
problem with a gradually reduced logarithmic barrier term added to the
|
| 966 |
+
objective. This particular implementation uses a homogeneous self-dual
|
| 967 |
+
formulation, which provides certificates of infeasibility or unboundedness
|
| 968 |
+
where applicable.
|
| 969 |
+
|
| 970 |
+
The default initial point for the primal and dual variables is that
|
| 971 |
+
defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
|
| 972 |
+
point option ``ip=True``), an alternate (potentially improved) starting
|
| 973 |
+
point can be calculated according to the additional recommendations of
|
| 974 |
+
[4]_ Section 4.4.
|
| 975 |
+
|
| 976 |
+
A search direction is calculated using the predictor-corrector method
|
| 977 |
+
(single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
|
| 978 |
+
(A potential improvement would be to implement the method of multiple
|
| 979 |
+
corrections described in [4]_ Section 4.2.) In practice, this is
|
| 980 |
+
accomplished by solving the normal equations, [4]_ Section 5.1 Equations
|
| 981 |
+
8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
|
| 982 |
+
8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
|
| 983 |
+
solving the normal equations rather than 8.25 directly is that the
|
| 984 |
+
matrices involved are symmetric positive definite, so Cholesky
|
| 985 |
+
decomposition can be used rather than the more expensive LU factorization.
|
| 986 |
+
|
| 987 |
+
With default options, the solver used to perform the factorization depends
|
| 988 |
+
on third-party software availability and the conditioning of the problem.
|
| 989 |
+
|
| 990 |
+
For dense problems, solvers are tried in the following order:
|
| 991 |
+
|
| 992 |
+
1. ``scipy.linalg.cho_factor``
|
| 993 |
+
|
| 994 |
+
2. ``scipy.linalg.solve`` with option ``sym_pos=True``
|
| 995 |
+
|
| 996 |
+
3. ``scipy.linalg.solve`` with option ``sym_pos=False``
|
| 997 |
+
|
| 998 |
+
4. ``scipy.linalg.lstsq``
|
| 999 |
+
|
| 1000 |
+
For sparse problems:
|
| 1001 |
+
|
| 1002 |
+
1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are
|
| 1003 |
+
installed)
|
| 1004 |
+
|
| 1005 |
+
2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse
|
| 1006 |
+
are installed)
|
| 1007 |
+
|
| 1008 |
+
3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
|
| 1009 |
+
|
| 1010 |
+
4. ``scipy.sparse.linalg.lsqr``
|
| 1011 |
+
|
| 1012 |
+
If the solver fails for any reason, successively more robust (but slower)
|
| 1013 |
+
solvers are attempted in the order indicated. Attempting, failing, and
|
| 1014 |
+
re-starting factorization can be time consuming, so if the problem is
|
| 1015 |
+
numerically challenging, options can be set to bypass solvers that are
|
| 1016 |
+
failing. Setting ``cholesky=False`` skips to solver 2,
|
| 1017 |
+
``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
|
| 1018 |
+
to solver 4 for both sparse and dense problems.
|
| 1019 |
+
|
| 1020 |
+
Potential improvements for combatting issues associated with dense
|
| 1021 |
+
columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
|
| 1022 |
+
[10]_ Section 4.1-4.2; the latter also discusses the alleviation of
|
| 1023 |
+
accuracy issues associated with the substitution approach to free
|
| 1024 |
+
variables.
|
| 1025 |
+
|
| 1026 |
+
After calculating the search direction, the maximum possible step size
|
| 1027 |
+
that does not activate the non-negativity constraints is calculated, and
|
| 1028 |
+
the smaller of this step size and unity is applied (as in [4]_ Section
|
| 1029 |
+
4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size.
|
| 1030 |
+
|
| 1031 |
+
The new point is tested according to the termination conditions of [4]_
|
| 1032 |
+
Section 4.5. The same tolerance, which can be set using the ``tol`` option,
|
| 1033 |
+
is used for all checks. (A potential improvement would be to expose
|
| 1034 |
+
the different tolerances to be set independently.) If optimality,
|
| 1035 |
+
unboundedness, or infeasibility is detected, the solve procedure
|
| 1036 |
+
terminates; otherwise it repeats.
|
| 1037 |
+
|
| 1038 |
+
Whereas the top level ``linprog`` module expects a problem of form:
|
| 1039 |
+
|
| 1040 |
+
Minimize::
|
| 1041 |
+
|
| 1042 |
+
c @ x
|
| 1043 |
+
|
| 1044 |
+
Subject to::
|
| 1045 |
+
|
| 1046 |
+
A_ub @ x <= b_ub
|
| 1047 |
+
A_eq @ x == b_eq
|
| 1048 |
+
lb <= x <= ub
|
| 1049 |
+
|
| 1050 |
+
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The problem
|
| 1051 |
+
is automatically converted to the form:
|
| 1052 |
+
|
| 1053 |
+
Minimize::
|
| 1054 |
+
|
| 1055 |
+
c @ x
|
| 1056 |
+
|
| 1057 |
+
Subject to::
|
| 1058 |
+
|
| 1059 |
+
A @ x == b
|
| 1060 |
+
x >= 0
|
| 1061 |
+
|
| 1062 |
+
for solution. That is, the original problem contains equality, upper-bound
|
| 1063 |
+
and variable constraints whereas the method specific solver requires
|
| 1064 |
+
equality constraints and variable non-negativity. ``linprog`` converts the
|
| 1065 |
+
original problem to standard form by converting the simple bounds to upper
|
| 1066 |
+
bound constraints, introducing non-negative slack variables for inequality
|
| 1067 |
+
constraints, and expressing unbounded variables as the difference between
|
| 1068 |
+
two non-negative variables. The problem is converted back to the original
|
| 1069 |
+
form before results are reported.
|
| 1070 |
+
|
| 1071 |
+
References
|
| 1072 |
+
----------
|
| 1073 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 1074 |
+
optimizer for linear programming: an implementation of the
|
| 1075 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 1076 |
+
2000. 197-232.
|
| 1077 |
+
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
|
| 1078 |
+
Programming based on Newton's Method." Unpublished Course Notes,
|
| 1079 |
+
March 2004. Available 2/25/2017 at
|
| 1080 |
+
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
|
| 1081 |
+
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
|
| 1082 |
+
programming." Mathematical Programming 71.2 (1995): 221-245.
|
| 1083 |
+
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
|
| 1084 |
+
programming." Athena Scientific 1 (1997): 997.
|
| 1085 |
+
.. [10] Andersen, Erling D., et al. Implementation of interior point
|
| 1086 |
+
methods for large scale linear programming. HEC/Universite de
|
| 1087 |
+
Geneve, 1996.
|
| 1088 |
+
"""
|
| 1089 |
+
pass
|
| 1090 |
+
|
| 1091 |
+
|
| 1092 |
+
def _linprog_rs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
                    bounds=None, method='interior-point', callback=None,
                    x0=None, maxiter=5000, disp=False, presolve=True,
                    tol=1e-12, autoscale=False, rr=True, maxupdate=10,
                    mast=False, pivot="mrc", **unknown_options):
    r"""
    Linear programming: minimize a linear objective function subject to linear
    equality and inequality constraints using the revised simplex method.

    .. deprecated:: 1.9.0
        `method='revised simplex'` will be removed in SciPy 1.11.0.
        It is replaced by `method='highs'` because the latter is
        faster and more robust.

    Linear programming solves problems of the following form:

    .. math::

        \min_x \ & c^T x \\
        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
        & A_{eq} x = b_{eq},\\
        & l \leq x \leq u ,

    where :math:`x` is a vector of decision variables; :math:`c`,
    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
    :math:`A_{ub}` and :math:`A_{eq}` are matrices.

    Alternatively, that's:

    minimize::

        c @ x

    such that::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub

    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
    ``bounds``.

    Parameters
    ----------
    c : 1-D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2-D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1-D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2-D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1-D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must equal
        the corresponding element of ``b_eq``.
    bounds : sequence, optional
        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
        the minimum and maximum values of that decision variable. Use ``None``
        to indicate that there is no bound. By default, bounds are
        ``(0, None)`` (all decision variables are non-negative).
        If a single tuple ``(min, max)`` is provided, then ``min`` and
        ``max`` will serve as bounds for all decision variables.
    method : str
        This is the method-specific documentation for 'revised simplex'.
        :ref:`'highs' <optimize.linprog-highs>`,
        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
        :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
        and :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
        are also available.
    callback : callable, optional
        Callback function to be executed once per iteration.
    x0 : 1-D array, optional
        Guess values of the decision variables, which will be refined by
        the optimization algorithm. This argument is currently used only by the
        'revised simplex' method, and can only be used if `x0` represents a
        basic feasible solution.

    Options
    -------
    maxiter : int (default: 5000)
        The maximum number of iterations to perform in either phase.
    disp : bool (default: False)
        Set to ``True`` if indicators of optimization status are to be printed
        to the console each iteration.
    presolve : bool (default: True)
        Presolve attempts to identify trivial infeasibilities,
        identify trivial unboundedness, and simplify the problem before
        sending it to the main solver. It is generally recommended
        to keep the default setting ``True``; set to ``False`` if
        presolve is to be disabled.
    tol : float (default: 1e-12)
        The tolerance which determines when a solution is "close enough" to
        zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.
    autoscale : bool (default: False)
        Set to ``True`` to automatically perform equilibration.
        Consider using this option if the numerical values in the
        constraints are separated by several orders of magnitude.
    rr : bool (default: True)
        Set to ``False`` to disable automatic redundancy removal.
    maxupdate : int (default: 10)
        The maximum number of updates performed on the LU factorization.
        After this many updates is reached, the basis matrix is factorized
        from scratch.
    mast : bool (default: False)
        Minimize Amortized Solve Time. If enabled, the average time to solve
        a linear system using the basis factorization is measured. Typically,
        the average solve time will decrease with each successive solve after
        initial factorization, as factorization takes much more time than the
        solve operation (and updates). Eventually, however, the updated
        factorization becomes sufficiently complex that the average solve time
        begins to increase. When this is detected, the basis is refactorized
        from scratch. Enable this option to maximize speed at the risk of
        nondeterministic behavior. Ignored if ``maxupdate`` is 0.
    pivot : "mrc" or "bland" (default: "mrc")
        Pivot rule: Minimum Reduced Cost ("mrc") or Bland's rule ("bland").
        Choose Bland's rule if iteration limit is reached and cycling is
        suspected.
    unknown_options : dict
        Optional arguments not used by this particular solver. If
        `unknown_options` is non-empty a warning is issued listing all
        unused options.

    Returns
    -------
    res : OptimizeResult
        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:

        x : 1-D array
            The values of the decision variables that minimizes the
            objective function while satisfying the constraints.
        fun : float
            The optimal value of the objective function ``c @ x``.
        slack : 1-D array
            The (nominally positive) values of the slack variables,
            ``b_ub - A_ub @ x``.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            ``b_eq - A_eq @ x``.
        success : bool
            ``True`` when the algorithm succeeds in finding an optimal
            solution.
        status : int
            An integer representing the exit status of the algorithm.

            ``0`` : Optimization terminated successfully.

            ``1`` : Iteration limit reached.

            ``2`` : Problem appears to be infeasible.

            ``3`` : Problem appears to be unbounded.

            ``4`` : Numerical difficulties encountered.

            ``5`` : Problem has no constraints; turn presolve on.

            ``6`` : Invalid guess provided.

        message : str
            A string descriptor of the exit status of the algorithm.
        nit : int
            The total number of iterations performed in all phases.


    Notes
    -----
    Method *revised simplex* uses the revised simplex method as described in
    [9]_, except that a factorization [11]_ of the basis matrix, rather than
    its inverse, is efficiently maintained and used to solve the linear systems
    at each iteration of the algorithm.

    References
    ----------
    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
           programming." Athena Scientific 1 (1997): 997.
    .. [11] Bartels, Richard H. "A stabilization of the simplex method."
            Journal in Numerische Mathematik 16.5 (1971): 414-434.
    """
    # Documentation-only stub: this function exists solely to carry the
    # method-specific docstring for linprog(method='revised simplex'); the
    # implementation lives in _linprog_rs.py.
    pass
|
| 1276 |
+
|
| 1277 |
+
|
| 1278 |
+
def _linprog_simplex_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
                         bounds=None, method='interior-point', callback=None,
                         maxiter=5000, disp=False, presolve=True,
                         tol=1e-12, autoscale=False, rr=True, bland=False,
                         **unknown_options):
    r"""
    Linear programming: minimize a linear objective function subject to linear
    equality and inequality constraints using the tableau-based simplex method.

    .. deprecated:: 1.9.0
        `method='simplex'` will be removed in SciPy 1.11.0.
        It is replaced by `method='highs'` because the latter is
        faster and more robust.

    Linear programming solves problems of the following form:

    .. math::

        \min_x \ & c^T x \\
        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
        & A_{eq} x = b_{eq},\\
        & l \leq x \leq u ,

    where :math:`x` is a vector of decision variables; :math:`c`,
    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
    :math:`A_{ub}` and :math:`A_{eq}` are matrices.

    Alternatively, that's:

    minimize::

        c @ x

    such that::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub

    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
    ``bounds``.

    Parameters
    ----------
    c : 1-D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2-D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1-D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2-D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1-D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must equal
        the corresponding element of ``b_eq``.
    bounds : sequence, optional
        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
        the minimum and maximum values of that decision variable. Use ``None``
        to indicate that there is no bound. By default, bounds are
        ``(0, None)`` (all decision variables are non-negative).
        If a single tuple ``(min, max)`` is provided, then ``min`` and
        ``max`` will serve as bounds for all decision variables.
    method : str
        This is the method-specific documentation for 'simplex'.
        :ref:`'highs' <optimize.linprog-highs>`,
        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
        :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
        and :ref:`'revised simplex' <optimize.linprog-revised_simplex>`
        are also available.
    callback : callable, optional
        Callback function to be executed once per iteration.

    Options
    -------
    maxiter : int (default: 5000)
        The maximum number of iterations to perform in either phase.
    disp : bool (default: False)
        Set to ``True`` if indicators of optimization status are to be printed
        to the console each iteration.
    presolve : bool (default: True)
        Presolve attempts to identify trivial infeasibilities,
        identify trivial unboundedness, and simplify the problem before
        sending it to the main solver. It is generally recommended
        to keep the default setting ``True``; set to ``False`` if
        presolve is to be disabled.
    tol : float (default: 1e-12)
        The tolerance which determines when a solution is "close enough" to
        zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.
    autoscale : bool (default: False)
        Set to ``True`` to automatically perform equilibration.
        Consider using this option if the numerical values in the
        constraints are separated by several orders of magnitude.
    rr : bool (default: True)
        Set to ``False`` to disable automatic redundancy removal.
    bland : bool
        If True, use Bland's anti-cycling rule [3]_ to choose pivots to
        prevent cycling. If False, choose pivots which should lead to a
        converged solution more quickly. The latter method is subject to
        cycling (non-convergence) in rare instances.
    unknown_options : dict
        Optional arguments not used by this particular solver. If
        `unknown_options` is non-empty a warning is issued listing all
        unused options.

    Returns
    -------
    res : OptimizeResult
        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:

        x : 1-D array
            The values of the decision variables that minimizes the
            objective function while satisfying the constraints.
        fun : float
            The optimal value of the objective function ``c @ x``.
        slack : 1-D array
            The (nominally positive) values of the slack variables,
            ``b_ub - A_ub @ x``.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            ``b_eq - A_eq @ x``.
        success : bool
            ``True`` when the algorithm succeeds in finding an optimal
            solution.
        status : int
            An integer representing the exit status of the algorithm.

            ``0`` : Optimization terminated successfully.

            ``1`` : Iteration limit reached.

            ``2`` : Problem appears to be infeasible.

            ``3`` : Problem appears to be unbounded.

            ``4`` : Numerical difficulties encountered.

        message : str
            A string descriptor of the exit status of the algorithm.
        nit : int
            The total number of iterations performed in all phases.

    References
    ----------
    .. [1] Dantzig, George B., Linear programming and extensions. Rand
           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
           1963
    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
           Mathematical Programming", McGraw-Hill, Chapter 4.
    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
           Mathematics of Operations Research (2), 1977: pp. 103-107.
    """
    # Documentation-only stub: this function exists solely to carry the
    # method-specific docstring for linprog(method='simplex'); the
    # implementation lives in _linprog_simplex.py.
    pass
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_rs.py
ADDED
|
@@ -0,0 +1,572 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Revised simplex method for linear programming
|
| 2 |
+
|
| 3 |
+
The *revised simplex* method uses the method described in [1]_, except
|
| 4 |
+
that a factorization [2]_ of the basis matrix, rather than its inverse,
|
| 5 |
+
is efficiently maintained and used to solve the linear systems at each
|
| 6 |
+
iteration of the algorithm.
|
| 7 |
+
|
| 8 |
+
.. versionadded:: 1.3.0
|
| 9 |
+
|
| 10 |
+
References
|
| 11 |
+
----------
|
| 12 |
+
.. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
|
| 13 |
+
programming." Athena Scientific 1 (1997): 997.
|
| 14 |
+
.. [2] Bartels, Richard H. "A stabilization of the simplex method."
|
| 15 |
+
Journal in Numerische Mathematik 16.5 (1971): 414-434.
|
| 16 |
+
|
| 17 |
+
"""
|
| 18 |
+
# Author: Matt Haberland
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
from numpy.linalg import LinAlgError
|
| 22 |
+
|
| 23 |
+
from scipy.linalg import solve
|
| 24 |
+
from ._optimize import _check_unknown_options
|
| 25 |
+
from ._bglu_dense import LU
|
| 26 |
+
from ._bglu_dense import BGLU as BGLU
|
| 27 |
+
from ._linprog_util import _postsolve
|
| 28 |
+
from ._optimize import OptimizeResult
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp,
               maxupdate, mast, pivot):
    """
    The purpose of phase one is to find an initial basic feasible solution
    (BFS) to the original problem.

    Generates an auxiliary problem with a trivial BFS and an objective that
    minimizes infeasibility of the original problem. Solves the auxiliary
    problem using the main simplex routine (phase two). This either yields
    a BFS to the original problem or determines that the original problem is
    infeasible. If feasible, phase one detects redundant rows in the original
    constraint matrix and removes them, then chooses additional indices as
    necessary to complete a basis/BFS for the original problem.

    Returns
    -------
    x : 1-D array
        Basic feasible solution, restricted to the original ``n`` variables.
    basis : 1-D array
        Column indices of the basic variables.
    A : 2-D array
        Constraint matrix with redundant rows and artificial columns removed.
    b : 1-D array
        Right-hand side vector.
    residual : float
        Auxiliary objective value ``c @ x``; a value above ``tol`` indicates
        the original problem is infeasible.
    status : int
        0 success, 2 infeasible, 4 numerical difficulties, 6 invalid guess.
    iter_k : int
        Iterations consumed solving the auxiliary problem.
    """

    m, n = A.shape
    status = 0

    # generate auxiliary problem to get initial BFS
    A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol)

    if status == 6:  # guess x0 could not seed a trivial BFS; bail out early
        residual = c.dot(x)
        iter_k = 0
        return x, basis, A, b, residual, status, iter_k

    # solve auxiliary problem
    phase_one_n = n  # tells phase two it is solving the phase-one problem
    iter_k = 0
    x, basis, status, iter_k = _phase_two(c, A, x, basis, callback,
                                          postsolve_args,
                                          maxiter, tol, disp,
                                          maxupdate, mast, pivot,
                                          iter_k, phase_one_n)

    # check for infeasibility
    # c is nonzero only on artificial columns, so the residual is the total
    # remaining infeasibility; it must vanish for a feasible original problem
    residual = c.dot(x)
    if status == 0 and residual > tol:
        status = 2

    # drive artificial variables out of basis
    # TODO: test redundant row removal better
    # TODO: make solve more efficient with BGLU? This could take a while.
    keep_rows = np.ones(m, dtype=bool)
    for basis_column in basis[basis >= n]:  # artificial columns still basic
        B = A[:, basis]
        try:
            basis_finder = np.abs(solve(B, A))  # inefficient
            pertinent_row = np.argmax(basis_finder[:, basis_column])
            # candidate replacements: original (non-artificial) columns that
            # are not already basic
            eligible_columns = np.ones(n, dtype=bool)
            eligible_columns[basis[basis < n]] = 0
            eligible_column_indices = np.where(eligible_columns)[0]
            index = np.argmax(basis_finder[:, :n]
                              [pertinent_row, eligible_columns])
            new_basis_column = eligible_column_indices[index]
            if basis_finder[pertinent_row, new_basis_column] < tol:
                # no usable replacement: this constraint row is redundant
                keep_rows[pertinent_row] = False
            else:
                # swap the artificial column out for the replacement
                basis[basis == basis_column] = new_basis_column
        except LinAlgError:
            status = 4  # numerical difficulties

    # form solution to original problem
    A = A[keep_rows, :n]  # drop redundant rows and artificial columns
    basis = basis[keep_rows]
    x = x[:n]
    m = A.shape[0]
    return x, basis, A, b, residual, status, iter_k
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _get_more_basis_columns(A, basis):
|
| 102 |
+
"""
|
| 103 |
+
Called when the auxiliary problem terminates with artificial columns in
|
| 104 |
+
the basis, which must be removed and replaced with non-artificial
|
| 105 |
+
columns. Finds additional columns that do not make the matrix singular.
|
| 106 |
+
"""
|
| 107 |
+
m, n = A.shape
|
| 108 |
+
|
| 109 |
+
# options for inclusion are those that aren't already in the basis
|
| 110 |
+
a = np.arange(m+n)
|
| 111 |
+
bl = np.zeros(len(a), dtype=bool)
|
| 112 |
+
bl[basis] = 1
|
| 113 |
+
options = a[~bl]
|
| 114 |
+
options = options[options < n] # and they have to be non-artificial
|
| 115 |
+
|
| 116 |
+
# form basis matrix
|
| 117 |
+
B = np.zeros((m, m))
|
| 118 |
+
B[:, 0:len(basis)] = A[:, basis]
|
| 119 |
+
|
| 120 |
+
if (basis.size > 0 and
|
| 121 |
+
np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)):
|
| 122 |
+
raise Exception("Basis has dependent columns")
|
| 123 |
+
|
| 124 |
+
rank = 0 # just enter the loop
|
| 125 |
+
for i in range(n): # somewhat arbitrary, but we need another way out
|
| 126 |
+
# permute the options, and take as many as needed
|
| 127 |
+
new_basis = np.random.permutation(options)[:m-len(basis)]
|
| 128 |
+
B[:, len(basis):] = A[:, new_basis] # update the basis matrix
|
| 129 |
+
rank = np.linalg.matrix_rank(B) # check the rank
|
| 130 |
+
if rank == m:
|
| 131 |
+
break
|
| 132 |
+
|
| 133 |
+
return np.concatenate((basis, new_basis))
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def _generate_auxiliary_problem(A, b, x0, tol):
    """
    Modifies original problem to create an auxiliary problem with a trivial
    initial basic feasible solution and an objective that minimizes
    infeasibility in the original problem.

    Conceptually, this is done by stacking an identity matrix on the right of
    the original constraint matrix, adding artificial variables to correspond
    with each of these new columns, and generating a cost vector that is all
    zeros except for ones corresponding with each of the new variables.

    A initial basic feasible solution is trivial: all variables are zero
    except for the artificial variables, which are set equal to the
    corresponding element of the right hand side `b`.

    Running the simplex method on this auxiliary problem drives all of the
    artificial variables - and thus the cost - to zero if the original problem
    is feasible. The original problem is declared infeasible otherwise.

    Much of the complexity below is to improve efficiency by using singleton
    columns in the original problem where possible, thus generating artificial
    variables only as necessary, and using an initial 'guess' basic feasible
    solution.

    Returns
    -------
    A, b : arrays
        Constraint matrix (possibly widened with artificial columns) and RHS.
        NOTE(review): ``A`` and ``b`` are also modified in place (row sign
        flips) before the return — callers should not rely on the originals.
    c : 1-D array
        Auxiliary costs: 1 on artificial columns, 0 elsewhere.
    basis : 1-D array
        Column indices forming an initial basis.
    x : 1-D array
        Trivial basic feasible solution of the auxiliary problem.
    status : int
        0 on success; 6 if the guess ``x0`` cannot seed a trivial BFS.
    """
    status = 0
    m, n = A.shape

    if x0 is not None:
        x = x0
    else:
        x = np.zeros(n)

    r = b - A@x  # residual; this must be all zeros for feasibility

    A[r < 0] = -A[r < 0]  # express problem with RHS positive for trivial BFS
    b[r < 0] = -b[r < 0]  # to the auxiliary problem
    r[r < 0] *= -1

    # Rows which we will need to find a trivial way to zero.
    # This should just be the rows where there is a nonzero residual.
    # But then we would not necessarily have a column singleton in every row.
    # This makes it difficult to find an initial basis.
    if x0 is None:
        nonzero_constraints = np.arange(m)
    else:
        nonzero_constraints = np.where(r > tol)[0]

    # these are (at least some of) the initial basis columns
    basis = np.where(np.abs(x) > tol)[0]

    if len(nonzero_constraints) == 0 and len(basis) <= m:  # already a BFS
        c = np.zeros(n)
        basis = _get_more_basis_columns(A, basis)
        return A, b, c, basis, x, status
    elif (len(nonzero_constraints) > m - len(basis) or
            np.any(x < 0)):  # can't get trivial BFS
        c = np.zeros(n)
        status = 6  # invalid guess provided
        return A, b, c, basis, x, status

    # chooses existing columns appropriate for inclusion in initial basis
    cols, rows = _select_singleton_columns(A, r)

    # find the rows we need to zero that we _can_ zero with column singletons
    i_tofix = np.isin(rows, nonzero_constraints)
    # these columns can't already be in the basis, though
    # we are going to add them to the basis and change the corresponding x val
    i_notinbasis = np.logical_not(np.isin(cols, basis))
    i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis)
    rows = rows[i_fix_without_aux]
    cols = cols[i_fix_without_aux]

    # indices of the rows we can only zero with auxiliary variable
    # these rows will get a one in each auxiliary column
    arows = nonzero_constraints[np.logical_not(
                                np.isin(nonzero_constraints, rows))]
    n_aux = len(arows)
    acols = n + np.arange(n_aux)  # indices of auxiliary columns

    basis_ng = np.concatenate((cols, acols))  # basis columns not from guess
    basis_ng_rows = np.concatenate((rows, arows))  # rows we need to zero

    # add auxiliary singleton columns
    A = np.hstack((A, np.zeros((m, n_aux))))
    A[arows, acols] = 1

    # generate initial BFS
    x = np.concatenate((x, np.zeros(n_aux)))
    # each new basic variable absorbs exactly its row's residual
    x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng]

    # generate costs to minimize infeasibility
    c = np.zeros(n_aux + n)
    c[acols] = 1

    # basis columns correspond with nonzeros in guess, those with column
    # singletons we used to zero remaining constraints, and any additional
    # columns to get a full set (m columns)
    basis = np.concatenate((basis, basis_ng))
    basis = _get_more_basis_columns(A, basis)  # add columns as needed

    return A, b, c, basis, x, status
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def _select_singleton_columns(A, b):
|
| 240 |
+
"""
|
| 241 |
+
Finds singleton columns for which the singleton entry is of the same sign
|
| 242 |
+
as the right-hand side; these columns are eligible for inclusion in an
|
| 243 |
+
initial basis. Determines the rows in which the singleton entries are
|
| 244 |
+
located. For each of these rows, returns the indices of the one singleton
|
| 245 |
+
column and its corresponding row.
|
| 246 |
+
"""
|
| 247 |
+
# find indices of all singleton columns and corresponding row indices
|
| 248 |
+
column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
|
| 249 |
+
columns = A[:, column_indices] # array of singleton columns
|
| 250 |
+
row_indices = np.zeros(len(column_indices), dtype=int)
|
| 251 |
+
nonzero_rows, nonzero_columns = np.nonzero(columns)
|
| 252 |
+
row_indices[nonzero_columns] = nonzero_rows # corresponding row indices
|
| 253 |
+
|
| 254 |
+
# keep only singletons with entries that have same sign as RHS
|
| 255 |
+
# this is necessary because all elements of BFS must be non-negative
|
| 256 |
+
same_sign = A[row_indices, column_indices]*b[row_indices] >= 0
|
| 257 |
+
column_indices = column_indices[same_sign][::-1]
|
| 258 |
+
row_indices = row_indices[same_sign][::-1]
|
| 259 |
+
# Reversing the order so that steps below select rightmost columns
|
| 260 |
+
# for initial basis, which will tend to be slack variables. (If the
|
| 261 |
+
# guess corresponds with a basic feasible solution but a constraint
|
| 262 |
+
# is not satisfied with the corresponding slack variable zero, the slack
|
| 263 |
+
# variable must be basic.)
|
| 264 |
+
|
| 265 |
+
# for each row, keep rightmost singleton column with an entry in that row
|
| 266 |
+
unique_row_indices, first_columns = np.unique(row_indices,
|
| 267 |
+
return_index=True)
|
| 268 |
+
return column_indices[first_columns], unique_row_indices
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def _find_nonzero_rows(A, tol):
|
| 272 |
+
"""
|
| 273 |
+
Returns logical array indicating the locations of rows with at least
|
| 274 |
+
one nonzero element.
|
| 275 |
+
"""
|
| 276 |
+
return np.any(np.abs(A) > tol, axis=1)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12):
|
| 280 |
+
"""
|
| 281 |
+
Selects a pivot to enter the basis. Currently Bland's rule - the smallest
|
| 282 |
+
index that has a negative reduced cost - is the default.
|
| 283 |
+
"""
|
| 284 |
+
if rule.lower() == "mrc": # index with minimum reduced cost
|
| 285 |
+
return a[~bl][np.argmin(c_hat)]
|
| 286 |
+
else: # smallest index w/ negative reduced cost
|
| 287 |
+
return a[~bl][c_hat < -tol][0]
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def _display_iter(phase, iteration, slack, con, fun):
|
| 291 |
+
"""
|
| 292 |
+
Print indicators of optimization status to the console.
|
| 293 |
+
"""
|
| 294 |
+
header = True if not iteration % 20 else False
|
| 295 |
+
|
| 296 |
+
if header:
|
| 297 |
+
print("Phase",
|
| 298 |
+
"Iteration",
|
| 299 |
+
"Minimum Slack ",
|
| 300 |
+
"Constraint Residual",
|
| 301 |
+
"Objective ")
|
| 302 |
+
|
| 303 |
+
# :<X.Y left aligns Y digits in X digit spaces
|
| 304 |
+
fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}'
|
| 305 |
+
try:
|
| 306 |
+
slack = np.min(slack)
|
| 307 |
+
except ValueError:
|
| 308 |
+
slack = "NA"
|
| 309 |
+
print(fmt.format(phase, iteration, slack, np.linalg.norm(con), fun))
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def _display_and_callback(phase_one_n, x, postsolve_args, status,
                          iteration, disp, callback):
    """
    Postsolve the current iterate and report it: invoke the user callback
    (if any) with an OptimizeResult and/or print a progress row.

    ``phase_one_n`` is the number of original variables during phase one;
    ``None`` indicates phase two, in which all of ``x`` is postsolved.
    """
    if phase_one_n is None:
        phase = 2
        x_postsolve = x
    else:
        phase = 1
        # Phase one appends artificial variables; drop them before postsolve.
        x_postsolve = x[:phase_one_n]
    x_o, fun, slack, con = _postsolve(x_postsolve, postsolve_args)

    if callback is not None:
        # `complete`/`success` are False: the algorithm is still iterating.
        intermediate = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
                                       'con': con, 'nit': iteration,
                                       'phase': phase, 'complete': False,
                                       'status': status, 'message': "",
                                       'success': False})
        callback(intermediate)
    if disp:
        _display_iter(phase, iteration, slack, con, fun)
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp,
               maxupdate, mast, pivot, iteration=0, phase_one_n=None):
    """
    The heart of the simplex method. Beginning with a basic feasible solution,
    moves to adjacent basic feasible solutions successively lower reduced cost.
    Terminates when there are no basic feasible solutions with lower reduced
    cost or if the problem is determined to be unbounded.

    This implementation follows the revised simplex method based on LU
    decomposition. Rather than maintaining a tableau or an inverse of the
    basis matrix, we keep a factorization of the basis matrix that allows
    efficient solution of linear systems while avoiding stability issues
    associated with inverted matrices.

    Parameters
    ----------
    c : 1-D array
        Objective coefficients of the standard-form problem.
    A : 2-D array
        Equality-constraint matrix of the standard-form problem.
    x : 1-D array
        Basic feasible solution; modified in place as steps are taken.
    b : 1-D array of int
        Indices of the columns of ``A`` in the current basis.
    callback, postsolve_args, disp
        Reporting hooks; see `_display_and_callback`.
    maxiter : int
        Maximum total number of iterations (shared with phase one via the
        ``iteration`` starting count).
    tol : float
        Tolerance for reduced-cost optimality and ratio-test positivity.
    maxupdate : int
        If nonzero, use the updatable BGLU factorization with at most this
        many updates before refactorizing; otherwise use a plain LU.
    mast : bool
        Passed through to BGLU ("minimize amortized solve time").
    pivot : str
        Entering-variable rule; see `_select_enter_pivot`.
    iteration : int
        Iteration count carried over from phase one.
    phase_one_n : int or None
        Number of original variables during phase one; ``None`` in phase two.

    Returns
    -------
    x, b, status, iteration
        Final iterate, final basis indices, exit status
        (0 optimal, 1 iteration limit, 3 unbounded, 4 numerical trouble),
        and iteration count.
    """
    m, n = A.shape
    status = 0
    a = np.arange(n)                    # indices of columns of A
    ab = np.arange(m)                   # indices of columns of B
    if maxupdate:
        # basis matrix factorization object; similar to B = A[:, b]
        B = BGLU(A, b, maxupdate, mast)
    else:
        B = LU(A, b)

    for iteration in range(iteration, maxiter):

        if disp or callback is not None:
            _display_and_callback(phase_one_n, x, postsolve_args, status,
                                  iteration, disp, callback)

        # Boolean mask of basic columns.
        bl = np.zeros(len(a), dtype=bool)
        bl[b] = 1

        xb = x[b]       # basic variables
        cb = c[b]       # basic costs

        try:
            v = B.solve(cb, transposed=True)    # similar to v = solve(B.T, cb)
        except LinAlgError:
            status = 4
            break

        # TODO: cythonize?
        c_hat = c - v.dot(A)    # reduced cost
        c_hat = c_hat[~bl]
        # Above is much faster than:
        # N = A[:, ~bl]  # slow!
        # c_hat = c[~bl] - v.T.dot(N)
        # Can we perform the multiplication only on the nonbasic columns?

        if np.all(c_hat >= -tol):  # all reduced costs positive -> terminate
            break

        j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol)
        u = B.solve(A[:, j])        # similar to u = solve(B, A[:, j])

        i = u > tol                 # if none of the u are positive, unbounded
        if not np.any(i):
            status = 3
            break

        # Ratio test: how far can we move before a basic variable hits zero?
        th = xb[i]/u[i]
        l = np.argmin(th)           # implicitly selects smallest subscript
        th_star = th[l]             # step size

        x[b] = x[b] - th_star*u     # take step
        x[j] = th_star
        B.update(ab[i][l], j)       # modify basis
        b = B.b                     # similar to b[ab[i][l]] =

    else:
        # If the end of the for loop is reached (without a break statement),
        # then another step has been taken, so the iteration counter should
        # increment, info should be displayed, and callback should be called.
        iteration += 1
        status = 1
        if disp or callback is not None:
            _display_and_callback(phase_one_n, x, postsolve_args, status,
                                  iteration, disp, callback)

    return x, b, status, iteration
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args,
                maxiter=5000, tol=1e-12, disp=False,
                maxupdate=10, mast=False, pivot="mrc",
                **unknown_options):
    """
    Solve a linear programming problem in standard form::

        minimize:   c @ x

        subject to: A @ x == b
                    0 <= x < oo

    via a two-phase revised simplex algorithm.

    User-facing documentation is in _linprog_doc.py.

    Parameters
    ----------
    c : 1-D array
        Coefficients of the linear objective function to be minimized.
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables. (Currently unused.)
    A : 2-D array
        2-D array which, when matrix-multiplied by ``x``, gives the values of
        the equality constraints at ``x``.
    b : 1-D array
        1-D array of values representing the RHS of each equality constraint
        (row) in ``A``.
    x0 : 1-D array, optional
        Starting values of the independent variables, which will be refined
        by the optimization algorithm. For the revised simplex method, these
        must correspond with a basic feasible solution.
    callback : callable, optional
        If provided, called once per iteration with a
        `scipy.optimize.OptimizeResult` describing the current (incomplete)
        solution: fields ``x``, ``fun``, ``success`` (always False while
        iterating), ``slack``, ``con``, ``phase``, ``status``, ``nit``,
        ``message``.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the
        standard-form problem into the solution to the original problem.

    Options
    -------
    maxiter : int
        The maximum number of iterations to perform in either phase.
    tol : float
        The tolerance which determines when a solution is "close enough" to
        zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.
    disp : bool
        Set to ``True`` if indicators of optimization status are to be
        printed to the console each iteration.
    maxupdate : int
        The maximum number of updates performed on the LU factorization.
        After this many updates is reached, the basis matrix is factorized
        from scratch.
    mast : bool
        Minimize Amortized Solve Time. If enabled, the basis is refactorized
        from scratch whenever the measured average solve time begins to
        increase. Maximizes speed at the risk of nondeterministic behavior.
        Ignored if ``maxupdate`` is 0.
    pivot : "mrc" or "bland"
        Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose
        Bland's rule if iteration limit is reached and cycling is suspected.
    unknown_options : dict
        Optional arguments not used by this particular solver. If
        `unknown_options` is non-empty a warning is issued listing all
        unused options.

    Returns
    -------
    x : 1-D array
        Solution vector.
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Numerical difficulties encountered
         5 : No constraints; turn presolve on
         6 : Guess x0 cannot be converted to a basic feasible solution

    message : str
        A string descriptor of the exit status of the optimization.
    iteration : int
        The number of iterations taken to solve the problem.
    """

    _check_unknown_options(unknown_options)

    messages = ["Optimization terminated successfully.",
                "Iteration limit reached.",
                "The problem appears infeasible, as the phase one auxiliary "
                "problem terminated successfully with a residual of {0:.1e}, "
                "greater than the tolerance {1} required for the solution to "
                "be considered feasible. Consider increasing the tolerance to "
                # Fixed typo: "unnaceptably" -> "unacceptably".
                "be greater than {0:.1e}. If this tolerance is unacceptably "
                "large, the problem is likely infeasible.",
                "The problem is unbounded, as the simplex algorithm found "
                "a basic feasible solution from which there is a direction "
                "with negative reduced cost in which all decision variables "
                "increase.",
                "Numerical difficulties encountered; consider trying "
                "method='interior-point'.",
                "Problems with no constraints are trivially solved; please "
                "turn presolve on.",
                "The guess x0 cannot be converted to a basic feasible "
                "solution. "
                ]

    if A.size == 0:  # address test_unbounded_below_no_presolve_corrected
        return np.zeros(c.shape), 5, messages[5], 0

    # Phase one finds (or verifies) a basic feasible solution.
    x, basis, A, b, residual, status, iteration = (
        _phase_one(A, b, x0, callback, postsolve_args,
                   maxiter, tol, disp, maxupdate, mast, pivot))

    # Phase two optimizes from that BFS; the iteration count carries over.
    if status == 0:
        x, basis, status, iteration = _phase_two(c, A, x, basis, callback,
                                                 postsolve_args,
                                                 maxiter, tol, disp,
                                                 maxupdate, mast, pivot,
                                                 iteration)

    return x, status, messages[status].format(residual, tol), iteration
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsap.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (27.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_milp.py
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy.sparse import csc_array, vstack, issparse
|
| 4 |
+
from scipy._lib._util import VisibleDeprecationWarning
|
| 5 |
+
from ._highs._highs_wrapper import _highs_wrapper # type: ignore[import-not-found,import-untyped]
|
| 6 |
+
from ._constraints import LinearConstraint, Bounds
|
| 7 |
+
from ._optimize import OptimizeResult
|
| 8 |
+
from ._linprog_highs import _highs_to_scipy_status_message
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _constraints_to_components(constraints):
|
| 12 |
+
"""
|
| 13 |
+
Convert sequence of constraints to a single set of components A, b_l, b_u.
|
| 14 |
+
|
| 15 |
+
`constraints` could be
|
| 16 |
+
|
| 17 |
+
1. A LinearConstraint
|
| 18 |
+
2. A tuple representing a LinearConstraint
|
| 19 |
+
3. An invalid object
|
| 20 |
+
4. A sequence of composed entirely of objects of type 1/2
|
| 21 |
+
5. A sequence containing at least one object of type 3
|
| 22 |
+
|
| 23 |
+
We want to accept 1, 2, and 4 and reject 3 and 5.
|
| 24 |
+
"""
|
| 25 |
+
message = ("`constraints` (or each element within `constraints`) must be "
|
| 26 |
+
"convertible into an instance of "
|
| 27 |
+
"`scipy.optimize.LinearConstraint`.")
|
| 28 |
+
As = []
|
| 29 |
+
b_ls = []
|
| 30 |
+
b_us = []
|
| 31 |
+
|
| 32 |
+
# Accept case 1 by standardizing as case 4
|
| 33 |
+
if isinstance(constraints, LinearConstraint):
|
| 34 |
+
constraints = [constraints]
|
| 35 |
+
else:
|
| 36 |
+
# Reject case 3
|
| 37 |
+
try:
|
| 38 |
+
iter(constraints)
|
| 39 |
+
except TypeError as exc:
|
| 40 |
+
raise ValueError(message) from exc
|
| 41 |
+
|
| 42 |
+
# Accept case 2 by standardizing as case 4
|
| 43 |
+
if len(constraints) == 3:
|
| 44 |
+
# argument could be a single tuple representing a LinearConstraint
|
| 45 |
+
try:
|
| 46 |
+
constraints = [LinearConstraint(*constraints)]
|
| 47 |
+
except (TypeError, ValueError, VisibleDeprecationWarning):
|
| 48 |
+
# argument was not a tuple representing a LinearConstraint
|
| 49 |
+
pass
|
| 50 |
+
|
| 51 |
+
# Address cases 4/5
|
| 52 |
+
for constraint in constraints:
|
| 53 |
+
# if it's not a LinearConstraint or something that represents a
|
| 54 |
+
# LinearConstraint at this point, it's invalid
|
| 55 |
+
if not isinstance(constraint, LinearConstraint):
|
| 56 |
+
try:
|
| 57 |
+
constraint = LinearConstraint(*constraint)
|
| 58 |
+
except TypeError as exc:
|
| 59 |
+
raise ValueError(message) from exc
|
| 60 |
+
As.append(csc_array(constraint.A))
|
| 61 |
+
b_ls.append(np.atleast_1d(constraint.lb).astype(np.float64))
|
| 62 |
+
b_us.append(np.atleast_1d(constraint.ub).astype(np.float64))
|
| 63 |
+
|
| 64 |
+
if len(As) > 1:
|
| 65 |
+
A = vstack(As, format="csc")
|
| 66 |
+
b_l = np.concatenate(b_ls)
|
| 67 |
+
b_u = np.concatenate(b_us)
|
| 68 |
+
else: # avoid unnecessary copying
|
| 69 |
+
A = As[0]
|
| 70 |
+
b_l = b_ls[0]
|
| 71 |
+
b_u = b_us[0]
|
| 72 |
+
|
| 73 |
+
return A, b_l, b_u
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _milp_iv(c, integrality, bounds, constraints, options):
    """
    Input validation for `milp`.

    Converts the user-facing arguments into the plain arrays and option
    dict expected by the HiGHS wrapper, raising ``ValueError`` with a
    descriptive message for invalid input.

    Returns
    -------
    tuple
        ``(c, integrality, lb, ub, indptr, indices, data, b_l, b_u,
        options_iv)`` where ``indptr``/``indices``/``data`` are the CSC
        components of the stacked constraint matrix.
    """
    # objective IV
    if issparse(c):
        raise ValueError("`c` must be a dense array.")
    c = np.atleast_1d(c).astype(np.float64)
    if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)):
        message = ("`c` must be a one-dimensional array of finite numbers "
                   "with at least one element.")
        raise ValueError(message)

    # integrality IV
    if issparse(integrality):
        raise ValueError("`integrality` must be a dense array.")
    message = ("`integrality` must contain integers 0-3 and be broadcastable "
               "to `c.shape`.")
    if integrality is None:
        integrality = 0
    try:
        # Broadcasting lets a scalar (or any compatible shape) apply to all
        # variables.
        integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8)
    except ValueError:
        raise ValueError(message)
    if integrality.min() < 0 or integrality.max() > 3:
        raise ValueError(message)

    # bounds IV
    if bounds is None:
        # Default: non-negative decision variables.
        bounds = Bounds(0, np.inf)
    elif not isinstance(bounds, Bounds):
        message = ("`bounds` must be convertible into an instance of "
                   "`scipy.optimize.Bounds`.")
        try:
            bounds = Bounds(*bounds)
        except TypeError as exc:
            raise ValueError(message) from exc

    try:
        lb = np.broadcast_to(bounds.lb, c.shape).astype(np.float64)
        ub = np.broadcast_to(bounds.ub, c.shape).astype(np.float64)
    except (ValueError, TypeError) as exc:
        message = ("`bounds.lb` and `bounds.ub` must contain reals and "
                   "be broadcastable to `c.shape`.")
        raise ValueError(message) from exc

    # constraints IV
    if not constraints:
        # No constraints: use an empty (0 x n) constraint system so the
        # wrapper always receives a well-formed matrix.
        constraints = [LinearConstraint(np.empty((0, c.size)),
                                        np.empty((0,)), np.empty((0,)))]
    try:
        A, b_l, b_u = _constraints_to_components(constraints)
    except ValueError as exc:
        message = ("`constraints` (or each element within `constraints`) must "
                   "be convertible into an instance of "
                   "`scipy.optimize.LinearConstraint`.")
        raise ValueError(message) from exc

    if A.shape != (b_l.size, c.size):
        message = "The shape of `A` must be (len(b_l), len(c))."
        raise ValueError(message)
    indptr, indices, data = A.indptr, A.indices, A.data.astype(np.float64)

    # options IV
    options = options or {}
    supported_options = {'disp', 'presolve', 'time_limit', 'node_limit',
                         'mip_rel_gap'}
    unsupported_options = set(options).difference(supported_options)
    if unsupported_options:
        message = (f"Unrecognized options detected: {unsupported_options}. "
                   "These will be passed to HiGHS verbatim.")
        warnings.warn(message, RuntimeWarning, stacklevel=3)
    # NOTE(review): `options.pop` mutates the caller's dict when a non-empty
    # dict is passed in — confirm this is intentional.
    options_iv = {'log_to_console': options.pop("disp", False),
                  'mip_max_nodes': options.pop("node_limit", None)}
    options_iv.update(options)

    return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def milp(c, *, integrality=None, bounds=None, constraints=None, options=None):
    r"""
    Mixed-integer linear programming

    Solves problems of the following form:

    .. math::

        \min_x \ & c^T x \\
        \mbox{such that} \ & b_l \leq A x \leq b_u,\\
        & l \leq x \leq u, \\
        & x_i \in \mathbb{Z}, i \in X_i

    where :math:`x` is a vector of decision variables;
    :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are
    vectors; :math:`A` is a matrix, and :math:`X_i` is the set of indices of
    decision variables that must be integral ("integrality" constraints).
    By default, ``l = 0`` and ``u = np.inf`` unless specified with `bounds`.

    Parameters
    ----------
    c : 1D dense array_like
        Coefficients of the linear objective function to be minimized;
        converted to double precision before the problem is solved.
    integrality : 1D dense array_like, optional
        Type of integrality constraint on each decision variable:
        ``0`` continuous (default); ``1`` integer within `bounds`;
        ``2`` semi-continuous (within `bounds` or exactly ``0``);
        ``3`` semi-integer (integer within `bounds` or exactly ``0``).
        Converted to an array of integers before the problem is solved.
    bounds : scipy.optimize.Bounds, optional
        Bounds on the decision variables, converted to double precision
        arrays. ``keep_feasible`` is ignored. Defaults to non-negative
        variables.
    constraints : sequence of scipy.optimize.LinearConstraint, optional
        Either a single `LinearConstraint`, a single tuple convertible as
        ``LinearConstraint(*constraints)``, or a sequence of such objects.
        Constraint matrices are converted to `scipy.sparse.csc_array`;
        ``keep_feasible`` is ignored.
    options : dict, optional
        Recognized solver options: ``disp`` (bool, default ``False``),
        ``node_limit`` (int), ``presolve`` (bool, default ``True``),
        ``time_limit`` (float), ``mip_rel_gap`` (float).

    Returns
    -------
    res : OptimizeResult
        Guaranteed attributes: ``status`` (``0`` optimal, ``1``
        iteration/time limit, ``2`` infeasible, ``3`` unbounded, ``4``
        other), ``success`` (``True`` iff an optimal solution was found),
        and ``message``. Also present, possibly ``None`` depending on the
        solution status: ``x``, ``fun``, ``mip_node_count``,
        ``mip_dual_bound``, ``mip_gap``.

    Notes
    -----
    `milp` is a wrapper of the HiGHS linear optimization software [1]_. The
    algorithm is deterministic, and it typically finds the global optimum of
    moderately challenging mixed-integer linear programs (when it exists).

    References
    ----------
    .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
           "HiGHS - high performance software for linear optimization."
           https://highs.dev/
    .. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
           simplex method." Mathematical Programming Computation, 10 (1),
           119-142, 2018. DOI: 10.1007/s12532-017-0130-5

    Examples
    --------
    Consider the maximization problem at
    https://en.wikipedia.org/wiki/Integer_programming#Example. Since `milp`
    minimizes, negate the objective coefficients:

    >>> import numpy as np
    >>> c = -np.array([0, 1])
    >>> A = np.array([[-1, 1], [3, 2], [2, 3]])
    >>> b_u = np.array([1, 12, 12])
    >>> b_l = np.full_like(b_u, -np.inf, dtype=float)
    >>> from scipy.optimize import LinearConstraint
    >>> constraints = LinearConstraint(A, b_l, b_u)
    >>> integrality = np.ones_like(c)
    >>> from scipy.optimize import milp
    >>> res = milp(c=c, constraints=constraints, integrality=integrality)
    >>> res.x
    [2.0, 2.0]

    Without the integrality constraints the relaxed solution is ``[1.8,
    2.8]``, which does not round to the correct answer. Other examples are
    given :ref:`in the tutorial <tutorial-optimize_milp>`.
    """
    # Validate/convert all inputs to the raw form HiGHS expects.
    args_iv = _milp_iv(c, integrality, bounds, constraints, options)
    c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv

    highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u,
                               lb, ub, integrality, options)

    # Convert HiGHS status/message to scipy-style equivalents.
    status, message = _highs_to_scipy_status_message(
        highs_res.get('status', None),
        highs_res.get('message', None))

    solution = highs_res.get('x', None)
    res = {
        'status': status,
        'message': message,
        'success': status == 0,
        'x': np.array(solution) if solution is not None else None,
        'fun': highs_res.get('fun', None),
        'mip_node_count': highs_res.get('mip_node_count', None),
        'mip_dual_bound': highs_res.get('mip_dual_bound', None),
        'mip_gap': highs_res.get('mip_gap', None),
    }
    return OptimizeResult(res)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_minpack.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (78.3 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_minpack_py.py
ADDED
|
@@ -0,0 +1,1164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
from . import _minpack
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy import (atleast_1d, triu, shape, transpose, zeros, prod, greater,
|
| 6 |
+
asarray, inf,
|
| 7 |
+
finfo, inexact, issubdtype, dtype)
|
| 8 |
+
from scipy import linalg
|
| 9 |
+
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
|
| 10 |
+
from scipy._lib._util import _asarray_validated, _lazywhere, _contains_nan
|
| 11 |
+
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
|
| 12 |
+
from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
|
| 13 |
+
from ._lsq import least_squares
|
| 14 |
+
# from ._lsq.common import make_strictly_feasible
|
| 15 |
+
from ._lsq.least_squares import prepare_bounds
|
| 16 |
+
from scipy.optimize._minimize import Bounds
|
| 17 |
+
|
| 18 |
+
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _check_func(checker, argname, thefunc, x0, args, numinputs,
|
| 22 |
+
output_shape=None):
|
| 23 |
+
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
|
| 24 |
+
if (output_shape is not None) and (shape(res) != output_shape):
|
| 25 |
+
if (output_shape[0] != 1):
|
| 26 |
+
if len(output_shape) > 1:
|
| 27 |
+
if output_shape[1] == 1:
|
| 28 |
+
return shape(res)
|
| 29 |
+
msg = f"{checker}: there is a mismatch between the input and output " \
|
| 30 |
+
f"shape of the '{argname}' argument"
|
| 31 |
+
func_name = getattr(thefunc, '__name__', None)
|
| 32 |
+
if func_name:
|
| 33 |
+
msg += " '%s'." % func_name
|
| 34 |
+
else:
|
| 35 |
+
msg += "."
|
| 36 |
+
msg += f'Shape should be {output_shape} but it is {shape(res)}.'
|
| 37 |
+
raise TypeError(msg)
|
| 38 |
+
if issubdtype(res.dtype, inexact):
|
| 39 |
+
dt = res.dtype
|
| 40 |
+
else:
|
| 41 |
+
dt = dtype(float)
|
| 42 |
+
return shape(res), dt
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=None, factor=100, diag=None):
    """
    Find the roots of a function.

    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        A function that takes at least one (possibly vector) argument,
        and returns a value of the same length.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple, optional
        Any extra arguments to `func`.
    fprime : callable ``f(x, *args)``, optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, a default
        based on the number of elements in `x0` is used.
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the
        variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys ``nfev``,
        ``njev``, ``fvec``, ``fjac``, ``r`` and ``qtf``.  Only returned
        when `full_output` is true.
    ier : int
        An integer flag. Set to 1 if a solution was found, otherwise
        refer to `mesg` for more information.  Only returned when
        `full_output` is true.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.
        Only returned when `full_output` is true.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
        functions. See the ``method='hybr'`` in particular.

    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.

    Examples
    --------
    Find a solution to the system of equations:
    ``x0*cos(x1) = 4, x1*x0 - x1 = 5``.

    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def func(x):
    ...     return [x[0] * np.cos(x[1]) - 4,
    ...             x[1] * x[0] - x[1] - 5]
    >>> root = fsolve(func, [1, 1])
    >>> root
    array([6.50409711, 0.90841421])
    >>> np.isclose(func(root), [0.0, 0.0])  # func(root) should be almost 0.0.
    array([ True,  True])

    """
    def _counting_func(*call_args):
        # Count every evaluation so that ``nfev`` reflects all calls made
        # by the underlying MINPACK routine.
        _counting_func.nfev += 1
        return func(*call_args)

    _counting_func.nfev = 0

    res = _root_hybr(_counting_func, x0, args, jac=fprime,
                     col_deriv=col_deriv, xtol=xtol, maxfev=maxfev,
                     band=band, eps=epsfcn, factor=factor, diag=diag)
    res.nfev = _counting_func.nfev

    if full_output:
        info = {key: res[key]
                for key in ('nfev', 'njev', 'fjac', 'r', 'qtf') if key in res}
        info['fvec'] = res['fun']
        return res['x'], info, res['status'], res['message']

    status = res['status']
    msg = res['message']
    if status == 1:
        # Converged: return the solution silently.
        return res['x']
    if status in (2, 3, 4, 5):
        # Soft failures: warn but still return the last iterate.
        warnings.warn(msg, RuntimeWarning, stacklevel=2)
        return res['x']
    # status 0 (improper input) or any unrecognized code.
    raise TypeError(msg)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def _root_hybr(func, x0, args=(), jac=None,
               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
               factor=100, diag=None, **unknown_options):
    """
    Find the roots of a multivariate function using MINPACK's hybrd and
    hybrj routines (modified Powell method).

    Returns an `OptimizeResult` with the solution in ``x``, the residual
    in ``fun``, a ``status`` code (1 means converged) and a human-readable
    ``message``.

    Options
    -------
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    eps : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `eps` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the
        variables.

    """
    _check_unknown_options(unknown_options)
    epsfcn = eps

    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    # Validate that func maps a length-n input to a length-n output and
    # obtain the working dtype for the finite-difference step below.
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    Dfun = jac
    if Dfun is None:
        if band is None:
            # Sentinel band widths when no banded structure was requested;
            # interpreted by the C wrapper (_minpack._hybrd).
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            # More function evaluations allowed when the Jacobian must be
            # estimated by finite differences.
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
                                 ml, mu, epsfcn, factor, diag)
    else:
        # Analytic Jacobian supplied: validate its (n, n) shape.
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
                                 col_deriv, xtol, maxfev, factor, diag)

    # retval comes from the C wrapper: first element is the solution,
    # retval[1] is an info dict, last element is the status flag.
    x, status = retval[0], retval[-1]

    # Human-readable messages keyed by MINPACK status code.
    errors = {0: "Improper input parameters were entered.",
              1: "The solution converged.",
              2: "The number of calls to function has "
                 "reached maxfev = %d." % maxfev,
              3: "xtol=%f is too small, no further improvement "
                 "in the approximate\n solution "
                 "is possible." % xtol,
              4: "The iteration is not making good progress, as measured "
                 "by the \n improvement from the last five "
                 "Jacobian evaluations.",
              5: "The iteration is not making good progress, "
                 "as measured by the \n improvement from the last "
                 "ten iterations.",
              'unknown': "An error occurred."}

    info = retval[1]
    # Rename 'fvec' to 'fun' for consistency with other optimize results.
    info['fun'] = info.pop('fvec')
    sol = OptimizeResult(x=x, success=(status == 1), status=status,
                         method="hybr")
    sol.update(info)
    try:
        sol['message'] = errors[status]
    except KeyError:
        # Fall back for any status code not in the table above.
        sol['message'] = errors['unknown']

    return sol
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
# MINPACK ``info`` return codes from lmdif/lmder, partitioned into codes
# that indicate a converged solution (1-4) and codes that indicate failure
# (5-8); see the ``errors`` table inside `leastsq` for each code's meaning.
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def leastsq(func, x0, args=(), Dfun=None, full_output=False,
            col_deriv=False, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        Should take at least one (possibly length ``N`` vector) argument and
        return ``M`` floating point numbers. It must not return NaNs or
        fitting might fail. ``M`` must be greater than or equal to ``N``.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with
        derivatives across the rows. If None, the Jacobian is estimated.
    full_output : bool, optional
        If ``True``, return all optional outputs (not just `x` and `ier`).
    col_deriv : bool, optional
        If ``True``, `Dfun` computes derivatives down the columns (faster,
        because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns
        of the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, defaults to
        ``100*(N+1)`` with `Dfun` or ``200*(N+1)`` without.
    epsfcn : float, optional
        Controls the step length of the forward-difference approximation
        of the Jacobian (for ``Dfun=None``); the actual step is normally
        ``sqrt(epsfcn)*x``.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an
        unsuccessful call).
    cov_x : ndarray
        The inverse of the approximate Hessian constructed from ``fjac``
        and ``ipvt``. None indicates a singular matrix. Multiply by the
        residual variance to obtain the parameter covariance -- see
        `curve_fit`. Only returned if `full_output` is ``True``.
    infodict : dict
        Optional outputs with keys ``nfev``, ``fvec``, ``fjac``, ``ipvt``
        and ``qtf``. Only returned if `full_output` is ``True``.
    mesg : str
        A string message giving information about the cause of failure.
        Only returned if `full_output` is ``True``.
    ier : int
        1, 2, 3 or 4 if a solution was found; otherwise see `mesg`.

    See Also
    --------
    least_squares : Newer interface to solve nonlinear least-squares
        problems with bounds on the variables. See ``method='lm'``.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    The solution, `x`, is always a 1-D array, regardless of the shape of
    `x0`, or whether `x0` is a scalar.

    Examples
    --------
    >>> from scipy.optimize import leastsq
    >>> def func(x):
    ...     return 2*(x-3)**2+1
    >>> leastsq(func, 0)
    (array([2.99999999]), 1)

    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    # One probe evaluation to learn the residual length M and dtype.
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]

    # The problem must not be underdetermined.
    if n > m:
        raise TypeError(f"Improper input: func input vector length N={n} must"
                        f" not exceed func output vector length M={m}")

    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        # Validate the Jacobian shape, oriented per ``col_deriv``.
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev,
                                 factor, diag)

    # Message and exception class (or None) for each MINPACK info code.
    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  f"the sum of squares\n are at most {ftol:f} and the "
                  "relative error between two consecutive "
                  f"iterates is at \n most {xtol:f}", None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n the Jacobian to machine "
                  "precision." % gtol, ValueError]}

    # The FORTRAN return value (possible return values are >= 0 and <= 8)
    info = retval[-1]

    if full_output:
        cov_x = None
        if info in LEASTSQ_SUCCESS:
            # This was
            # perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            # r = triu(transpose(retval[1]['fjac'])[:n, :])
            # R = dot(r, perm)
            # cov_x = inv(dot(transpose(R), R))
            # but the explicit dot product was not necessary and sometimes
            # the result was not symmetric positive definite. See gh-4555.
            perm = retval[1]['ipvt'] - 1
            n = len(perm)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            inv_triu = linalg.get_lapack_funcs('trtri', (r,))
            try:
                # inverse of permuted matrix is a permutation of matrix inverse
                invR, trtri_info = inv_triu(r)  # default: upper, non-unit diag
                if trtri_info != 0:  # explicit comparison for readability
                    raise LinAlgError(f'trtri returned info {trtri_info}')
                invR[perm] = invR.copy()
                cov_x = invR @ invR.T
            except (LinAlgError, ValueError):
                # Singular or otherwise unusable R: report cov_x as None.
                pass
        return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
    else:
        if info in LEASTSQ_FAILURE:
            warnings.warn(errors[info][0], RuntimeWarning, stacklevel=2)
        elif info == 0:
            raise errors[info][1](errors[info][0])
        return retval[0], info
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
def _lightweight_memoizer(f):
|
| 512 |
+
# very shallow memoization to address gh-13670: only remember the first set
|
| 513 |
+
# of parameters and corresponding function value, and only attempt to use
|
| 514 |
+
# them twice (the number of times the function is evaluated at x0).
|
| 515 |
+
def _memoized_func(params):
|
| 516 |
+
if _memoized_func.skip_lookup:
|
| 517 |
+
return f(params)
|
| 518 |
+
|
| 519 |
+
if np.all(_memoized_func.last_params == params):
|
| 520 |
+
return _memoized_func.last_val
|
| 521 |
+
elif _memoized_func.last_params is not None:
|
| 522 |
+
_memoized_func.skip_lookup = True
|
| 523 |
+
|
| 524 |
+
val = f(params)
|
| 525 |
+
|
| 526 |
+
if _memoized_func.last_params is None:
|
| 527 |
+
_memoized_func.last_params = np.copy(params)
|
| 528 |
+
_memoized_func.last_val = val
|
| 529 |
+
|
| 530 |
+
return val
|
| 531 |
+
|
| 532 |
+
_memoized_func.last_params = None
|
| 533 |
+
_memoized_func.last_val = None
|
| 534 |
+
_memoized_func.skip_lookup = False
|
| 535 |
+
return _memoized_func
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def _wrap_func(func, xdata, ydata, transform):
|
| 539 |
+
if transform is None:
|
| 540 |
+
def func_wrapped(params):
|
| 541 |
+
return func(xdata, *params) - ydata
|
| 542 |
+
elif transform.size == 1 or transform.ndim == 1:
|
| 543 |
+
def func_wrapped(params):
|
| 544 |
+
return transform * (func(xdata, *params) - ydata)
|
| 545 |
+
else:
|
| 546 |
+
# Chisq = (y - yd)^T C^{-1} (y-yd)
|
| 547 |
+
# transform = L such that C = L L^T
|
| 548 |
+
# C^{-1} = L^{-T} L^{-1}
|
| 549 |
+
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
|
| 550 |
+
# Define (y-yd)' = L^{-1} (y-yd)
|
| 551 |
+
# by solving
|
| 552 |
+
# L (y-yd)' = (y-yd)
|
| 553 |
+
# and minimize (y-yd)'^T (y-yd)'
|
| 554 |
+
def func_wrapped(params):
|
| 555 |
+
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
|
| 556 |
+
return func_wrapped
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def _wrap_jac(jac, xdata, transform):
|
| 560 |
+
if transform is None:
|
| 561 |
+
def jac_wrapped(params):
|
| 562 |
+
return jac(xdata, *params)
|
| 563 |
+
elif transform.ndim == 1:
|
| 564 |
+
def jac_wrapped(params):
|
| 565 |
+
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
|
| 566 |
+
else:
|
| 567 |
+
def jac_wrapped(params):
|
| 568 |
+
return solve_triangular(transform,
|
| 569 |
+
np.asarray(jac(xdata, *params)),
|
| 570 |
+
lower=True)
|
| 571 |
+
return jac_wrapped
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
def _initialize_feasible(lb, ub):
|
| 575 |
+
p0 = np.ones_like(lb)
|
| 576 |
+
lb_finite = np.isfinite(lb)
|
| 577 |
+
ub_finite = np.isfinite(ub)
|
| 578 |
+
|
| 579 |
+
mask = lb_finite & ub_finite
|
| 580 |
+
p0[mask] = 0.5 * (lb[mask] + ub[mask])
|
| 581 |
+
|
| 582 |
+
mask = lb_finite & ~ub_finite
|
| 583 |
+
p0[mask] = lb[mask] + 1
|
| 584 |
+
|
| 585 |
+
mask = ~lb_finite & ub_finite
|
| 586 |
+
p0[mask] = ub[mask] - 1
|
| 587 |
+
|
| 588 |
+
return p0
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=None, bounds=(-np.inf, np.inf), method=None,
              jac=None, *, full_output=False, nan_policy=None,
              **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : array_like
        The independent variable where the data is measured.
        Should usually be an M-length sequence or an (k,M)-shaped array for
        functions with k predictors, and each element should be float
        convertible if it is an array like object.
    ydata : array_like
        The dependent data, a length M array - nominally ``f(xdata, ...)``.
    p0 : array_like, optional
        Initial guess for the parameters (length N). If None, then the
        initial values will all be 1 (if the number of parameters for the
        function can be determined using introspection, otherwise a
        ValueError is raised).
    sigma : None or scalar or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`. If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

        - A scalar or 1-D `sigma` should contain values of standard deviations of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = sum((r / sigma) ** 2)``.

        - A 2-D `sigma` should contain the covariance matrix of
          errors in `ydata`. In this case, the optimized function is
          ``chisq = r.T @ inv(sigma) @ r``.

          .. versionadded:: 0.19

        None (default) is equivalent of 1-D `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated parameter
        covariance `pcov` reflects these absolute values.

        If False (default), only the relative magnitudes of the `sigma` values matter.
        The returned parameter covariance matrix `pcov` is based on scaling
        `sigma` by a constant factor. This constant is set by demanding that the
        reduced `chisq` for the optimal parameters `popt` when using the
        *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
        match the sample variance of the residuals after the fit. Default is False.
        Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans. Default is True if `nan_policy` is not specified
        explicitly and False otherwise.
    bounds : 2-tuple of array_like or `Bounds`, optional
        Lower and upper bounds on parameters. Defaults to no bounds.
        There are two ways to specify the bounds:

        - Instance of `Bounds` class.

        - 2-tuple of array_like: Each element of the tuple must be either
          an array with the length equal to the number of parameters, or a
          scalar (in which case the bound is taken to be the same for all
          parameters). Use ``np.inf`` with an appropriate sign to disable
          bounds on all or some parameters.

    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables, use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    full_output : boolean, optional
        If True, this function returns additional information: `infodict`,
        `mesg`, and `ier`.

        .. versionadded:: 1.9
    nan_policy : {'raise', 'omit', None}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is None):

        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values
        * None: no special handling of NaNs is performed
          (except what is done by check_finite); the behavior when NaNs
          are present is implementation-dependent and may change.

        Note that if this value is specified explicitly (not None),
        `check_finite` will be set as False.

        .. versionadded:: 1.11
    **kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized.
    pcov : 2-D array
        The estimated approximate covariance of popt. The diagonals provide
        the variance of the parameter estimate. To compute one standard
        deviation errors on the parameters, use
        ``perr = np.sqrt(np.diag(pcov))``. Note that the relationship between
        `cov` and parameter error estimates is derived based on a linear
        approximation to the model function around the optimum [1].
        When this approximation becomes inaccurate, `cov` may not provide an
        accurate measure of uncertainty.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have a full rank, then
        'lm' method returns a matrix filled with ``np.inf``, on the other hand
        'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
        the covariance matrix. Covariance matrices with large condition numbers
        (e.g. computed with `numpy.linalg.cond`) may indicate that results are
        unreliable.
    infodict : dict (returned only if `full_output` is True)
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls. Methods 'trf' and 'dogbox' do not
            count function calls for numerical Jacobian approximation,
            as opposed to 'lm' method.
        ``fvec``
            The residual values evaluated at the solution, for a 1-D `sigma`
            this is ``(f(x, *popt) - ydata)/sigma``.
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
            Method 'lm' only provides this information.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
            Method 'lm' only provides this information.
        ``qtf``
            The vector (transpose(q) * fvec).
            Method 'lm' only provides this information.

        .. versionadded:: 1.9
    mesg : str (returned only if `full_output` is True)
        A string message giving information about the solution.

        .. versionadded:: 1.9
    ier : int (returned only if `full_output` is True)
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable `mesg` gives more information.

        .. versionadded:: 1.9

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.

    RuntimeError
        if the least-squares minimization fails.

    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    Users should ensure that inputs `xdata`, `ydata`, and the output of `f`
    are ``float64``, or else the optimization may return incorrect results.

    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Parameters to be fitted must have similar scale. Differences of multiple
    orders of magnitude can lead to incorrect results. For the 'trf' and
    'dogbox' methods, the `x_scale` keyword argument can be used to scale
    the parameters.

    References
    ----------
    [1] K. Vugrin et al. Confidence region estimation techniques for nonlinear
        regression in groundwater flow: Three case studies. Water Resources
        Research, Vol. 43, W03423, :doi:`10.1029/2005WR004804`

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    Define the data to be fit with some noise:

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> rng = np.random.default_rng()
    >>> y_noise = 0.2 * rng.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`:

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> popt
    array([2.56274217, 1.37268521, 0.47427475])
    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    Constrain the optimization to the region of ``0 <= a <= 3``,
    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
    >>> popt
    array([2.43736712, 1.        , 0.34463856])
    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()

    For reliable results, the model `func` should not be overparametrized;
    redundant parameters can cause unreliable covariance matrices and, in some
    cases, poorer quality fits. As a quick check of whether the model may be
    overparameterized, calculate the condition number of the covariance matrix:

    >>> np.linalg.cond(pcov)
    34.571092161547405  # may vary

    The value is small, so it does not raise much concern. If, however, we were
    to add a fourth parameter ``d`` to `func` with the same effect as ``a``:

    >>> def func2(x, a, b, c, d):
    ...     return a * d * np.exp(-b * x) + c  # a and d are redundant
    >>> popt, pcov = curve_fit(func2, xdata, ydata)
    >>> np.linalg.cond(pcov)
    1.13250718925596e+32  # may vary

    Such a large value is cause for concern. The diagonal elements of the
    covariance matrix, which is related to uncertainty of the fit, gives more
    information:

    >>> np.diag(pcov)
    array([1.48814742e+29, 3.78596560e-02, 5.39253738e-03, 2.76417220e+28])  # may vary

    Note that the first and last terms are much larger than the other elements,
    suggesting that the optimal values of these parameters are ambiguous and
    that only one of these parameters is needed in the model.

    If the optimal parameters of `f` differ by multiple orders of magnitude, the
    resulting fit can be inaccurate. Sometimes, `curve_fit` can fail to find any
    results:

    >>> ydata = func(xdata, 500000, 0.01, 15)
    >>> try:
    ...     popt, pcov = curve_fit(func, xdata, ydata, method = 'trf')
    ... except RuntimeError as e:
    ...     print(e)
    Optimal parameters not found: The maximum number of function evaluations is
    exceeded.

    If parameter scale is roughly known beforehand, it can be defined in
    `x_scale` argument:

    >>> popt, pcov = curve_fit(func, xdata, ydata, method = 'trf',
    ...                        x_scale = [1000, 1, 1])
    >>> popt
    array([5.00000000e+05, 1.00000000e-02, 1.49999999e+01])
    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        sig = _getfullargspec(f)
        args = sig.args
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    if isinstance(bounds, Bounds):
        lb, ub = bounds.lb, bounds.ub
    else:
        lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = _initialize_feasible(lb, ub)

    # Any finite bound makes this a constrained problem, which 'lm' cannot do.
    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    if check_finite is None:
        check_finite = True if nan_policy is None else False

    # optimization may produce garbage for float32 inputs, cast them to float64
    if check_finite:
        ydata = np.asarray_chkfinite(ydata, float)
    else:
        ydata = np.asarray(ydata, float)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata, float)
        else:
            xdata = np.asarray(xdata, float)

    if ydata.size == 0:
        raise ValueError("`ydata` must not be empty!")

    # nan handling is needed only if check_finite is False because if True,
    # the x-y data are already checked, and they don't contain nans.
    if not check_finite and nan_policy is not None:
        if nan_policy == "propagate":
            raise ValueError("`nan_policy='propagate'` is not supported "
                             "by this function.")

        policies = [None, 'raise', 'omit']
        x_contains_nan, nan_policy = _contains_nan(xdata, nan_policy,
                                                   policies=policies)
        y_contains_nan, nan_policy = _contains_nan(ydata, nan_policy,
                                                   policies=policies)

        if (x_contains_nan or y_contains_nan) and nan_policy == 'omit':
            # ignore NaNs for N dimensional arrays
            has_nan = np.isnan(xdata)
            has_nan = has_nan.any(axis=tuple(range(has_nan.ndim-1)))
            has_nan |= np.isnan(ydata)

            xdata = xdata[..., ~has_nan]
            ydata = ydata[~has_nan]

    # Determine type of sigma
    if sigma is not None:
        sigma = np.asarray(sigma)

        # if 1-D or a scalar, sigma are errors, define transform = 1/sigma
        if sigma.size == 1 or sigma.shape == (ydata.size, ):
            transform = 1.0 / sigma
        # if 2-D, sigma is the covariance matrix,
        # define transform = L such that L L^T = C
        elif sigma.shape == (ydata.size, ydata.size):
            try:
                # scipy.linalg.cholesky requires lower=True to return L L^T = A
                transform = cholesky(sigma, lower=True)
            except LinAlgError as e:
                raise ValueError("`sigma` must be positive definite.") from e
        else:
            raise ValueError("`sigma` has incorrect shape.")
    else:
        transform = None

    func = _lightweight_memoizer(_wrap_func(f, xdata, ydata, transform))

    if callable(jac):
        jac = _lightweight_memoizer(_wrap_jac(jac, xdata, transform))
    elif jac is None and method != 'lm':
        jac = '2-point'

    if 'args' in kwargs:
        # The specification for the model function `f` does not support
        # additional arguments. Refer to the `curve_fit` docstring for
        # acceptable call signatures of `f`.
        raise ValueError("'args' is not a supported keyword argument.")

    if method == 'lm':
        # if ydata.size == 1, this might be used for broadcast.
        if ydata.size != 1 and n > ydata.size:
            raise TypeError(f"The number of func parameters={n} must not"
                            f" exceed the number of data points={ydata.size}")
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        ysize = len(infodict['fvec'])
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)

        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        if not res.success:
            raise RuntimeError("Optimal parameters not found: " + res.message)

        infodict = dict(nfev=res.nfev, fvec=res.fun)
        ier = res.status
        errmsg = res.message

        ysize = len(res.fun)
        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)

    warn_cov = False
    if pcov is None or np.isnan(pcov).any():
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        # Rescale pcov so the reduced chi-square of the fit equals unity.
        if ysize > p0.size:
            s_sq = cost / (ysize - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning, stacklevel=2)

    if full_output:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.

    Compares the user-supplied Jacobian `Dfcn` of `fcn` at `x0` against a
    finite-difference estimate using MINPACK's ``chkder`` routine.

    Parameters
    ----------
    fcn : callable
        Function ``fcn(x, *args)`` returning an m-vector.
    Dfcn : callable
        Function returning the Jacobian of `fcn` at ``x``.
    x0 : array_like
        Point at which the gradient is checked.
    args : tuple, optional
        Extra arguments passed to `fcn` and `Dfcn`.
    col_deriv : int, optional
        Non-zero if `Dfcn` returns derivatives down columns (the MINPACK
        convention); 0 (default) means the Jacobian is transposed first.

    Returns
    -------
    good : int
        Non-zero when every component of `err` exceeds 0.5, i.e. the
        gradient check passed for all residuals.
    err : ndarray
        Per-component measure of agreement produced by ``chkder``; values
        near 1 indicate agreement, near 0 indicate disagreement.
    """

    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        # chkder expects column-oriented derivatives; transpose row-oriented.
        fjac = transpose(fjac)

    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    # Mode 1: chkder fills `xp` with a neighboring point to evaluate at.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)

    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    # Mode 2: chkder compares fvec, fvecp and fjac, writing the result to `err`.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)

    good = (prod(greater(err, 0.5), axis=0))

    return (good, err)
|
| 1092 |
+
|
| 1093 |
+
|
| 1094 |
+
def _del2(p0, p1, d):
|
| 1095 |
+
return p0 - np.square(p1 - p0) / d
|
| 1096 |
+
|
| 1097 |
+
|
| 1098 |
+
def _relerr(actual, desired):
|
| 1099 |
+
return (actual - desired) / desired
|
| 1100 |
+
|
| 1101 |
+
|
| 1102 |
+
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
    """Iterate ``func`` from ``x0`` until the relative change is below ``xtol``.

    When ``use_accel`` is true, each step is refined with Steffensen's
    method (Aitken's del-squared extrapolation); otherwise plain fixed-point
    iteration is used. Raises ``RuntimeError`` if ``maxiter`` is exhausted.
    """
    p_prev = x0
    for _ in range(maxiter):
        p_next = func(p_prev, *args)
        if use_accel:
            # Steffensen acceleration needs a second function evaluation;
            # fall back to it directly where the denominator vanishes.
            p_accel = func(p_next, *args)
            denom = p_accel - 2.0 * p_next + p_prev
            p = _lazywhere(denom != 0, (p_prev, p_next, denom), f=_del2,
                           fillvalue=p_accel)
        else:
            p = p_next
        # Relative error where the previous iterate is nonzero; elsewhere
        # use the new value itself so a zero iterate never divides.
        relerr = _lazywhere(p_prev != 0, (p, p_prev), f=_relerr, fillvalue=p)
        if np.all(np.abs(relerr) < xtol):
            return p
        p_prev = p
    raise RuntimeError(
        f"Failed to converge after {maxiter} iterations, value is {p}")
|
| 1118 |
+
|
| 1119 |
+
|
| 1120 |
+
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
    """
    Find a fixed point of the function.

    Given a function of one or more variables and a starting point, find a
    fixed point of the function: i.e., where ``func(x0) == x0``.

    Parameters
    ----------
    func : function
        Function to evaluate.
    x0 : array_like
        Fixed point of function.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Convergence tolerance, defaults to 1e-08.
    maxiter : int, optional
        Maximum number of iterations, defaults to 500.
    method : {"del2", "iteration"}, optional
        Method of finding the fixed-point, defaults to "del2",
        which uses Steffensen's Method with Aitken's ``Del^2``
        convergence acceleration [1]_. The "iteration" method simply iterates
        the function until convergence is detected, without attempting to
        accelerate the convergence.

    References
    ----------
    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import optimize
    >>> def func(x, c1, c2):
    ...    return np.sqrt(c1/(x+c2))
    >>> c1 = np.array([10,12.])
    >>> c2 = np.array([3, 5.])
    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
    array([ 1.4920333 ,  1.37228132])

    """
    # Unknown `method` values raise KeyError, matching the original contract.
    accel_for_method = {'del2': True, 'iteration': False}
    use_accel = accel_for_method[method]
    start = _asarray_validated(x0, as_inexact=True)
    return _fixed_point_helper(func, start, args, xtol, maxiter, use_accel)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d7584b3d74b2c7f2804c049af2291355762236b8a294520a6c7a83085ac11544
|
| 3 |
+
size 152168
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_nonlin.py
ADDED
|
@@ -0,0 +1,1585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
|
| 2 |
+
# Distributed under the same license as SciPy.
|
| 3 |
+
|
| 4 |
+
import inspect
|
| 5 |
+
import sys
|
| 6 |
+
import warnings
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from numpy import asarray, dot, vdot
|
| 10 |
+
|
| 11 |
+
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
|
| 12 |
+
import scipy.sparse.linalg
|
| 13 |
+
import scipy.sparse
|
| 14 |
+
from scipy.linalg import get_blas_funcs
|
| 15 |
+
from scipy._lib._util import copy_if_needed
|
| 16 |
+
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
|
| 17 |
+
from ._linesearch import scalar_search_wolfe1, scalar_search_armijo
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
__all__ = [
|
| 21 |
+
'broyden1', 'broyden2', 'anderson', 'linearmixing',
|
| 22 |
+
'diagbroyden', 'excitingmixing', 'newton_krylov',
|
| 23 |
+
'BroydenFirst', 'KrylovJacobian', 'InverseJacobian', 'NoConvergence']
|
| 24 |
+
|
| 25 |
+
#------------------------------------------------------------------------------
|
| 26 |
+
# Utility functions
|
| 27 |
+
#------------------------------------------------------------------------------
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class NoConvergence(Exception):
    """Raised when a nonlinear solver fails to converge within the
    specified `maxiter`."""
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def maxnorm(x):
    """Return the max-norm (largest absolute entry) of `x`."""
    return np.amax(np.abs(x))
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _as_inexact(x):
|
| 41 |
+
"""Return `x` as an array, of either floats or complex floats"""
|
| 42 |
+
x = asarray(x)
|
| 43 |
+
if not np.issubdtype(x.dtype, np.inexact):
|
| 44 |
+
return asarray(x, dtype=np.float64)
|
| 45 |
+
return x
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _array_like(x, x0):
|
| 49 |
+
"""Return ndarray `x` as same array subclass and shape as `x0`"""
|
| 50 |
+
x = np.reshape(x, np.shape(x0))
|
| 51 |
+
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
|
| 52 |
+
return wrap(x)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _safe_norm(v):
|
| 56 |
+
if not np.isfinite(v).all():
|
| 57 |
+
return np.array(np.inf)
|
| 58 |
+
return norm(v)
|
| 59 |
+
|
| 60 |
+
#------------------------------------------------------------------------------
|
| 61 |
+
# Generic nonlinear solver machinery
|
| 62 |
+
#------------------------------------------------------------------------------
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Shared docstring fragments. Solver docstrings contain placeholders such as
# ``%(params_basic)s`` which `_set_doc` interpolates from this mapping.
_doc_parts = dict(
    params_basic="""
    F : function(x) -> f
        Function whose root to find; should take and return an array-like
        object.
    xin : array_like
        Initial guess for the solution
    """.strip(),
    params_extra="""
    iter : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    verbose : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make. If more are needed to
        meet convergence, `NoConvergence` is raised.
    f_tol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    f_rtol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    x_tol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    x_rtol : float, optional
        Relative minimum step size. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in the
        direction given by the Jacobian approximation. Defaults to 'armijo'.
    callback : function, optional
        Optional callback function. It is called on every iteration as
        ``callback(x, f)`` where `x` is the current solution and `f`
        the corresponding residual.

    Returns
    -------
    sol : ndarray
        An array (of similar array type as `x0`) containing the final solution.

    Raises
    ------
    NoConvergence
        When a solution was not found.

    """.strip()
)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _set_doc(obj):
    """Interpolate the shared `_doc_parts` fragments into `obj.__doc__`."""
    doc = obj.__doc__
    if doc:
        obj.__doc__ = doc % _doc_parts
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
                 maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 tol_norm=None, line_search='armijo', callback=None,
                 full_output=False, raise_exception=True):
    """
    Find a root of a function, in a way suitable for large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    jacobian : Jacobian
        A Jacobian approximation: `Jacobian` object or something that
        `asjacobian` can transform to one. Alternatively, a string specifying
        which of the builtin Jacobian approximations to use:

            krylov, broyden1, broyden2, anderson
            diagbroyden, linearmixing, excitingmixing

    %(params_extra)s
    full_output : bool
        If true, returns a dictionary `info` containing convergence
        information.
    raise_exception : bool
        If True, a `NoConvergence` exception is raise if no solution is found.

    See Also
    --------
    asjacobian, Jacobian

    Notes
    -----
    This algorithm implements the inexact Newton method, with
    backtracking or full line searches. Several Jacobian
    approximations are available, including Krylov and Quasi-Newton
    methods.

    References
    ----------
    .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
       Equations\". Society for Industrial and Applied Mathematics. (1995)
       https://archive.siam.org/books/kelley/fr16/

    """
    # Can't use default parameters because it's being explicitly passed as None
    # from the calling function, so we need to set it here.
    tol_norm = maxnorm if tol_norm is None else tol_norm
    condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
                                     x_tol=x_tol, x_rtol=x_rtol,
                                     iter=iter, norm=tol_norm)

    # Work internally on flattened float/complex arrays; `_array_like`
    # restores the caller's shape/subclass on output.
    x0 = _as_inexact(x0)

    def func(z):
        return _as_inexact(F(_array_like(z, x0))).flatten()

    x = x0.flatten()

    # dx starts at +inf so the step-size termination tests cannot fire
    # before the first actual step has been taken.
    dx = np.full_like(x, np.inf)
    Fx = func(x)
    Fx_norm = norm(Fx)

    jacobian = asjacobian(jacobian)
    jacobian.setup(x.copy(), Fx, func)

    if maxiter is None:
        if iter is not None:
            # `iter` requests an exact number of iterations; allow one extra
            # pass so the final convergence check runs.
            maxiter = iter + 1
        else:
            maxiter = 100*(x.size+1)

    # Normalize the legacy boolean spellings of `line_search`.
    if line_search is True:
        line_search = 'armijo'
    elif line_search is False:
        line_search = None

    if line_search not in (None, 'armijo', 'wolfe'):
        raise ValueError("Invalid line search")

    # Solver tolerance selection.
    # `eta` is the inexact-Newton forcing term; the update below follows an
    # Eisenstat-Walker-style scheme (choice 2, with gamma safeguarding) —
    # TODO confirm against the cited reference.
    gamma = 0.9
    eta_max = 0.9999
    eta_treshold = 0.1
    eta = 1e-3

    for n in range(maxiter):
        # Returns 0 (continue), 1 (converged) or 2 (iteration-count reached).
        status = condition.check(Fx, x, dx)
        if status:
            break

        # The tolerance, as computed for scipy.sparse.linalg.* routines
        tol = min(eta, eta*Fx_norm)
        dx = -jacobian.solve(Fx, tol=tol)

        if norm(dx) == 0:
            raise ValueError("Jacobian inversion yielded zero vector. "
                             "This indicates a bug in the Jacobian "
                             "approximation.")

        # Line search, or Newton step
        if line_search:
            s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
                                                        line_search)
        else:
            s = 1.0
            x = x + dx
            Fx = func(x)
            Fx_norm_new = norm(Fx)

        jacobian.update(x.copy(), Fx)

        if callback:
            callback(x, Fx)

        # Adjust forcing parameters for inexact methods
        eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_treshold:
            eta = min(eta_max, eta_A)
        else:
            # Safeguard: do not let eta drop faster than gamma*eta**2.
            eta = min(eta_max, max(eta_A, gamma*eta**2))

        Fx_norm = Fx_norm_new

        # Print status
        if verbose:
            sys.stdout.write("%d: |F(x)| = %g; step %g\n" % (
                n, tol_norm(Fx), s))
            sys.stdout.flush()
    else:
        # for/else: reached only when the loop exhausted `maxiter`
        # without `break`, i.e. no convergence.
        if raise_exception:
            raise NoConvergence(_array_like(x, x0))
        else:
            status = 2

    if full_output:
        info = {'nit': condition.iteration,
                'fun': Fx,
                'status': status,
                'success': status == 1,
                'message': {1: 'A solution was found at the specified '
                               'tolerance.',
                            2: 'The maximum number of iterations allowed '
                               'has been reached.'
                            }[status]
                }
        return _array_like(x, x0), info
    else:
        return _array_like(x, x0)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
# Interpolate the shared parameter documentation into `nonlin_solve.__doc__`.
_set_doc(nonlin_solve)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
                        smin=1e-2):
    """Line search along direction `dx`; returns (s, x_new, Fx_new, |Fx_new|).

    `rdiff` scales the finite-difference step for the merit derivative and
    `smin` is the minimum admissible step length.
    """
    # One-element lists act as mutable cells so the nested `phi` can
    # memoize its most recent evaluation (reused after the search ends).
    tmp_s = [0]
    tmp_Fx = [Fx]
    tmp_phi = [norm(Fx)**2]
    s_norm = norm(x) / norm(dx)

    def phi(s, store=True):
        # Merit function: phi(s) = |F(x + s*dx)|^2 (non-finite values -> inf).
        if s == tmp_s[0]:
            return tmp_phi[0]
        xt = x + s*dx
        v = func(xt)
        p = _safe_norm(v)**2
        if store:
            tmp_s[0] = s
            tmp_phi[0] = p
            tmp_Fx[0] = v
        return p

    def derphi(s):
        # Forward-difference derivative of phi; step size scales with |s|
        # and with |x|/|dx| so it stays meaningful far from s=0.
        ds = (abs(s) + s_norm + 1) * rdiff
        return (phi(s+ds, store=False) - phi(s)) / ds

    if search_type == 'wolfe':
        s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
                                             xtol=1e-2, amin=smin)
    elif search_type == 'armijo':
        s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
                                       amin=smin)

    if s is None:
        # XXX: No suitable step length found. Take the full Newton step,
        # and hope for the best.
        s = 1.0

    x = x + s*dx
    if s == tmp_s[0]:
        # Reuse the memoized residual; avoids one extra function evaluation.
        Fx = tmp_Fx[0]
    else:
        Fx = func(x)
    Fx_norm = norm(Fx)

    return s, x, Fx, Fx_norm
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
class TerminationCondition:
    """
    Termination condition for an iteration. It is terminated if

    - |F| < f_rtol*|F_0|, AND
    - |F| < f_tol

    AND

    - |dx| < x_rtol*|x|, AND
    - |dx| < x_tol

    """
    def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 iter=None, norm=maxnorm):

        if f_tol is None:
            # Default absolute residual tolerance: eps**(1/3) ~ 6e-6.
            f_tol = np.finfo(np.float64).eps ** (1./3)
        # Unset tolerances default to +inf, i.e. the corresponding test
        # always passes (see the NB comment in `check`).
        if f_rtol is None:
            f_rtol = np.inf
        if x_tol is None:
            x_tol = np.inf
        if x_rtol is None:
            x_rtol = np.inf

        self.x_tol = x_tol
        self.x_rtol = x_rtol
        self.f_tol = f_tol
        self.f_rtol = f_rtol

        self.norm = norm

        self.iter = iter

        # f0_norm is captured lazily on the first `check` call;
        # `iteration` counts how many times `check` has run.
        self.f0_norm = None
        self.iteration = 0

    def check(self, f, x, dx):
        """Return the termination status for residual `f`, point `x`, step `dx`.

        Status codes: 0 = keep iterating, 1 = converged,
        2 = requested iteration count reached (only when `iter` was given).
        """
        self.iteration += 1
        f_norm = self.norm(f)
        x_norm = self.norm(x)
        dx_norm = self.norm(dx)

        if self.f0_norm is None:
            self.f0_norm = f_norm

        if f_norm == 0:
            return 1

        if self.iter is not None:
            # backwards compatibility with SciPy 0.6.0
            return 2 * (self.iteration > self.iter)

        # NB: condition must succeed for rtol=inf even if norm == 0
        return int((f_norm <= self.f_tol
                    and f_norm/self.f_rtol <= self.f0_norm)
                   and (dx_norm <= self.x_tol
                        and dx_norm/self.x_rtol <= x_norm))
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
#------------------------------------------------------------------------------
|
| 378 |
+
# Generic Jacobian approximation
|
| 379 |
+
#------------------------------------------------------------------------------
|
| 380 |
+
|
| 381 |
+
class Jacobian:
    """
    Common interface for Jacobians or Jacobian approximations.

    The optional methods come useful when implementing trust region
    etc., algorithms that often require evaluating transposes of the
    Jacobian.

    Methods
    -------
    solve
        Returns J^-1 * v
    update
        Updates Jacobian to point `x` (where the function has residual `Fx`)

    matvec : optional
        Returns J * v
    rmatvec : optional
        Returns A^H * v
    rsolve : optional
        Returns A^-H * v
    matmat : optional
        Returns A * V, where V is a dense matrix with dimensions (N,K).
    todense : optional
        Form the dense Jacobian matrix. Necessary for dense trust region
        algorithms, and useful for testing.

    Attributes
    ----------
    shape
        Matrix dimensions (M, N)
    dtype
        Data type of the matrix.
    func : callable, optional
        Function the Jacobian corresponds to

    """

    def __init__(self, **kw):
        # Only the known interface entries may be supplied; non-None values
        # are attached directly on the instance (shadowing class defaults).
        names = ["solve", "update", "matvec", "rmatvec", "rsolve",
                 "matmat", "todense", "shape", "dtype"]
        for name, value in kw.items():
            if name not in names:
                raise ValueError("Unknown keyword argument %s" % name)
            if value is not None:
                setattr(self, name, kw[name])


        if hasattr(self, "todense"):
            # NOTE(review): this closure is defined but, in the code visible
            # here, never bound to the instance (no ``self.__array__ = ...``
            # follows) — confirm whether a binding line is missing upstream.
            def __array__(self, dtype=None, copy=None):
                if dtype is not None:
                    raise ValueError(f"`dtype` must be None, was {dtype}")
                return self.todense()

    def aspreconditioner(self):
        """Return an `InverseJacobian` view of this Jacobian, usable as a
        preconditioner (its matvec applies J^-1)."""
        return InverseJacobian(self)

    def solve(self, v, tol=0):
        # Must be provided by subclasses or via __init__ kwargs: J^-1 v.
        raise NotImplementedError

    def update(self, x, F):
        # Default: the approximation does not change between iterations.
        pass

    def setup(self, x, F, func):
        """Record the problem's function, dimensions and dtype.

        Called once by `nonlin_solve` before iteration starts.
        """
        self.func = func
        self.shape = (F.size, x.size)
        self.dtype = F.dtype
        if self.__class__.setup is Jacobian.setup:
            # Call on the first point unless overridden
            self.update(x, F)
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
class InverseJacobian:
    """Present a `Jacobian`-like object as its own inverse.

    ``matvec`` on this wrapper applies ``jacobian.solve`` (i.e. J^-1 v),
    which makes it directly usable as a preconditioner.
    """

    def __init__(self, jacobian):
        self.jacobian = jacobian
        # Multiplying by the inverse == solving with the wrapped Jacobian.
        self.matvec = jacobian.solve
        self.update = jacobian.update
        # Forward optional capabilities only when the wrapped object has them.
        for src, dst in (('setup', 'setup'), ('rsolve', 'rmatvec')):
            if hasattr(jacobian, src):
                setattr(self, dst, getattr(jacobian, src))

    @property
    def shape(self):
        return self.jacobian.shape

    @property
    def dtype(self):
        return self.jacobian.dtype
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
def asjacobian(J):
    """
    Convert given object to one suitable for use as a Jacobian.

    Accepts: a `Jacobian` instance or subclass, a dense or sparse square
    matrix, a duck-typed object with ``shape``/``dtype``/``solve``, a
    callable ``J(x)`` returning a matrix, or the name of a builtin
    approximation ('broyden1', 'krylov', ...).
    """
    spsolve = scipy.sparse.linalg.spsolve
    if isinstance(J, Jacobian):
        return J
    elif inspect.isclass(J) and issubclass(J, Jacobian):
        return J()
    elif isinstance(J, np.ndarray):
        if J.ndim > 2:
            raise ValueError('array must have rank <= 2')
        J = np.atleast_2d(np.asarray(J))
        if J.shape[0] != J.shape[1]:
            raise ValueError('array must be square')

        return Jacobian(matvec=lambda v: dot(J, v),
                        rmatvec=lambda v: dot(J.conj().T, v),
                        solve=lambda v, tol=0: solve(J, v),
                        rsolve=lambda v, tol=0: solve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif scipy.sparse.issparse(J):
        if J.shape[0] != J.shape[1]:
            raise ValueError('matrix must be square')
        return Jacobian(matvec=lambda v: J @ v,
                        rmatvec=lambda v: J.conj().T @ v,
                        solve=lambda v, tol=0: spsolve(J, v),
                        rsolve=lambda v, tol=0: spsolve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
        # Duck-typed Jacobian-like object: forward its methods directly.
        return Jacobian(matvec=getattr(J, 'matvec'),
                        rmatvec=getattr(J, 'rmatvec'),
                        solve=J.solve,
                        rsolve=getattr(J, 'rsolve'),
                        update=getattr(J, 'update'),
                        setup=getattr(J, 'setup'),
                        dtype=J.dtype,
                        shape=J.shape)
    elif callable(J):
        # Assume it's a function J(x) that returns the Jacobian
        class Jac(Jacobian):
            # Re-evaluates J(self.x) on every operation; `update` only
            # records the current point.
            def update(self, x, F):
                self.x = x

            def solve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m, v)
                elif scipy.sparse.issparse(m):
                    return spsolve(m, v)
                else:
                    raise ValueError("Unknown matrix type")

            def matvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m, v)
                elif scipy.sparse.issparse(m):
                    return m @ v
                else:
                    raise ValueError("Unknown matrix type")

            def rsolve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m.conj().T, v)
                elif scipy.sparse.issparse(m):
                    return spsolve(m.conj().T, v)
                else:
                    raise ValueError("Unknown matrix type")

            def rmatvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m.conj().T, v)
                elif scipy.sparse.issparse(m):
                    return m.conj().T @ v
                else:
                    raise ValueError("Unknown matrix type")
        return Jac()
    elif isinstance(J, str):
        # Builtin approximations selected by name.
        return dict(broyden1=BroydenFirst,
                    broyden2=BroydenSecond,
                    anderson=Anderson,
                    diagbroyden=DiagBroyden,
                    linearmixing=LinearMixing,
                    excitingmixing=ExcitingMixing,
                    krylov=KrylovJacobian)[J]()
    else:
        raise TypeError('Cannot convert object to a Jacobian')
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
#------------------------------------------------------------------------------
|
| 565 |
+
# Broyden
|
| 566 |
+
#------------------------------------------------------------------------------
|
| 567 |
+
|
| 568 |
+
class GenericBroyden(Jacobian):
    """Base class for Broyden-type quasi-Newton approximations.

    Tracks the previous point/residual and feeds the secant pair
    (dx, df) to the subclass hook `_update`.
    """

    def setup(self, x0, f0, func):
        Jacobian.setup(self, x0, f0, func)
        self.last_f = f0
        self.last_x = x0

        if hasattr(self, 'alpha') and self.alpha is None:
            # Autoscale the initial Jacobian parameter
            # unless we have already guessed the solution.
            normf0 = norm(f0)
            if normf0:
                self.alpha = 0.5*max(norm(x0), 1) / normf0
            else:
                self.alpha = 1.0

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Subclass hook: incorporate the secant pair (dx, df) at point x.
        raise NotImplementedError

    def update(self, x, f):
        """Advance the approximation to the new point `x` with residual `f`."""
        df = f - self.last_f
        dx = x - self.last_x
        self._update(x, f, dx, df, norm(dx), norm(df))
        self.last_f = f
        self.last_x = x
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
class LowRankMatrix:
    r"""
    A matrix represented as

    .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger

    However, if the rank of the matrix reaches the dimension of the vectors,
    full matrix representation will be used thereon.

    """

    def __init__(self, alpha, n, dtype):
        self.alpha = alpha        # scalar multiplying the identity part
        self.cs = []              # left vectors c_n of the rank-1 terms
        self.ds = []              # right vectors d_n of the rank-1 terms
        self.n = n                # dimension of the (square) matrix
        self.dtype = dtype
        self.collapsed = None     # dense representation once collapsed

    @staticmethod
    def _matvec(v, alpha, cs, ds):
        """Evaluate w = (alpha*I + sum c_n d_n^H) v using BLAS kernels."""
        # NOTE: `scal` is fetched but not used in this routine.
        axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
                                          cs[:1] + [v])
        w = alpha * v
        for c, d in zip(cs, ds):
            a = dotc(d, v)
            w = axpy(c, w, w.size, a)
        return w

    @staticmethod
    def _solve(v, alpha, cs, ds):
        """Evaluate w = M^-1 v"""
        if len(cs) == 0:
            return v/alpha

        # Woodbury-type identity for the low-rank update:
        # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1

        axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])

        c0 = cs[0]
        # Small (rank x rank) system: A = alpha*I + D^H C
        A = alpha * np.identity(len(cs), dtype=c0.dtype)
        for i, d in enumerate(ds):
            for j, c in enumerate(cs):
                A[i,j] += dotc(d, c)

        q = np.zeros(len(cs), dtype=c0.dtype)
        for j, d in enumerate(ds):
            q[j] = dotc(d, v)
        q /= alpha
        q = solve(A, q)

        w = v/alpha
        for c, qc in zip(cs, q):
            w = axpy(c, w, w.size, -qc)

        return w

    def matvec(self, v):
        """Evaluate w = M v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed, v)
        return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)

    def rmatvec(self, v):
        """Evaluate w = M^H v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed.T.conj(), v)
        # M^H has the roles of cs/ds swapped and alpha conjugated.
        return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)

    def solve(self, v, tol=0):
        """Evaluate w = M^-1 v"""
        if self.collapsed is not None:
            return solve(self.collapsed, v)
        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)

    def rsolve(self, v, tol=0):
        """Evaluate w = M^-H v"""
        if self.collapsed is not None:
            return solve(self.collapsed.T.conj(), v)
        return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)

    def append(self, c, d):
        """Add the rank-1 term c d^H to the matrix."""
        if self.collapsed is not None:
            # Already dense: apply the update directly.
            self.collapsed += c[:,None] * d[None,:].conj()
            return

        self.cs.append(c)
        self.ds.append(d)

        # Once the stored rank exceeds the dimension, dense form is cheaper.
        if len(self.cs) > c.size:
            self.collapse()

    def __array__(self, dtype=None, copy=None):
        # `dtype`/`copy` are accepted for numpy protocol compatibility
        # but not handled; warn if a caller passes them.
        if dtype is not None:
            warnings.warn("LowRankMatrix is scipy-internal code, `dtype` "
                          f"should only be None but was {dtype} (not handled)",
                          stacklevel=3)
        if copy is not None:
            warnings.warn("LowRankMatrix is scipy-internal code, `copy` "
                          f"should only be None but was {copy} (not handled)",
                          stacklevel=3)
        if self.collapsed is not None:
            return self.collapsed

        Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
        for c, d in zip(self.cs, self.ds):
            Gm += c[:,None]*d[None,:].conj()
        return Gm

    def collapse(self):
        """Collapse the low-rank matrix to a full-rank one."""
        self.collapsed = np.array(self, copy=copy_if_needed)
        self.cs = None
        self.ds = None
        self.alpha = None

    def restart_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping all vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        if len(self.cs) > rank:
            del self.cs[:]
            del self.ds[:]

    def simple_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping oldest vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        while len(self.cs) > rank:
            del self.cs[0]
            del self.ds[0]

    def svd_reduce(self, max_rank, to_retain=None):
        """
        Reduce the rank of the matrix by retaining some SVD components.

        This corresponds to the \"Broyden Rank Reduction Inverse\"
        algorithm described in [1]_.

        Note that the SVD decomposition can be done by solving only a
        problem whose size is the effective rank of this matrix, which
        is viable even for large problems.

        Parameters
        ----------
        max_rank : int
            Maximum rank of this matrix after reduction.
        to_retain : int, optional
            Number of SVD components to retain when reduction is done
            (ie. rank > max_rank). Default is ``max_rank - 2``.

        References
        ----------
        .. [1] B.A. van der Rotten, PhD thesis,
           \"A limited memory Broyden method to solve high-dimensional
           systems of nonlinear equations\". Mathematisch Instituut,
           Universiteit Leiden, The Netherlands (2003).

           https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

        """
        if self.collapsed is not None:
            return

        p = max_rank
        if to_retain is not None:
            q = to_retain
        else:
            q = p - 2

        if self.cs:
            # Clamp p to the vector dimension and q to [0, p-1].
            p = min(p, len(self.cs[0]))
        q = max(0, min(q, p-1))

        m = len(self.cs)
        if m < p:
            # nothing to do
            return

        C = np.array(self.cs).T
        D = np.array(self.ds).T

        # Orthonormalize D, then SVD the correspondingly transformed C;
        # the problem size is only the current rank m.
        D, R = qr(D, mode='economic')
        C = dot(C, R.T.conj())

        U, S, WH = svd(C, full_matrices=False)

        C = dot(C, inv(WH))
        D = dot(D, WH.T.conj())

        # Keep the q most significant components, drop the rest.
        for k in range(q):
            self.cs[k] = C[:,k].copy()
            self.ds[k] = D[:,k].copy()

        del self.cs[q:]
        del self.ds[q:]
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
_doc_parts['broyden_params'] = """
|
| 799 |
+
alpha : float, optional
|
| 800 |
+
Initial guess for the Jacobian is ``(-1/alpha)``.
|
| 801 |
+
reduction_method : str or tuple, optional
|
| 802 |
+
Method used in ensuring that the rank of the Broyden matrix
|
| 803 |
+
stays low. Can either be a string giving the name of the method,
|
| 804 |
+
or a tuple of the form ``(method, param1, param2, ...)``
|
| 805 |
+
that gives the name of the method and values for additional parameters.
|
| 806 |
+
|
| 807 |
+
Methods available:
|
| 808 |
+
|
| 809 |
+
- ``restart``: drop all matrix columns. Has no extra parameters.
|
| 810 |
+
- ``simple``: drop oldest matrix column. Has no extra parameters.
|
| 811 |
+
- ``svd``: keep only the most significant SVD components.
|
| 812 |
+
Takes an extra parameter, ``to_retain``, which determines the
|
| 813 |
+
number of SVD components to retain when rank reduction is done.
|
| 814 |
+
Default is ``max_rank - 2``.
|
| 815 |
+
|
| 816 |
+
max_rank : int, optional
|
| 817 |
+
Maximum rank for the Broyden matrix.
|
| 818 |
+
Default is infinity (i.e., no rank reduction).
|
| 819 |
+
""".strip()
|
| 820 |
+
|
| 821 |
+
|
| 822 |
+
class BroydenFirst(GenericBroyden):
    r"""
    Find a root of a function, using Broyden's first Jacobian approximation.

    This method is also known as "Broyden's good method".

    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='broyden1'`` in particular.

    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update

    .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)

    which corresponds to Broyden's first Jacobian update

    .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx


    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       "A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).

       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.broyden1(fun, [0, 0])
    >>> sol
    array([0.84116396, 0.15883641])

    """

    def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        # Low-rank representation of the inverse Jacobian; created in setup().
        self.Gm = None

        if max_rank is None:
            max_rank = np.inf
        self.max_rank = max_rank

        # A tuple form ``(name, param1, ...)`` carries extra reducer arguments.
        if isinstance(reduction_method, str):
            extra_params = ()
        else:
            extra_params = tuple(reduction_method[1:])
            reduction_method = reduction_method[0]
        reduce_params = (max_rank - 1,) + extra_params

        # Map the method name to the LowRankMatrix reducer. The lookup of
        # self.Gm must be late-bound, since setup() replaces the matrix.
        reducer_attr = {'svd': 'svd_reduce',
                        'simple': 'simple_reduce',
                        'restart': 'restart_reduce'}
        if reduction_method not in reducer_attr:
            raise ValueError("Unknown rank reduction method '%s'" %
                             reduction_method)
        attr_name = reducer_attr[reduction_method]
        self._reduce = lambda: getattr(self.Gm, attr_name)(*reduce_params)

    def setup(self, x, F, func):
        """Initialize the inverse-Jacobian approximation to ``-alpha * I``."""
        GenericBroyden.setup(self, x, F, func)
        self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)

    def todense(self):
        """Return the (dense) Jacobian approximation, i.e. inv(H)."""
        return inv(self.Gm)

    def solve(self, f, tol=0):
        """Apply the inverse-Jacobian approximation H to ``f``."""
        r = self.Gm.matvec(f)
        if np.isfinite(r).all():
            return r
        # Approximation became singular/non-finite: restart from scratch.
        self.setup(self.last_x, self.last_f, self.func)
        return self.Gm.matvec(f)

    def matvec(self, f):
        """Apply the Jacobian approximation (the inverse of H) to ``f``."""
        return self.Gm.solve(f)

    def rsolve(self, f, tol=0):
        """Apply the conjugate transpose of H to ``f``."""
        return self.Gm.rmatvec(f)

    def rmatvec(self, f):
        """Apply the conjugate transpose of the Jacobian approximation."""
        return self.Gm.rsolve(f)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Reduce first, so the rank-1 correction added below (which enforces
        # the secant condition H df = dx) survives the reduction.
        self._reduce()

        w = self.Gm.rmatvec(dx)
        col = dx - self.Gm.matvec(df)
        row = w / vdot(df, w)

        self.Gm.append(col, row)
+
class BroydenSecond(BroydenFirst):
    r"""
    Find a root of a function, using Broyden's second Jacobian approximation.

    This method is also known as "Broyden's bad method".

    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='broyden2'`` in particular.

    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update

    .. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)

    corresponding to Broyden's second method.

    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       "A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).

       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.broyden2(fun, [0, 0])
    >>> sol
    array([0.84116365, 0.15883529])

    """

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Reduce first, so the rank-1 correction below (enforcing the secant
        # condition H df = dx) survives the reduction.
        self._reduce()

        # Rank-1 update: H += (dx - H df) df^H / |df|^2
        correction = dx - self.Gm.matvec(df)
        self.Gm.append(correction, df / df_norm**2)
+
#------------------------------------------------------------------------------
|
| 997 |
+
# Broyden-like (restricted memory)
|
| 998 |
+
#------------------------------------------------------------------------------
|
| 999 |
+
|
| 1000 |
+
class Anderson(GenericBroyden):
    """
    Find a root of a function, using (extended) Anderson mixing.

    The Jacobian is formed by a 'best' solution in the space
    spanned by the last `M` vectors. As a result, only MxM matrix
    inversions and MxN multiplications are required. [Ey]_

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    M : float, optional
        Number of previous vectors to retain. Defaults to 5.
    w0 : float, optional
        Regularization parameter for numerical stability.
        Compared to unity, good values of the order of 0.01.
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='anderson'`` in particular.

    References
    ----------
    .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.anderson(fun, [0, 0])
    >>> sol
    array([0.84116588, 0.15883789])

    """

    # Note:
    #
    # Anderson method maintains a rank M approximation of the inverse Jacobian,
    #
    #     J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
    #     A      = W + dF^H dF
    #     W      = w0^2 diag(dF^H dF)
    #
    # so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
    #
    #     J^-1 df_j = dx_j
    #
    # for all j = 0 ... M-1.
    #
    # Moreover, (from Sherman-Morrison-Woodbury formula)
    #
    #     J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
    #     C   = (dX + alpha dF) A^-1
    #     b   = -1/alpha
    #
    # and after simplification
    #
    #     J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
    #

    def __init__(self, alpha=None, w0=0.01, M=5):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.M = M
        # Histories of the last M step / residual differences.
        self.dx = []
        self.df = []
        self.gamma = None
        self.w0 = w0

    def solve(self, f, tol=0):
        """Apply the approximate inverse Jacobian to ``f``."""
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        # Projections of f onto the stored residual differences.
        df_f = np.array([vdot(df_k, f) for df_k in self.df], dtype=f.dtype)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # Singular mixing matrix: discard the history and restart.
            del self.dx[:]
            del self.df[:]
            return dx

        for g_m, dx_m, df_m in zip(gamma, self.dx, self.df):
            dx += g_m*(dx_m + self.alpha*df_m)
        return dx

    def matvec(self, f):
        """Apply the approximate Jacobian (inverse of solve()) to ``f``."""
        dx = -f/self.alpha

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.array([vdot(df_k, f) for df_k in self.df], dtype=f.dtype)

        # Mixing matrix dF^H dX - alpha W (see the Note above).
        b = np.empty((n, n), dtype=f.dtype)
        for i in range(n):
            for j in range(n):
                b[i,j] = vdot(self.df[i], self.dx[j])
                if i == j and self.w0 != 0:
                    b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
        gamma = solve(b, df_f)

        for g_m, dx_m, df_m in zip(gamma, self.dx, self.df):
            dx += g_m*(df_m + dx_m/self.alpha)
        return dx

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        if self.M == 0:
            return

        self.dx.append(dx)
        self.df.append(df)

        # Keep only the most recent M difference vectors.
        while len(self.dx) > self.M:
            self.dx.pop(0)
            self.df.pop(0)

        n = len(self.dx)
        a = np.zeros((n, n), dtype=f.dtype)

        # Fill the upper triangle of A = W + dF^H dF; the diagonal gets the
        # w0**2 regularization.
        for i in range(n):
            for j in range(i, n):
                wd = self.w0**2 if i == j else 0
                a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])

        # Mirror the upper triangle to the lower one (A is Hermitian).
        a += np.triu(a, 1).T.conj()
        self.a = a
| 1151 |
+
#------------------------------------------------------------------------------
|
| 1152 |
+
# Simple iterations
|
| 1153 |
+
#------------------------------------------------------------------------------
|
| 1154 |
+
|
| 1155 |
+
|
| 1156 |
+
class DiagBroyden(GenericBroyden):
    """
    Find a root of a function, using diagonal Broyden Jacobian approximation.

    The Jacobian approximation is derived from previous iterations, by
    retaining only the diagonal of Broyden matrices.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='diagbroyden'`` in particular.

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.diagbroyden(fun, [0, 0])
    >>> sol
    array([0.84116403, 0.15883384])

    """

    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha

    def setup(self, x, F, func):
        """Initialize the diagonal (stored negated, i.e. J = -diag(d))."""
        GenericBroyden.setup(self, x, F, func)
        self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)

    def solve(self, f, tol=0):
        """Apply the inverse Jacobian approximation to ``f``."""
        return -f / self.d

    def matvec(self, f):
        """Apply the Jacobian approximation to ``f``."""
        return -f * self.d

    def rsolve(self, f, tol=0):
        """Apply the conjugate transpose of the inverse approximation."""
        return -f / self.d.conj()

    def rmatvec(self, f):
        """Apply the conjugate transpose of the Jacobian approximation."""
        return -f * self.d.conj()

    def todense(self):
        """Return the dense Jacobian approximation."""
        return np.diag(-self.d)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Diagonal of the Broyden rank-1 update, rebinding rather than
        # mutating in place.
        self.d = self.d - (df + self.d*dx)*dx/dx_norm**2
| 1223 |
+
|
| 1224 |
+
class LinearMixing(GenericBroyden):
    """
    Find a root of a function, using a scalar Jacobian approximation.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        The Jacobian approximation is (-1/alpha).
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='linearmixing'`` in particular.

    """

    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha

    def solve(self, f, tol=0):
        """Apply the (scalar) inverse Jacobian, -alpha."""
        return -(self.alpha * f)

    def matvec(self, f):
        """Apply the (scalar) Jacobian, -1/alpha."""
        return -(f / self.alpha)

    def rsolve(self, f, tol=0):
        """Conjugate-transpose counterpart of solve()."""
        return -(np.conj(self.alpha) * f)

    def rmatvec(self, f):
        """Conjugate-transpose counterpart of matvec()."""
        return -(f / np.conj(self.alpha))

    def todense(self):
        """Return the dense Jacobian approximation, -I/alpha."""
        return np.diag(np.full(self.shape[0], -1/self.alpha))

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # The scalar approximation is fixed; nothing to update.
        pass
| 1269 |
+
|
| 1270 |
+
class ExcitingMixing(GenericBroyden):
    """
    Find a root of a function, using a tuned diagonal Jacobian approximation.

    The Jacobian matrix is diagonal and is tuned on each iteration.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='excitingmixing'`` in particular.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial Jacobian approximation is (-1/alpha).
    alphamax : float, optional
        The entries of the diagonal Jacobian are kept in the range
        ``[alpha, alphamax]``.
    %(params_extra)s
    """

    def __init__(self, alpha=None, alphamax=1.0):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.alphamax = alphamax
        # Per-component mixing parameters; allocated in setup().
        self.beta = None

    def setup(self, x, F, func):
        """Initialize every mixing parameter to alpha."""
        GenericBroyden.setup(self, x, F, func)
        self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)

    def solve(self, f, tol=0):
        """Apply the inverse Jacobian approximation, -diag(beta)."""
        return -f*self.beta

    def matvec(self, f):
        """Apply the Jacobian approximation, -diag(1/beta)."""
        return -f/self.beta

    def rsolve(self, f, tol=0):
        """Conjugate-transpose counterpart of solve()."""
        return -f*self.beta.conj()

    def rmatvec(self, f):
        """Conjugate-transpose counterpart of matvec()."""
        return -f/self.beta.conj()

    def todense(self):
        """Return the dense Jacobian approximation."""
        return np.diag(-1/self.beta)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Where the residual kept its sign, grow the mixing parameter;
        # where it flipped, reset it to alpha; then clip to [0, alphamax].
        same_sign = f*self.last_f > 0
        self.beta[same_sign] += self.alpha
        self.beta[~same_sign] = self.alpha
        np.clip(self.beta, 0, self.alphamax, out=self.beta)
| 1328 |
+
|
| 1329 |
+
#------------------------------------------------------------------------------
|
| 1330 |
+
# Iterative/Krylov approximated Jacobians
|
| 1331 |
+
#------------------------------------------------------------------------------
|
| 1332 |
+
|
| 1333 |
+
class KrylovJacobian(Jacobian):
    r"""
    Find a root of a function, using Krylov approximation for inverse Jacobian.

    This method is suitable for solving large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    rdiff : float, optional
        Relative step size to use in numerical differentiation.
    method : str or callable, optional
        Krylov method to use to approximate the Jacobian. Can be a string,
        or a function implementing the same interface as the iterative
        solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
        ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
        ``'tfqmr'``.

        The default is `scipy.sparse.linalg.lgmres`.
    inner_maxiter : int, optional
        Parameter to pass to the "inner" Krylov solver: maximum number of
        iterations. Iteration will stop after maxiter steps even if the
        specified tolerance has not been achieved.
    inner_M : LinearOperator or InverseJacobian
        Preconditioner for the inner Krylov iteration.
        Note that you can use also inverse Jacobians as (adaptive)
        preconditioners. For example,

        >>> from scipy.optimize import BroydenFirst, KrylovJacobian
        >>> from scipy.optimize import InverseJacobian
        >>> jac = BroydenFirst()
        >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))

        If the preconditioner has a method named 'update', it will be called
        as ``update(x, f)`` after each nonlinear step, with ``x`` giving
        the current point, and ``f`` the current function value.
    outer_k : int, optional
        Size of the subspace kept across LGMRES nonlinear iterations.
        See `scipy.sparse.linalg.lgmres` for details.
    inner_kwargs : kwargs
        Keyword parameters for the "inner" Krylov solver
        (defined with `method`). Parameter names must start with
        the `inner_` prefix which will be stripped before passing on
        the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details.
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method='krylov'`` in particular.
    scipy.sparse.linalg.gmres
    scipy.sparse.linalg.lgmres

    Notes
    -----
    This function implements a Newton-Krylov solver. The basic idea is
    to compute the inverse of the Jacobian with an iterative Krylov
    method. These methods require only evaluating the Jacobian-vector
    products, which are conveniently approximated by a finite difference:

    .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega

    Due to the use of iterative matrix inverses, these methods can
    deal with large nonlinear problems.

    SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
    solvers to choose from. The default here is `lgmres`, which is a
    variant of restarted GMRES iteration that reuses some of the
    information obtained in the previous Newton steps to invert
    Jacobians in subsequent steps.

    For a review on Newton-Krylov methods, see for example [1]_,
    and for the LGMRES sparse inverse method, see [2]_.

    References
    ----------
    .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method,
           SIAM, pp.57-83, 2003.
           :doi:`10.1137/1.9780898718898.ch3`
    .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
           :doi:`10.1016/j.jcp.2003.08.010`
    .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel,
           SIAM J. Matrix Anal. Appl. 26, 962 (2005).
           :doi:`10.1137/S0895479803422014`

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * x[1] - 1.0,
    ...             0.5 * (x[1] - x[0]) ** 2]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.newton_krylov(fun, [0, 0])
    >>> sol
    array([0.66731771, 0.66536458])

    """

    def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
                 inner_M=None, outer_k=10, **kw):
        self.preconditioner = inner_M
        self.rdiff = rdiff
        # Resolve a string name to the corresponding sparse solver; a
        # user-provided callable is used as-is.
        named_solvers = {
            'bicgstab': scipy.sparse.linalg.bicgstab,
            'gmres': scipy.sparse.linalg.gmres,
            'lgmres': scipy.sparse.linalg.lgmres,
            'cgs': scipy.sparse.linalg.cgs,
            'minres': scipy.sparse.linalg.minres,
            'tfqmr': scipy.sparse.linalg.tfqmr,
        }
        self.method = named_solvers.get(method, method)

        self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)

        if self.method is scipy.sparse.linalg.gmres:
            # Replace GMRES's outer iteration with Newton steps
            self.method_kw['restart'] = inner_maxiter
            self.method_kw['maxiter'] = 1
            self.method_kw.setdefault('atol', 0)
        elif self.method in (scipy.sparse.linalg.gcrotmk,
                             scipy.sparse.linalg.bicgstab,
                             scipy.sparse.linalg.cgs):
            self.method_kw.setdefault('atol', 0)
        elif self.method is scipy.sparse.linalg.lgmres:
            self.method_kw['outer_k'] = outer_k
            # Replace LGMRES's outer iteration with Newton steps
            self.method_kw['maxiter'] = 1
            # Carry LGMRES's `outer_v` vectors across nonlinear iterations,
            # but not the corresponding Jacobian*v products, in case the
            # Jacobian changes a lot in the nonlinear step
            #
            # XXX: some trust-region inspired ideas might be more efficient...
            #      See e.g., Brown & Saad. But needs to be implemented
            #      separately since it's not an inexact Newton method.
            self.method_kw.setdefault('outer_v', [])
            self.method_kw.setdefault('prepend_outer_v', True)
            self.method_kw.setdefault('store_outer_Av', False)
            self.method_kw.setdefault('atol', 0)

        for key, value in kw.items():
            if not key.startswith('inner_'):
                raise ValueError("Unknown parameter %s" % key)
            self.method_kw[key[len('inner_'):]] = value

    def _update_diff_step(self):
        # Scale the finite-difference step to the magnitudes of x and f.
        scale_x = abs(self.x0).max()
        scale_f = abs(self.f0).max()
        self.omega = self.rdiff * max(1, scale_x) / max(1, scale_f)

    def matvec(self, v):
        """Approximate J v by a forward finite difference of func."""
        nv = norm(v)
        if nv == 0:
            return 0*v
        sc = self.omega / nv
        r = (self.func(self.x0 + sc*v) - self.f0) / sc
        if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
            raise ValueError('Function returned non-finite results')
        return r

    def solve(self, rhs, tol=0):
        """Solve J sol = rhs approximately with the inner Krylov method."""
        # Pass `tol` as the solver's rtol unless the user pinned one.
        extra = {} if 'rtol' in self.method_kw else {'rtol': tol}
        sol, info = self.method(self.op, rhs, **self.method_kw, **extra)
        return sol

    def update(self, x, f):
        """Move the linearization point and refresh the difference step."""
        self.x0 = x
        self.f0 = f
        self._update_diff_step()

        # Update also the preconditioner, if possible
        if self.preconditioner is not None:
            if hasattr(self.preconditioner, 'update'):
                self.preconditioner.update(x, f)

    def setup(self, x, f, func):
        Jacobian.setup(self, x, f, func)
        self.x0 = x
        self.f0 = f
        self.op = scipy.sparse.linalg.aslinearoperator(self)

        if self.rdiff is None:
            self.rdiff = np.finfo(x.dtype).eps ** (1./2)

        self._update_diff_step()

        # Setup also the preconditioner, if possible
        if self.preconditioner is not None:
            if hasattr(self.preconditioner, 'setup'):
                self.preconditioner.setup(x, f, func)
| 1530 |
+
|
| 1531 |
+
#------------------------------------------------------------------------------
|
| 1532 |
+
# Wrapper functions
|
| 1533 |
+
#------------------------------------------------------------------------------
|
| 1534 |
+
|
| 1535 |
+
def _nonlin_wrapper(name, jac):
    """
    Construct a solver wrapper with given name and Jacobian approx.

    It inspects the keyword arguments of ``jac.__init__``, and allows to
    use the same arguments in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`

    """
    signature = _getfullargspec(jac.__init__)
    args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
    # Keyword-only arguments are not supported by the generated wrapper;
    # reject them up front.
    # BUG FIX: ``signature`` is a FullArgSpec, i.e. a tuple subclass, so
    # ``'%s' % signature`` would treat it as a *tuple of format arguments*
    # and raise TypeError instead of the intended ValueError. Wrap it in a
    # one-element tuple so it is formatted as a single value.
    if kwonlyargs:
        raise ValueError('Unexpected signature %s' % (signature,))

    # Pair each keyword argument of jac.__init__ with its default value.
    kwargs = list(zip(args[-len(defaults):], defaults))
    kw_str = ", ".join([f"{k}={v!r}" for k, v in kwargs])
    if kw_str:
        kw_str = ", " + kw_str
    kwkw_str = ", ".join([f"{k}={k}" for k, v in kwargs])
    if kwkw_str:
        kwkw_str = kwkw_str + ", "

    # Construct the wrapper function so that its keyword arguments
    # are visible in pydoc.help etc.
    wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
             f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
             tol_norm=None, line_search='armijo', callback=None, **kw):
    jac = %(jac)s(%(kwkw)s **kw)
    return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
                        f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
                        callback)
"""

    wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
                             kwkw=kwkw_str)
    ns = {}
    ns.update(globals())
    exec(wrapper, ns)
    func = ns[name]
    # Reuse the Jacobian class docstring, with %(...)s parts substituted.
    func.__doc__ = jac.__doc__
    _set_doc(func)
    return func
| 1577 |
+
|
| 1578 |
+
|
| 1579 |
+
# Public solver front-ends: each wraps nonlin_solve() with the matching
# Jacobian-approximation class via _nonlin_wrapper().
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
llava_next/lib/python3.10/site-packages/scipy/optimize/_qap.py
ADDED
|
@@ -0,0 +1,731 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import operator
|
| 3 |
+
from . import (linear_sum_assignment, OptimizeResult)
|
| 4 |
+
from ._optimize import _check_unknown_options
|
| 5 |
+
|
| 6 |
+
from scipy._lib._util import check_random_state
|
| 7 |
+
import itertools
|
| 8 |
+
|
| 9 |
+
QUADRATIC_ASSIGNMENT_METHODS = ['faq', '2opt']
|
| 10 |
+
|
| 11 |
+
def quadratic_assignment(A, B, method="faq", options=None):
    r"""
    Approximates solution to the quadratic assignment problem and
    the graph matching problem.

    Quadratic assignment solves problems of the following form:

    .. math::

        \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
        \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\

    where :math:`\mathcal{P}` is the set of all permutation matrices,
    and :math:`A` and :math:`B` are square matrices.

    Graph matching tries to *maximize* the same objective function.
    This algorithm can be thought of as finding the alignment of the
    nodes of two graphs that minimizes the number of induced edge
    disagreements, or, in the case of weighted graphs, the sum of squared
    edge weight differences.

    Note that the quadratic assignment problem is NP-hard. The results given
    here are approximations and are not guaranteed to be optimal.


    Parameters
    ----------
    A : 2-D array, square
        The square matrix :math:`A` in the objective function above.

    B : 2-D array, square
        The square matrix :math:`B` in the objective function above.

    method :  str in {'faq', '2opt'} (default: 'faq')
        The algorithm used to solve the problem.
        :ref:`'faq' <optimize.qap-faq>` (default) and
        :ref:`'2opt' <optimize.qap-2opt>` are available.

    options : dict, optional
        A dictionary of solver options. All solvers support the following:

        maximize : bool (default: False)
            Maximizes the objective function if ``True``.

        partial_match : 2-D array of integers, optional (default: None)
            Fixes part of the matching. Also known as a "seed" [2]_.

            Each row of `partial_match` specifies a pair of matched nodes:
            node ``partial_match[i, 0]`` of `A` is matched to node
            ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``,
            where ``m`` is not greater than the number of nodes, :math:`n`.

        rng : {None, int, `numpy.random.Generator`,
               `numpy.random.RandomState`}, optional

            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance
            then that instance is used.

        For method-specific options, see
        :func:`show_options('quadratic_assignment') <show_options>`.

    Returns
    -------
    res : OptimizeResult
        `OptimizeResult` containing the following fields.

        col_ind : 1-D array
            Column indices corresponding to the best permutation found of the
            nodes of `B`.
        fun : float
            The objective value of the solution.
        nit : int
            The number of iterations performed during optimization.

    Notes
    -----
    The default method :ref:`'faq' <optimize.qap-faq>` uses the Fast
    Approximate QAP algorithm [1]_; it typically offers the best combination of
    speed and accuracy.
    Method :ref:`'2opt' <optimize.qap-2opt>` can be computationally expensive,
    but may be a useful alternative, or it can be used to refine the solution
    returned by another method.

    References
    ----------
    .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
           S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
           C.E. Priebe, "Fast approximate quadratic programming for graph
           matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
           :doi:`10.1371/journal.pone.0121002`

    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
           203-215, :doi:`10.1016/j.patcog.2018.09.014`

    .. [3] "2-opt," Wikipedia.
           https://en.wikipedia.org/wiki/2-opt

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import quadratic_assignment
    >>> A = np.array([[0, 80, 150, 170], [80, 0, 130, 100],
    ...               [150, 130, 0, 120], [170, 100, 120, 0]])
    >>> B = np.array([[0, 5, 2, 7], [0, 0, 3, 8],
    ...               [0, 0, 0, 3], [0, 0, 0, 0]])
    >>> res = quadratic_assignment(A, B)
    >>> print(res)
     fun: 3260
     col_ind: [0 3 2 1]
     nit: 9

    To see the relationship between the returned ``col_ind`` and ``fun``,
    use ``col_ind`` to form the best permutation matrix found, then evaluate
    the objective function :math:`f(P) = trace(A^T P B P^T )`.

    >>> perm = res['col_ind']
    >>> P = np.eye(len(A), dtype=int)[perm]
    >>> fun = np.trace(A.T @ P @ B @ P.T)
    >>> print(fun)
    3260

    Alternatively, to avoid constructing the permutation matrix explicitly,
    directly permute the rows and columns of the distance matrix.

    >>> fun = np.trace(A.T @ B[perm][:, perm])
    >>> print(fun)
    3260

    Although not guaranteed in general, ``quadratic_assignment`` happens to
    have found the globally optimal solution.

    >>> from itertools import permutations
    >>> perm_opt, fun_opt = None, np.inf
    >>> for perm in permutations([0, 1, 2, 3]):
    ...     perm = np.array(perm)
    ...     fun = np.trace(A.T @ B[perm][:, perm])
    ...     if fun < fun_opt:
    ...         fun_opt, perm_opt = fun, perm
    >>> print(np.array_equal(perm_opt, res['col_ind']))
    True

    Here is an example for which the default method,
    :ref:`'faq' <optimize.qap-faq>`, does not find the global optimum.

    >>> A = np.array([[0, 5, 8, 6], [5, 0, 5, 1],
    ...               [8, 5, 0, 2], [6, 1, 2, 0]])
    >>> B = np.array([[0, 1, 8, 4], [1, 0, 5, 2],
    ...               [8, 5, 0, 5], [4, 2, 5, 0]])
    >>> res = quadratic_assignment(A, B)
    >>> print(res)
     fun: 178
     col_ind: [1 0 3 2]
     nit: 13

    If accuracy is important, consider using :ref:`'2opt' <optimize.qap-2opt>`
    to refine the solution.

    >>> guess = np.array([np.arange(len(A)), res.col_ind]).T
    >>> res = quadratic_assignment(A, B, method="2opt",
    ...                            options = {'partial_guess': guess})
    >>> print(res)
     fun: 176
     col_ind: [1 2 3 0]
     nit: 17

    """
    # Map public method names to their solver implementations; dispatch
    # after case-normalizing the requested name.
    solvers = {"faq": _quadratic_assignment_faq,
               "2opt": _quadratic_assignment_2opt}

    method = method.lower()
    try:
        solve = solvers[method]
    except KeyError:
        raise ValueError(f"method {method} must be in {solvers}.") from None

    # Solver-specific keyword options are forwarded as-is; unknown keys are
    # rejected inside each solver via _check_unknown_options.
    return solve(A, B, **(options or {}))
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def _calc_score(A, B, perm):
|
| 196 |
+
# equivalent to objective function but avoids matmul
|
| 197 |
+
return np.sum(A * B[perm][:, perm])
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _common_input_validation(A, B, partial_match):
|
| 201 |
+
A = np.atleast_2d(A)
|
| 202 |
+
B = np.atleast_2d(B)
|
| 203 |
+
|
| 204 |
+
if partial_match is None:
|
| 205 |
+
partial_match = np.array([[], []]).T
|
| 206 |
+
partial_match = np.atleast_2d(partial_match).astype(int)
|
| 207 |
+
|
| 208 |
+
msg = None
|
| 209 |
+
if A.shape[0] != A.shape[1]:
|
| 210 |
+
msg = "`A` must be square"
|
| 211 |
+
elif B.shape[0] != B.shape[1]:
|
| 212 |
+
msg = "`B` must be square"
|
| 213 |
+
elif A.ndim != 2 or B.ndim != 2:
|
| 214 |
+
msg = "`A` and `B` must have exactly two dimensions"
|
| 215 |
+
elif A.shape != B.shape:
|
| 216 |
+
msg = "`A` and `B` matrices must be of equal size"
|
| 217 |
+
elif partial_match.shape[0] > A.shape[0]:
|
| 218 |
+
msg = "`partial_match` can have only as many seeds as there are nodes"
|
| 219 |
+
elif partial_match.shape[1] != 2:
|
| 220 |
+
msg = "`partial_match` must have two columns"
|
| 221 |
+
elif partial_match.ndim != 2:
|
| 222 |
+
msg = "`partial_match` must have exactly two dimensions"
|
| 223 |
+
elif (partial_match < 0).any():
|
| 224 |
+
msg = "`partial_match` must contain only positive indices"
|
| 225 |
+
elif (partial_match >= len(A)).any():
|
| 226 |
+
msg = "`partial_match` entries must be less than number of nodes"
|
| 227 |
+
elif (not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or
|
| 228 |
+
not len(set(partial_match[:, 1])) == len(partial_match[:, 1])):
|
| 229 |
+
msg = "`partial_match` column entries must be unique"
|
| 230 |
+
|
| 231 |
+
if msg is not None:
|
| 232 |
+
raise ValueError(msg)
|
| 233 |
+
|
| 234 |
+
return A, B, partial_match
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def _quadratic_assignment_faq(A, B,
                              maximize=False, partial_match=None, rng=None,
                              P0="barycenter", shuffle_input=False, maxiter=30,
                              tol=0.03, **unknown_options):
    r"""Solve the quadratic assignment problem (approximately).

    This function solves the Quadratic Assignment Problem (QAP) and the
    Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm
    (FAQ) [1]_.

    Quadratic assignment solves problems of the following form:

    .. math::

        \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
        \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\

    where :math:`\mathcal{P}` is the set of all permutation matrices,
    and :math:`A` and :math:`B` are square matrices.

    Graph matching tries to *maximize* the same objective function.
    This algorithm can be thought of as finding the alignment of the
    nodes of two graphs that minimizes the number of induced edge
    disagreements, or, in the case of weighted graphs, the sum of squared
    edge weight differences.

    Note that the quadratic assignment problem is NP-hard. The results given
    here are approximations and are not guaranteed to be optimal.

    Parameters
    ----------
    A : 2-D array, square
        The square matrix :math:`A` in the objective function above.
    B : 2-D array, square
        The square matrix :math:`B` in the objective function above.
    method :  str in {'faq', '2opt'} (default: 'faq')
        The algorithm used to solve the problem. This is the method-specific
        documentation for 'faq'.
        :ref:`'2opt' <optimize.qap-2opt>` is also available.

    Options
    -------
    maximize : bool (default: False)
        Maximizes the objective function if ``True``.
    partial_match : 2-D array of integers, optional (default: None)
        Fixes part of the matching. Also known as a "seed" [2]_.

        Each row of `partial_match` specifies a pair of matched nodes:
        node ``partial_match[i, 0]`` of `A` is matched to node
        ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, where
        ``m`` is not greater than the number of nodes, :math:`n`.

    rng : {None, int, `numpy.random.Generator`,
           `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
    P0 : 2-D array, "barycenter", or "randomized" (default: "barycenter")
        Initial position. Must be a doubly-stochastic matrix [3]_.

        If the initial position is an array, it must be a doubly stochastic
        matrix of size :math:`m' \times m'` where :math:`m' = n - m`.

        If ``"barycenter"`` (default), the initial position is the barycenter
        of the Birkhoff polytope (the space of doubly stochastic matrices).
        This is a :math:`m' \times m'` matrix with all entries equal to
        :math:`1 / m'`.

        If ``"randomized"`` the initial search position is
        :math:`P_0 = (J + K) / 2`, where :math:`J` is the barycenter and
        :math:`K` is a random doubly stochastic matrix.
    shuffle_input : bool (default: False)
        Set to `True` to resolve degenerate gradients randomly. For
        non-degenerate gradients this option has no effect.
    maxiter : int, positive (default: 30)
        Integer specifying the max number of Frank-Wolfe iterations performed.
    tol : float (default: 0.03)
        Tolerance for termination. Frank-Wolfe iteration terminates when
        :math:`\frac{||P_{i}-P_{i+1}||_F}{\sqrt{m')}} \leq tol`,
        where :math:`i` is the iteration number.

    Returns
    -------
    res : OptimizeResult
        `OptimizeResult` containing the following fields.

        col_ind : 1-D array
            Column indices corresponding to the best permutation found of the
            nodes of `B`.
        fun : float
            The objective value of the solution.
        nit : int
            The number of Frank-Wolfe iterations performed.

    Notes
    -----
    The algorithm may be sensitive to the initial permutation matrix (or
    search "position") due to the possibility of several local minima
    within the feasible region. A barycenter initialization is more likely to
    result in a better solution than a single random initialization. However,
    calling ``quadratic_assignment`` several times with different random
    initializations may result in a better optimum at the cost of longer
    total execution time.

    Examples
    --------
    As mentioned above, a barycenter initialization often results in a better
    solution than a single random initialization.

    >>> from numpy.random import default_rng
    >>> rng = default_rng()
    >>> n = 15
    >>> A = rng.random((n, n))
    >>> B = rng.random((n, n))
    >>> res = quadratic_assignment(A, B)  # FAQ is default method
    >>> print(res.fun)
    46.871483385480545  # may vary

    >>> options = {"P0": "randomized"}  # use randomized initialization
    >>> res = quadratic_assignment(A, B, options=options)
    >>> print(res.fun)
    47.224831071310625 # may vary

    However, consider running from several randomized initializations and
    keeping the best result.

    >>> res = min([quadratic_assignment(A, B, options=options)
    ...            for i in range(30)], key=lambda x: x.fun)
    >>> print(res.fun)
    46.671852533681516 # may vary

    The '2-opt' method can be used to further refine the results.

    >>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T}
    >>> res = quadratic_assignment(A, B, method="2opt", options=options)
    >>> print(res.fun)
    46.47160735721583 # may vary

    References
    ----------
    .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
           S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
           C.E. Priebe, "Fast approximate quadratic programming for graph
           matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
           :doi:`10.1371/journal.pone.0121002`

    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
           203-215, :doi:`10.1016/j.patcog.2018.09.014`

    .. [3] "Doubly stochastic Matrix," Wikipedia.
           https://en.wikipedia.org/wiki/Doubly_stochastic_matrix

    """
    # Reject mistyped/unsupported keyword options early.
    _check_unknown_options(unknown_options)

    # operator.index raises TypeError for non-integral maxiter (e.g. 1.5).
    maxiter = operator.index(maxiter)

    # ValueError check
    A, B, partial_match = _common_input_validation(A, B, partial_match)

    msg = None
    if isinstance(P0, str) and P0 not in {'barycenter', 'randomized'}:
        msg = "Invalid 'P0' parameter string"
    elif maxiter <= 0:
        msg = "'maxiter' must be a positive integer"
    elif tol <= 0:
        msg = "'tol' must be a positive float"
    if msg is not None:
        raise ValueError(msg)

    rng = check_random_state(rng)
    n = len(A)  # number of vertices in graphs
    n_seeds = len(partial_match)  # number of seeds
    n_unseed = n - n_seeds  # m' = n - m: size of the free sub-problem

    # [1] Algorithm 1 Line 1 - choose initialization
    if not isinstance(P0, str):
        P0 = np.atleast_2d(P0)
        # `msg` is still None here (earlier failures already raised), so it
        # can be reused to accumulate the first P0 validation failure.
        if P0.shape != (n_unseed, n_unseed):
            msg = "`P0` matrix must have shape m' x m', where m'=n-m"
        elif ((P0 < 0).any() or not np.allclose(np.sum(P0, axis=0), 1)
              or not np.allclose(np.sum(P0, axis=1), 1)):
            msg = "`P0` matrix must be doubly stochastic"
        if msg is not None:
            raise ValueError(msg)
    elif P0 == 'barycenter':
        # Flat matrix 1/m' — the center of the Birkhoff polytope.
        P0 = np.ones((n_unseed, n_unseed)) / n_unseed
    elif P0 == 'randomized':
        J = np.ones((n_unseed, n_unseed)) / n_unseed
        # generate a nxn matrix where each entry is a random number [0, 1]
        # would use rand, but Generators don't have it
        # would use random, but old mtrand.RandomStates don't have it
        K = _doubly_stochastic(rng.uniform(size=(n_unseed, n_unseed)))
        P0 = (J + K) / 2

    # check trivial cases
    if n == 0 or n_seeds == n:
        # Nothing left to optimize: the seed fully determines the matching.
        score = _calc_score(A, B, partial_match[:, 1])
        res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
        return OptimizeResult(res)

    # +1 minimizes, -1 maximizes (by minimizing the negated objective).
    obj_func_scalar = 1
    if maximize:
        obj_func_scalar = -1

    nonseed_B = np.setdiff1d(range(n), partial_match[:, 1])
    if shuffle_input:
        # Randomize tie-breaking in the LAP solve for degenerate gradients.
        nonseed_B = rng.permutation(nonseed_B)

    nonseed_A = np.setdiff1d(range(n), partial_match[:, 0])
    # Reorder both graphs so seeded nodes come first; undone at the end.
    perm_A = np.concatenate([partial_match[:, 0], nonseed_A])
    perm_B = np.concatenate([partial_match[:, 1], nonseed_B])

    # definitions according to Seeded Graph Matching [2].
    A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds)
    B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds)
    const_sum = A21 @ B21.T + A12.T @ B12

    P = P0
    # [1] Algorithm 1 Line 2 - loop while stopping criteria not met
    for n_iter in range(1, maxiter+1):
        # [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t)
        grad_fp = (const_sum + A22 @ P @ B22.T + A22.T @ P @ B22)
        # [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8
        _, cols = linear_sum_assignment(grad_fp, maximize=maximize)
        Q = np.eye(n_unseed)[cols]

        # [1] Algorithm 1 Line 5 - compute the step size
        # Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect
        # terms as ax**2 + bx + c. c does not affect location of minimum
        # and can be ignored. Also, note that trace(A@B) = (A.T*B).sum();
        # apply where possible for efficiency.
        R = P - Q
        b21 = ((R.T @ A21) * B21).sum()
        b12 = ((R.T @ A12.T) * B12.T).sum()
        AR22 = A22.T @ R
        BR22 = B22 @ R.T
        # B22.T[cols] / BR22[cols] exploit that Q is a permutation matrix:
        # row selection replaces a matmul with Q.
        b22a = (AR22 * B22.T[cols]).sum()
        b22b = (A22 * BR22[cols]).sum()
        a = (AR22.T * BR22).sum()
        b = b21 + b12 + b22a + b22b
        # critical point of ax^2 + bx + c is at x = -d/(2*e)
        # if a * obj_func_scalar > 0, it is a minimum
        # if minimum is not in [0, 1], only endpoints need to be considered
        if a*obj_func_scalar > 0 and 0 <= -b/(2*a) <= 1:
            alpha = -b/(2*a)
        else:
            # Endpoint comparison: objective at x=1 minus x=0 is (a + b),
            # so argmin over [0, (b + a)*scalar] yields 0 or 1 as the step.
            alpha = np.argmin([0, (b + a)*obj_func_scalar])

        # [1] Algorithm 1 Line 6 - Update P
        # NOTE: here alpha is the weight kept on the *current* iterate P.
        P_i1 = alpha * P + (1 - alpha) * Q
        if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol:
            P = P_i1
            break
        P = P_i1
    # [1] Algorithm 1 Line 7 - end main loop

    # [1] Algorithm 1 Line 8 - project onto the set of permutation matrices
    _, col = linear_sum_assignment(P, maximize=True)
    perm = np.concatenate((np.arange(n_seeds), col + n_seeds))

    # Undo the seeds-first reordering so col_ind refers to original labels.
    unshuffled_perm = np.zeros(n, dtype=int)
    unshuffled_perm[perm_A] = perm_B[perm]

    score = _calc_score(A, B, unshuffled_perm)
    res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter}
    return OptimizeResult(res)
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def _split_matrix(X, n):
|
| 513 |
+
# definitions according to Seeded Graph Matching [2].
|
| 514 |
+
upper, lower = X[:n], X[n:]
|
| 515 |
+
return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:]
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
def _doubly_stochastic(P, tol=1e-3):
|
| 519 |
+
# Adapted from @btaba implementation
|
| 520 |
+
# https://github.com/btaba/sinkhorn_knopp
|
| 521 |
+
# of Sinkhorn-Knopp algorithm
|
| 522 |
+
# https://projecteuclid.org/euclid.pjm/1102992505
|
| 523 |
+
|
| 524 |
+
max_iter = 1000
|
| 525 |
+
c = 1 / P.sum(axis=0)
|
| 526 |
+
r = 1 / (P @ c)
|
| 527 |
+
P_eps = P
|
| 528 |
+
|
| 529 |
+
for it in range(max_iter):
|
| 530 |
+
if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and
|
| 531 |
+
(np.abs(P_eps.sum(axis=0) - 1) < tol).all()):
|
| 532 |
+
# All column/row sums ~= 1 within threshold
|
| 533 |
+
break
|
| 534 |
+
|
| 535 |
+
c = 1 / (r @ P)
|
| 536 |
+
r = 1 / (P @ c)
|
| 537 |
+
P_eps = r[:, None] * P * c
|
| 538 |
+
|
| 539 |
+
return P_eps
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
def _quadratic_assignment_2opt(A, B, maximize=False, rng=None,
                               partial_match=None,
                               partial_guess=None,
                               **unknown_options):
    r"""Solve the quadratic assignment problem (approximately).

    This function solves the Quadratic Assignment Problem (QAP) and the
    Graph Matching Problem (GMP) using the 2-opt algorithm [1]_.

    Quadratic assignment solves problems of the following form:

    .. math::

        \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
        \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\

    where :math:`\mathcal{P}` is the set of all permutation matrices,
    and :math:`A` and :math:`B` are square matrices.

    Graph matching tries to *maximize* the same objective function.
    This algorithm can be thought of as finding the alignment of the
    nodes of two graphs that minimizes the number of induced edge
    disagreements, or, in the case of weighted graphs, the sum of squared
    edge weight differences.

    Note that the quadratic assignment problem is NP-hard. The results given
    here are approximations and are not guaranteed to be optimal.

    Parameters
    ----------
    A : 2-D array, square
        The square matrix :math:`A` in the objective function above.
    B : 2-D array, square
        The square matrix :math:`B` in the objective function above.
    method :  str in {'faq', '2opt'} (default: 'faq')
        The algorithm used to solve the problem. This is the method-specific
        documentation for '2opt'.
        :ref:`'faq' <optimize.qap-faq>` is also available.

    Options
    -------
    maximize : bool (default: False)
        Maximizes the objective function if ``True``.
    rng : {None, int, `numpy.random.Generator`,
           `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
    partial_match : 2-D array of integers, optional (default: None)
        Fixes part of the matching. Also known as a "seed" [2]_.

        Each row of `partial_match` specifies a pair of matched nodes: node
        ``partial_match[i, 0]`` of `A` is matched to node
        ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``,
        where ``m`` is not greater than the number of nodes, :math:`n`.

        .. note::
             `partial_match` must be sorted by the first column.

    partial_guess : 2-D array of integers, optional (default: None)
        A guess for the matching between the two matrices. Unlike
        `partial_match`, `partial_guess` does not fix the indices; they are
        still free to be optimized.

        Each row of `partial_guess` specifies a pair of matched nodes: node
        ``partial_guess[i, 0]`` of `A` is matched to node
        ``partial_guess[i, 1]`` of `B`. The array has shape ``(m, 2)``,
        where ``m`` is not greater than the number of nodes, :math:`n`.

        .. note::
             `partial_guess` must be sorted by the first column.

    Returns
    -------
    res : OptimizeResult
        `OptimizeResult` containing the following fields.

        col_ind : 1-D array
            Column indices corresponding to the best permutation found of the
            nodes of `B`.
        fun : float
            The objective value of the solution.
        nit : int
            The number of iterations performed during optimization.

    Notes
    -----
    This is a greedy algorithm that works similarly to bubble sort: beginning
    with an initial permutation, it iteratively swaps pairs of indices to
    improve the objective function until no such improvements are possible.

    References
    ----------
    .. [1] "2-opt," Wikipedia.
           https://en.wikipedia.org/wiki/2-opt

    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
           203-215, https://doi.org/10.1016/j.patcog.2018.09.014

    """
    # Reject mistyped/unsupported keyword options early.
    _check_unknown_options(unknown_options)
    rng = check_random_state(rng)
    A, B, partial_match = _common_input_validation(A, B, partial_match)

    N = len(A)
    # check trivial cases
    if N == 0 or partial_match.shape[0] == N:
        # Fully seeded (or empty) problem: the seed IS the answer.
        score = _calc_score(A, B, partial_match[:, 1])
        res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
        return OptimizeResult(res)

    if partial_guess is None:
        partial_guess = np.array([[], []]).T
    partial_guess = np.atleast_2d(partial_guess).astype(int)

    # Validation mirrors _common_input_validation's partial_match checks.
    msg = None
    if partial_guess.shape[0] > A.shape[0]:
        msg = ("`partial_guess` can have only as "
               "many entries as there are nodes")
    elif partial_guess.shape[1] != 2:
        msg = "`partial_guess` must have two columns"
    elif partial_guess.ndim != 2:
        msg = "`partial_guess` must have exactly two dimensions"
    elif (partial_guess < 0).any():
        msg = "`partial_guess` must contain only positive indices"
    elif (partial_guess >= len(A)).any():
        msg = "`partial_guess` entries must be less than number of nodes"
    elif (not len(set(partial_guess[:, 0])) == len(partial_guess[:, 0]) or
          not len(set(partial_guess[:, 1])) == len(partial_guess[:, 1])):
        msg = "`partial_guess` column entries must be unique"
    if msg is not None:
        raise ValueError(msg)

    fixed_rows = None
    if partial_match.size or partial_guess.size:
        # use partial_match and partial_guess for initial permutation,
        # but randomly permute the rest.
        guess_rows = np.zeros(N, dtype=bool)
        guess_cols = np.zeros(N, dtype=bool)
        fixed_rows = np.zeros(N, dtype=bool)
        fixed_cols = np.zeros(N, dtype=bool)
        perm = np.zeros(N, dtype=int)

        # NOTE: `perm[guess_rows] = cg` assigns in index order, which matches
        # `cg`'s order only because `partial_guess` is sorted by its first
        # column (documented requirement above). Same for partial_match.
        rg, cg = partial_guess.T
        guess_rows[rg] = True
        guess_cols[cg] = True
        perm[guess_rows] = cg

        # match overrides guess
        rf, cf = partial_match.T
        fixed_rows[rf] = True
        fixed_cols[cf] = True
        perm[fixed_rows] = cf

        # Remaining rows get a random assignment of the remaining columns.
        random_rows = ~fixed_rows & ~guess_rows
        random_cols = ~fixed_cols & ~guess_cols
        perm[random_rows] = rng.permutation(np.arange(N)[random_cols])
    else:
        perm = rng.permutation(np.arange(N))

    best_score = _calc_score(A, B, perm)

    # Only non-seeded indices may participate in swaps.
    i_free = np.arange(N)
    if fixed_rows is not None:
        i_free = i_free[~fixed_rows]

    better = operator.gt if maximize else operator.lt
    n_iter = 0  # counts objective evaluations (swap attempts), not sweeps
    done = False
    while not done:
        # equivalent to nested for loops i in range(N), j in range(i, N)
        for i, j in itertools.combinations_with_replacement(i_free, 2):
            n_iter += 1
            perm[i], perm[j] = perm[j], perm[i]
            score = _calc_score(A, B, perm)
            if better(score, best_score):
                # Keep this swap and restart the scan from the beginning.
                best_score = score
                break
            # faster to swap back than to create a new list every time
            perm[i], perm[j] = perm[j], perm[i]
        else:  # no swaps made
            done = True

    res = {"col_ind": perm, "fun": best_score, "nit": n_iter}
    return OptimizeResult(res)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_remove_redundancy.py
ADDED
|
@@ -0,0 +1,522 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Routines for removing redundant (linearly dependent) equations from linear
|
| 3 |
+
programming equality constraints.
|
| 4 |
+
"""
|
| 5 |
+
# Author: Matt Haberland
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from scipy.linalg import svd
|
| 9 |
+
from scipy.linalg.interpolative import interp_decomp
|
| 10 |
+
import scipy
|
| 11 |
+
from scipy.linalg.blas import dtrsm
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _row_count(A):
|
| 15 |
+
"""
|
| 16 |
+
Counts the number of nonzeros in each row of input array A.
|
| 17 |
+
Nonzeros are defined as any element with absolute value greater than
|
| 18 |
+
tol = 1e-13. This value should probably be an input to the function.
|
| 19 |
+
|
| 20 |
+
Parameters
|
| 21 |
+
----------
|
| 22 |
+
A : 2-D array
|
| 23 |
+
An array representing a matrix
|
| 24 |
+
|
| 25 |
+
Returns
|
| 26 |
+
-------
|
| 27 |
+
rowcount : 1-D array
|
| 28 |
+
Number of nonzeros in each row of A
|
| 29 |
+
|
| 30 |
+
"""
|
| 31 |
+
tol = 1e-13
|
| 32 |
+
return np.array((abs(A) > tol).sum(axis=1)).flatten()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _get_densest(A, eligibleRows):
|
| 36 |
+
"""
|
| 37 |
+
Returns the index of the densest row of A. Ignores rows that are not
|
| 38 |
+
eligible for consideration.
|
| 39 |
+
|
| 40 |
+
Parameters
|
| 41 |
+
----------
|
| 42 |
+
A : 2-D array
|
| 43 |
+
An array representing a matrix
|
| 44 |
+
eligibleRows : 1-D logical array
|
| 45 |
+
Values indicate whether the corresponding row of A is eligible
|
| 46 |
+
to be considered
|
| 47 |
+
|
| 48 |
+
Returns
|
| 49 |
+
-------
|
| 50 |
+
i_densest : int
|
| 51 |
+
Index of the densest row in A eligible for consideration
|
| 52 |
+
|
| 53 |
+
"""
|
| 54 |
+
rowCounts = _row_count(A)
|
| 55 |
+
return np.argmax(rowCounts * eligibleRows)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _remove_zero_rows(A, b):
|
| 59 |
+
"""
|
| 60 |
+
Eliminates trivial equations from system of equations defined by Ax = b
|
| 61 |
+
and identifies trivial infeasibilities
|
| 62 |
+
|
| 63 |
+
Parameters
|
| 64 |
+
----------
|
| 65 |
+
A : 2-D array
|
| 66 |
+
An array representing the left-hand side of a system of equations
|
| 67 |
+
b : 1-D array
|
| 68 |
+
An array representing the right-hand side of a system of equations
|
| 69 |
+
|
| 70 |
+
Returns
|
| 71 |
+
-------
|
| 72 |
+
A : 2-D array
|
| 73 |
+
An array representing the left-hand side of a system of equations
|
| 74 |
+
b : 1-D array
|
| 75 |
+
An array representing the right-hand side of a system of equations
|
| 76 |
+
status: int
|
| 77 |
+
An integer indicating the status of the removal operation
|
| 78 |
+
0: No infeasibility identified
|
| 79 |
+
2: Trivially infeasible
|
| 80 |
+
message : str
|
| 81 |
+
A string descriptor of the exit status of the optimization.
|
| 82 |
+
|
| 83 |
+
"""
|
| 84 |
+
status = 0
|
| 85 |
+
message = ""
|
| 86 |
+
i_zero = _row_count(A) == 0
|
| 87 |
+
A = A[np.logical_not(i_zero), :]
|
| 88 |
+
if not np.allclose(b[i_zero], 0):
|
| 89 |
+
status = 2
|
| 90 |
+
message = "There is a zero row in A_eq with a nonzero corresponding " \
|
| 91 |
+
"entry in b_eq. The problem is infeasible."
|
| 92 |
+
b = b[np.logical_not(i_zero)]
|
| 93 |
+
return A, b, status, message
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def bg_update_dense(plu, perm_r, v, j):
    """
    Update a dense LU factorization in place after replacing one basis
    column (Bartels-Golub-style update), avoiding a full refactorization.

    Parameters
    ----------
    plu : tuple
        ``(LU, p)`` factorization as produced by ``scipy.linalg.lu_factor``.
        ``LU`` is modified in place.
    perm_r : sequence of int
        Row permutation applied to ``v`` to align it with the factored
        row ordering.
    v : 1-D array
        The new column entering the basis (in original row order).
    j : int
        Index of the basis column being replaced.

    Returns
    -------
    (LU, p) : tuple
        The updated factorization, usable with ``scipy.linalg.lu_solve``.
    """
    LU, p = plu

    # Permute the incoming column, then forward-substitute against the unit
    # lower-triangular factor (dtrsm: alpha=1, lower=1, diag=1) to express
    # v in terms of the current factors.
    vperm = v[perm_r]
    u = dtrsm(1, LU, vperm, lower=1, diag=1)
    # The top of the solve becomes the new column of U.
    LU[:j+1, j] = u[:j+1]
    l = u[j+1:]
    piv = LU[j, j]
    # Fold the remaining entries into column j of L, scaled by the pivot.
    # NOTE(review): this accumulates (+=) onto the existing subdiagonal of
    # column j rather than overwriting it — presumably intentional for this
    # update scheme; confirm against the Bartels-Golub derivation.
    LU[j+1:, j] += (l/piv)
    return LU, p
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _remove_redundancy_pivot_dense(A, rhs, true_rank=None):
    """
    Eliminates redundant equations from system of equations defined by Ax = b
    and identifies infeasibilities.

    Parameters
    ----------
    A : 2-D dense array
        A matrix representing the left-hand side of a system of equations
    rhs : 1-D array
        An array representing the right-hand side of a system of equations
    true_rank : int, optional
        The known rank of ``A``. When provided, the search terminates early
        once ``m - true_rank`` dependent rows have been found.

    Returns
    -------
    A : 2-D dense array
        A matrix representing the left-hand side of a system of equations
    rhs : 1-D array
        An array representing the right-hand side of a system of equations
    status: int
        An integer indicating the status of the system
        0: No infeasibility identified
        2: Trivially infeasible
    message : str
        A string descriptor of the exit status of the optimization.

    References
    ----------
    .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
       large-scale linear programming." Optimization Methods and Software
       6.3 (1995): 219-227.

    """
    tolapiv = 1e-8    # pivoting tolerance: dot products below this are "zero"
    tolprimal = 1e-8  # feasibility tolerance for the right-hand side check
    status = 0
    message = ""
    inconsistent = ("There is a linear combination of rows of A_eq that "
                    "results in zero, suggesting a redundant constraint. "
                    "However the same linear combination of b_eq is "
                    "nonzero, suggesting that the constraints conflict "
                    "and the problem is infeasible.")
    A, rhs, status, message = _remove_zero_rows(A, rhs)

    if status != 0:
        return A, rhs, status, message

    m, n = A.shape

    v = list(range(m))  # Artificial column indices.
    b = list(v)  # Basis column indices.
    # This is better as a list than a set because column order of basis matrix
    # needs to be consistent.
    d = []  # Indices of dependent rows
    perm_r = None

    # Work on [I | A]: the identity supplies the initial (artificial) basis.
    A_orig = A
    A = np.zeros((m, m + n), order='F')
    np.fill_diagonal(A, 1)
    A[:, m:] = A_orig
    e = np.zeros(m)

    js_candidates = np.arange(m, m+n, dtype=int)  # candidate columns for basis
    # manual masking was faster than masked array
    js_mask = np.ones(js_candidates.shape, dtype=bool)

    # Implements basic algorithm from [2]
    # Uses some of the suggested improvements (removing zero rows and
    # Bartels-Golub update idea).
    # Removing column singletons would be easy, but it is not as important
    # because the procedure is performed only on the equality constraint
    # matrix from the original problem - not on the canonical form matrix,
    # which would have many more column singletons due to slack variables
    # from the inequality constraints.
    # The thoughts on "crashing" the initial basis are only really useful if
    # the matrix is sparse.

    lu = np.eye(m, order='F'), np.arange(m)  # initial LU is trivial
    perm_r = lu[1]
    for i in v:

        # e is the i-th standard basis vector (reusing one buffer).
        e[i] = 1
        if i > 0:
            e[i-1] = 0

        try:  # fails for i==0 and any time it gets ill-conditioned
            # Cheap Bartels-Golub-style update for the column swapped in on
            # the previous iteration; fall back to a full refactorization
            # whenever the update cannot be applied.
            j = b[i-1]
            lu = bg_update_dense(lu, perm_r, A[:, j], i-1)
        except Exception:
            lu = scipy.linalg.lu_factor(A[:, b])
            LU, p = lu
            # Convert lu_factor's sequential pivot swaps into an explicit
            # row permutation for use by bg_update_dense.
            perm_r = list(range(m))
            for i1, i2 in enumerate(p):
                perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1]

        # pi solves B^T pi = e_i (trans=1), i.e. row i of the basis inverse.
        pi = scipy.linalg.lu_solve(lu, e, trans=1)

        js = js_candidates[js_mask]
        batch = 50

        # This is a tiny bit faster than looping over columns individually,
        # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv:
        for j_index in range(0, len(js), batch):
            j_indices = js[j_index: min(j_index+batch, len(js))]

            c = abs(A[:, j_indices].transpose().dot(pi))
            if (c > tolapiv).any():
                j = js[j_index + np.argmax(c)]  # very independent column
                b[i] = j
                js_mask[j-m] = False
                break
        else:
            # No structural column can replace artificial column i, so row i
            # is linearly dependent on the others (or the system conflicts).
            bibar = pi.T.dot(rhs.reshape(-1, 1))
            bnorm = np.linalg.norm(rhs)
            if abs(bibar)/(1+bnorm) > tolprimal:  # inconsistent
                status = 2
                message = inconsistent
                return A_orig, rhs, status, message
            else:  # dependent
                d.append(i)
                if true_rank is not None and len(d) == m - true_rank:
                    break  # found all redundancies

    keep = set(range(m))
    keep = list(keep - set(d))
    return A_orig[keep, :], rhs[keep], status, message
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def _remove_redundancy_pivot_sparse(A, rhs):
    """
    Eliminates redundant equations from system of equations defined by Ax = b
    and identifies infeasibilities.

    Parameters
    ----------
    A : 2-D sparse matrix
        A matrix representing the left-hand side of a system of equations
    rhs : 1-D array
        An array representing the right-hand side of a system of equations

    Returns
    -------
    A : 2-D sparse matrix
        A matrix representing the left-hand side of a system of equations
    rhs : 1-D array
        An array representing the right-hand side of a system of equations
    status: int
        An integer indicating the status of the system
        0: No infeasibility identified
        2: Trivially infeasible
    message : str
        A string descriptor of the exit status of the optimization.

    References
    ----------
    .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
       large-scale linear programming." Optimization Methods and Software
       6.3 (1995): 219-227.

    """

    tolapiv = 1e-8    # pivoting tolerance: dot products below this are "zero"
    tolprimal = 1e-8  # feasibility tolerance for the right-hand side check
    status = 0
    message = ""
    inconsistent = ("There is a linear combination of rows of A_eq that "
                    "results in zero, suggesting a redundant constraint. "
                    "However the same linear combination of b_eq is "
                    "nonzero, suggesting that the constraints conflict "
                    "and the problem is infeasible.")
    A, rhs, status, message = _remove_zero_rows(A, rhs)

    if status != 0:
        return A, rhs, status, message

    m, n = A.shape

    v = list(range(m))  # Artificial column indices.
    b = list(v)  # Basis column indices.
    # This is better as a list than a set because column order of basis matrix
    # needs to be consistent.
    k = set(range(m, m+n))  # Structural column indices.
    d = []  # Indices of dependent rows

    # Work on [I | A]: the identity supplies the initial (artificial) basis.
    A_orig = A
    A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc()
    e = np.zeros(m)

    # Implements basic algorithm from [2]
    # Uses only one of the suggested improvements (removing zero rows).
    # Removing column singletons would be easy, but it is not as important
    # because the procedure is performed only on the equality constraint
    # matrix from the original problem - not on the canonical form matrix,
    # which would have many more column singletons due to slack variables
    # from the inequality constraints.
    # The thoughts on "crashing" the initial basis sound useful, but the
    # description of the procedure seems to assume a lot of familiarity with
    # the subject; it is not very explicit. I already went through enough
    # trouble getting the basic algorithm working, so I was not interested in
    # trying to decipher this, too. (Overall, the paper is fraught with
    # mistakes and ambiguities - which is strange, because the rest of
    # Andersen's papers are quite good.)
    # I tried and tried and tried to improve performance using the
    # Bartels-Golub update. It works, but it's only practical if the LU
    # factorization can be specialized as described, and that is not possible
    # until the SciPy SuperLU interface permits control over column
    # permutation - see issue #7700.

    for i in v:
        B = A[:, b]

        # e is the i-th standard basis vector (reusing one buffer).
        e[i] = 1
        if i > 0:
            e[i-1] = 0

        # pi solves B^T pi = e_i, i.e. row i of the basis inverse.
        pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1)

        js = list(k-set(b))  # not efficient, but this is not the time sink...

        # Due to overhead, it tends to be faster (for problems tested) to
        # compute the full matrix-vector product rather than individual
        # vector-vector products (with the chance of terminating as soon
        # as any are nonzero). For very large matrices, it might be worth
        # it to compute, say, 100 or 1000 at a time and stop when a nonzero
        # is found.

        c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0]
        if len(c) > 0:  # independent
            j = js[c[0]]
            # in a previous commit, the previous line was changed to choose
            # index j corresponding with the maximum dot product.
            # While this avoided issues with almost
            # singular matrices, it slowed the routine in most NETLIB tests.
            # I think this is because these columns were denser than the
            # first column with nonzero dot product (c[0]).
            # It would be nice to have a heuristic that balances sparsity with
            # high dot product, but I don't think it's worth the time to
            # develop one right now. Bartels-Golub update is a much higher
            # priority.
            b[i] = j  # replace artificial column
        else:
            # No structural column can replace artificial column i, so row i
            # is linearly dependent on the others (or the system conflicts).
            bibar = pi.T.dot(rhs.reshape(-1, 1))
            bnorm = np.linalg.norm(rhs)
            if abs(bibar)/(1 + bnorm) > tolprimal:
                status = 2
                message = inconsistent
                return A_orig, rhs, status, message
            else:  # dependent
                d.append(i)

    keep = set(range(m))
    keep = list(keep - set(d))
    return A_orig[keep, :], rhs[keep], status, message
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def _remove_redundancy_svd(A, b):
    """
    Eliminates redundant equations from system of equations defined by Ax = b
    and identifies infeasibilities.

    Parameters
    ----------
    A : 2-D array
        An array representing the left-hand side of a system of equations
    b : 1-D array
        An array representing the right-hand side of a system of equations

    Returns
    -------
    A : 2-D array
        An array representing the left-hand side of a system of equations
    b : 1-D array
        An array representing the right-hand side of a system of equations
    status: int
        An integer indicating the status of the system
        0: No infeasibility identified
        2: Trivially infeasible
        4: Numerical issues prevented redundancy removal
    message : str
        A string descriptor of the exit status of the optimization.

    References
    ----------
    .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
       large-scale linear programming." Optimization Methods and Software
       6.3 (1995): 219-227.

    """

    A, b, status, message = _remove_zero_rows(A, b)

    if status != 0:
        return A, b, status, message

    U, s, Vh = svd(A)
    eps = np.finfo(float).eps
    # Standard numerical-rank tolerance: largest singular value scaled by
    # matrix dimension and machine epsilon.
    tol = s.max() * max(A.shape) * eps

    m, n = A.shape
    # With m > n, A has more rows than columns, so at least one row must be
    # dependent; treat the "smallest singular value" as exactly zero.
    s_min = s[-1] if m <= n else 0

    # this algorithm is faster than that of [2] when the nullspace is small
    # but it could probably be improved by randomized algorithms and with
    # a sparse implementation.
    # it relies on repeated singular value decomposition to find linearly
    # dependent rows (as identified by columns of U that correspond with zero
    # singular values). Unfortunately, only one row can be removed per
    # decomposition (I tried otherwise; doing so can cause problems.)
    # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds
    # but that function is unreliable at finding singular values near zero.
    # Finding max eigenvalue L of A A^T, then largest eigenvalue (and
    # associated eigenvector) of -A A^T + L I (I is identity) via power
    # iteration would also work in theory, but is only efficient if the
    # smallest nonzero eigenvalue of A A^T is close to the largest nonzero
    # eigenvalue.

    while abs(s_min) < tol:
        # v is the left singular vector for the (near-)zero singular value:
        # the coefficients of a linear combination of rows that vanishes.
        v = U[:, -1]  # TODO: return these so user can eliminate from problem?
        # rows need to be represented in significant amount
        eligibleRows = np.abs(v) > tol * 10e6
        if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol):
            status = 4
            message = ("Due to numerical issues, redundant equality "
                       "constraints could not be removed automatically. "
                       "Try providing your constraint matrices as sparse "
                       "matrices to activate sparse presolve, try turning "
                       "off redundancy removal, or try turning off presolve "
                       "altogether.")
            break
        if np.any(np.abs(v.dot(b)) > tol * 100):  # factor of 100 to fix 10038 and 10349
            status = 2
            message = ("There is a linear combination of rows of A_eq that "
                       "results in zero, suggesting a redundant constraint. "
                       "However the same linear combination of b_eq is "
                       "nonzero, suggesting that the constraints conflict "
                       "and the problem is infeasible.")
            break

        # Drop the densest of the eligible rows and refactorize.
        i_remove = _get_densest(A, eligibleRows)
        A = np.delete(A, i_remove, axis=0)
        b = np.delete(b, i_remove)
        U, s, Vh = svd(A)
        m, n = A.shape
        s_min = s[-1] if m <= n else 0

    return A, b, status, message
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def _remove_redundancy_id(A, rhs, rank=None, randomized=True):
    """Eliminates redundant equations from a system of equations.

    Eliminates redundant equations from system of equations defined by Ax = b
    and identifies infeasibilities, using an interpolative decomposition to
    select a maximal set of linearly independent rows.

    Parameters
    ----------
    A : 2-D array
        An array representing the left-hand side of a system of equations
    rhs : 1-D array
        An array representing the right-hand side of a system of equations
    rank : int, optional
        The rank of A; computed via ``np.linalg.matrix_rank`` when omitted
    randomized: bool, optional
        True for randomized interpolative decomposition

    Returns
    -------
    A : 2-D array
        An array representing the left-hand side of a system of equations
    rhs : 1-D array
        An array representing the right-hand side of a system of equations
    status: int
        An integer indicating the status of the system
        0: No infeasibility identified
        2: Trivially infeasible
    message : str
        A string descriptor of the exit status of the optimization.

    """

    inconsistent = ("There is a linear combination of rows of A_eq that "
                    "results in zero, suggesting a redundant constraint. "
                    "However the same linear combination of b_eq is "
                    "nonzero, suggesting that the constraints conflict "
                    "and the problem is infeasible.")

    A, rhs, status, message = _remove_zero_rows(A, rhs)
    if status != 0:
        return A, rhs, status, message

    m, n = A.shape

    # Determine the numerical rank unless the caller already knows it.
    k = np.linalg.matrix_rank(A) if rank is None else rank

    idx, proj = interp_decomp(A.T, k, rand=randomized)

    # The first k entries of ``idx`` index the independent rows; the
    # remaining m - k entries index the dependent rows. ``proj`` gives the
    # linear combinations of independent rows that reproduce the dependent
    # ones, so the same combinations applied to ``rhs`` must reproduce the
    # dependent right-hand-side entries. If they do not, the system is
    # inconsistent and the problem is infeasible.
    if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]):
        status = 2
        message = inconsistent

    # Sort so the surviving rows keep their original order, matching the
    # other redundancy removal routines (tests were written with that in
    # mind).
    keep = sorted(idx[:k])
    return A[keep, :], rhs[keep], status, message
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_root.py
ADDED
|
@@ -0,0 +1,732 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unified interfaces to root finding algorithms.
|
| 3 |
+
|
| 4 |
+
Functions
|
| 5 |
+
---------
|
| 6 |
+
- root : find a root of a vector function.
|
| 7 |
+
"""
|
| 8 |
+
__all__ = ['root']
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
from warnings import warn
|
| 13 |
+
|
| 14 |
+
from ._optimize import MemoizeJac, OptimizeResult, _check_unknown_options
|
| 15 |
+
from ._minpack_py import _root_hybr, leastsq
|
| 16 |
+
from ._spectral import _root_df_sane
|
| 17 |
+
from . import _nonlin as nonlin
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Names of all solvers accepted by the ``method`` argument of `root`.
ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
                'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov',
                'df-sane']
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None,
         options=None):
    r"""
    Find a root of a vector function.

    Parameters
    ----------
    fun : callable
        A vector function to find a root of.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to the objective function and its Jacobian.
    method : str, optional
        Type of solver. Should be one of

        - 'hybr' :ref:`(see here) <optimize.root-hybr>`
        - 'lm' :ref:`(see here) <optimize.root-lm>`
        - 'broyden1' :ref:`(see here) <optimize.root-broyden1>`
        - 'broyden2' :ref:`(see here) <optimize.root-broyden2>`
        - 'anderson' :ref:`(see here) <optimize.root-anderson>`
        - 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>`
        - 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>`
        - 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>`
        - 'krylov' :ref:`(see here) <optimize.root-krylov>`
        - 'df-sane' :ref:`(see here) <optimize.root-dfsane>`

    jac : bool or callable, optional
        If `jac` is a Boolean and is True, `fun` is assumed to return the
        value of Jacobian along with the objective function. If False, the
        Jacobian will be estimated numerically.
        `jac` can also be a callable returning the Jacobian of `fun`. In
        this case, it must accept the same arguments as `fun`.
    tol : float, optional
        Tolerance for termination. For detailed control, use solver-specific
        options.
    callback : function, optional
        Optional callback function. It is called on every iteration as
        ``callback(x, f)`` where `x` is the current solution and `f`
        the corresponding residual. For all methods but 'hybr' and 'lm'.
    options : dict, optional
        A dictionary of solver options. E.g., `xtol` or `maxiter`, see
        :obj:`show_options()` for details.

    Returns
    -------
    sol : OptimizeResult
        The solution represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the algorithm exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.

    See also
    --------
    show_options : Additional options accepted by the solvers

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter. The default method is *hybr*.

    Method *hybr* uses a modification of the Powell hybrid method as
    implemented in MINPACK [1]_.

    Method *lm* solves the system of nonlinear equations in a least squares
    sense using a modification of the Levenberg-Marquardt algorithm as
    implemented in MINPACK [1]_.

    Method *df-sane* is a derivative-free spectral method. [3]_

    Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
    *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
    with backtracking or full line searches [2]_. Each method corresponds
    to a particular Jacobian approximations.

    - Method *broyden1* uses Broyden's first Jacobian approximation, it is
      known as Broyden's good method.
    - Method *broyden2* uses Broyden's second Jacobian approximation, it
      is known as Broyden's bad method.
    - Method *anderson* uses (extended) Anderson mixing.
    - Method *Krylov* uses Krylov approximation for inverse Jacobian. It
      is suitable for large-scale problem.
    - Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
    - Method *linearmixing* uses a scalar Jacobian approximation.
    - Method *excitingmixing* uses a tuned diagonal Jacobian
      approximation.

    .. warning::

        The algorithms implemented for methods *diagbroyden*,
        *linearmixing* and *excitingmixing* may be useful for specific
        problems, but whether they will work may depend strongly on the
        problem.

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
       1980. User Guide for MINPACK-1.
    .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
       Equations. Society for Industrial and Applied Mathematics.
       <https://archive.siam.org/books/kelley/fr16/>
    .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).

    Examples
    --------
    The following functions define a system of nonlinear equations and its
    jacobian.

    >>> import numpy as np
    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    >>> def jac(x):
    ...     return np.array([[1 + 1.5 * (x[0] - x[1])**2,
    ...                       -1.5 * (x[0] - x[1])**2],
    ...                      [-1.5 * (x[1] - x[0])**2,
    ...                       1 + 1.5 * (x[1] - x[0])**2]])

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
    >>> sol.x
    array([ 0.8411639,  0.1588361])

    **Large problem**

    Suppose that we needed to solve the following integrodifferential
    equation on the square :math:`[0,1]\times[0,1]`:

    .. math::

       \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2

    with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
    the square.

    The solution can be found using the ``method='krylov'`` solver:

    >>> from scipy import optimize
    >>> # parameters
    >>> nx, ny = 75, 75
    >>> hx, hy = 1./(nx-1), 1./(ny-1)

    >>> P_left, P_right = 0, 0
    >>> P_top, P_bottom = 1, 0

    >>> def residual(P):
    ...    d2x = np.zeros_like(P)
    ...    d2y = np.zeros_like(P)
    ...
    ...    d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
    ...    d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
    ...    d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
    ...
    ...    d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
    ...    d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
    ...    d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
    ...
    ...    return d2x + d2y - 10*np.cosh(P).mean()**2

    >>> guess = np.zeros((nx, ny), float)
    >>> sol = optimize.root(residual, guess, method='krylov')
    >>> print('Residual: %g' % abs(residual(sol.x)).max())
    Residual: 5.7972e-06  # may vary

    >>> import matplotlib.pyplot as plt
    >>> x, y = np.mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
    >>> plt.pcolormesh(x, y, sol.x, shading='gouraud')
    >>> plt.colorbar()
    >>> plt.show()

    """
    # Count function evaluations via an attribute on the wrapper so the
    # total can be reported on the result regardless of which backend ran.
    # Note: the closure reads `fun` at call time, so the MemoizeJac
    # re-binding below is picked up by the wrapper as well.
    def _wrapped_fun(*fargs):
        """
        Wrapped `func` to track the number of times
        the function has been called.
        """
        _wrapped_fun.nfev += 1
        return fun(*fargs)

    _wrapped_fun.nfev = 0

    if not isinstance(args, tuple):
        args = (args,)

    meth = method.lower()
    if options is None:
        options = {}

    # 'hybr' and 'lm' (MINPACK) have no iteration hook, so a callback
    # cannot be honored there -- warn rather than silently ignore it.
    if callback is not None and meth in ('hybr', 'lm'):
        warn('Method %s does not accept callback.' % method,
             RuntimeWarning, stacklevel=2)

    # fun also returns the Jacobian
    if not callable(jac) and meth in ('hybr', 'lm'):
        if bool(jac):
            fun = MemoizeJac(fun)
            jac = fun.derivative
        else:
            jac = None

    # set default tolerances
    if tol is not None:
        # Copy so the caller's options dict is not mutated.
        options = dict(options)
        if meth in ('hybr', 'lm'):
            options.setdefault('xtol', tol)
        elif meth in ('df-sane',):
            options.setdefault('ftol', tol)
        elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                      'diagbroyden', 'excitingmixing', 'krylov'):
            options.setdefault('xtol', tol)
            # Disable the other convergence criteria so `tol` alone governs.
            options.setdefault('xatol', np.inf)
            options.setdefault('ftol', np.inf)
            options.setdefault('fatol', np.inf)

    # Dispatch to the backend matching `meth`.
    if meth == 'hybr':
        sol = _root_hybr(_wrapped_fun, x0, args=args, jac=jac, **options)
    elif meth == 'lm':
        sol = _root_leastsq(_wrapped_fun, x0, args=args, jac=jac, **options)
    elif meth == 'df-sane':
        _warn_jac_unused(jac, method)
        sol = _root_df_sane(_wrapped_fun, x0, args=args, callback=callback,
                            **options)
    elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                  'diagbroyden', 'excitingmixing', 'krylov'):
        _warn_jac_unused(jac, method)
        sol = _root_nonlin_solve(_wrapped_fun, x0, args=args, jac=jac,
                                 _method=meth, _callback=callback,
                                 **options)
    else:
        raise ValueError('Unknown solver %s' % method)

    sol.nfev = _wrapped_fun.nfev
    return sol
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _warn_jac_unused(jac, method):
|
| 267 |
+
if jac is not None:
|
| 268 |
+
warn(f'Method {method} does not use the jacobian (jac).',
|
| 269 |
+
RuntimeWarning, stacklevel=2)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def _root_leastsq(fun, x0, args=(), jac=None,
                  col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
                  gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
                  **unknown_options):
    """
    Solve for least squares with Levenberg-Marquardt

    Options
    -------
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns
        of the Jacobian.
    maxiter : int
        The maximum number of calls to the function. If zero, then
        100*(N+1) is the maximum where N is the number of elements in x0.
    eps : float
        A suitable step length for the forward-difference approximation of
        the Jacobian (for Dfun=None). If `eps` is less than the machine
        precision, it is assumed that the relative errors in the functions
        are of the order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the variables.
    """
    _check_unknown_options(unknown_options)

    # Evaluation counter shared with the wrapper via a mutable container.
    eval_count = {'nfev': 0}

    def counted_residual(*call_args):
        """Forward to `fun`, recording each evaluation."""
        eval_count['nfev'] += 1
        return fun(*call_args)

    x, cov_x, info, msg, ier = leastsq(
        counted_residual, x0, args=args, Dfun=jac, full_output=True,
        col_deriv=col_deriv, xtol=xtol, ftol=ftol, gtol=gtol,
        maxfev=maxiter, epsfcn=eps, factor=factor, diag=diag)

    # MINPACK exit codes 1-4 indicate success.
    residuals = info.pop('fvec')
    sol = OptimizeResult(x=x, message=msg, status=ier,
                         success=ier in (1, 2, 3, 4), cov_x=cov_x,
                         fun=residuals, method="lm")
    sol.update(info)
    sol.nfev = eval_count['nfev']
    return sol
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def _root_nonlin_solve(fun, x0, args=(), jac=None,
                       _callback=None, _method=None,
                       nit=None, disp=False, maxiter=None,
                       ftol=None, fatol=None, xtol=None, xatol=None,
                       tol_norm=None, line_search='armijo', jac_options=None,
                       **unknown_options):
    """Common driver for the quasi-Newton solvers built on `nonlin`.

    Translates `root`-style option names (ftol/fatol/xtol/xatol/disp/nit)
    to the `nonlin.nonlin_solve` keyword names and wraps the result in an
    `OptimizeResult`.
    """
    _check_unknown_options(unknown_options)

    if jac_options is None:
        jac_options = {}

    # Map the method name to the Jacobian-approximation class it uses.
    jacobian_classes = {
        'broyden1': nonlin.BroydenFirst,
        'broyden2': nonlin.BroydenSecond,
        'anderson': nonlin.Anderson,
        'linearmixing': nonlin.LinearMixing,
        'diagbroyden': nonlin.DiagBroyden,
        'excitingmixing': nonlin.ExcitingMixing,
        'krylov': nonlin.KrylovJacobian,
    }
    approx_jacobian = jacobian_classes[_method](**jac_options)

    # Bind any extra arguments; when `jac is True`, `fun` returns a
    # (residual, jacobian) pair and only the residual part is wanted here.
    if not args:
        residual = fun
    elif jac is True:
        def residual(x):
            return fun(x, *args)[0]
    else:
        def residual(x):
            return fun(x, *args)

    x, info = nonlin.nonlin_solve(residual, x0, jacobian=approx_jacobian,
                                  iter=nit, verbose=disp,
                                  maxiter=maxiter, f_tol=fatol,
                                  f_rtol=ftol, x_tol=xatol,
                                  x_rtol=xtol, tol_norm=tol_norm,
                                  line_search=line_search,
                                  callback=_callback, full_output=True,
                                  raise_exception=False)
    result = OptimizeResult(x=x, method=_method)
    result.update(info)
    return result
|
| 376 |
+
|
| 377 |
+
def _root_broyden1_doc():
    # NOTE(review): empty docstring-holder; the docstring text is presumably
    # surfaced to users (e.g. via `show_options`) -- confirm before editing it.
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.
            alpha : float, optional
                Initial guess for the Jacobian is (-1/alpha).
            reduction_method : str or tuple, optional
                Method used in ensuring that the rank of the Broyden
                matrix stays low. Can either be a string giving the
                name of the method, or a tuple of the form ``(method,
                param1, param2, ...)`` that gives the name of the
                method and values for additional parameters.

                Methods available:

                    - ``restart``
                        Drop all matrix columns. Has no
                        extra parameters.
                    - ``simple``
                        Drop oldest matrix column. Has no
                        extra parameters.
                    - ``svd``
                        Keep only the most significant SVD
                        components.

                        Extra parameters:

                            - ``to_retain``
                                Number of SVD components to
                                retain when rank reduction is done.
                                Default is ``max_rank - 2``.
            max_rank : int, optional
                Maximum rank for the Broyden matrix.
                Default is infinity (i.e., no rank reduction).

    Examples
    --------
    >>> def func(x):
    ...     return np.cos(x) + x[::-1] - [1, 2, 3, 4]
    ...
    >>> from scipy import optimize
    >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-14)
    >>> x = res.x
    >>> x
    array([4.04674914, 3.91158389, 2.71791677, 1.61756251])
    >>> np.cos(x) + x[::-1]
    array([1., 2., 3., 4.])

    """
    pass
|
| 453 |
+
|
| 454 |
+
def _root_broyden2_doc():
    # NOTE(review): empty docstring-holder; the docstring text is presumably
    # surfaced to users (e.g. via `show_options`) -- confirm before editing it.
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
        reduction_method : str or tuple, optional
            Method used in ensuring that the rank of the Broyden
            matrix stays low. Can either be a string giving the
            name of the method, or a tuple of the form ``(method,
            param1, param2, ...)`` that gives the name of the
            method and values for additional parameters.

            Methods available:

                - ``restart``
                    Drop all matrix columns. Has no
                    extra parameters.
                - ``simple``
                    Drop oldest matrix column. Has no
                    extra parameters.
                - ``svd``
                    Keep only the most significant SVD
                    components.

                    Extra parameters:

                        - ``to_retain``
                            Number of SVD components to
                            retain when rank reduction is done.
                            Default is ``max_rank - 2``.
        max_rank : int, optional
            Maximum rank for the Broyden matrix.
            Default is infinity (i.e., no rank reduction).
    """
    pass
|
| 517 |
+
|
| 518 |
+
def _root_anderson_doc():
    # NOTE(review): empty docstring-holder; the docstring text is presumably
    # surfaced to users (e.g. via `show_options`) -- confirm before editing it.
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
        M : float, optional
            Number of previous vectors to retain. Defaults to 5.
        w0 : float, optional
            Regularization parameter for numerical stability.
            Compared to unity, good values of the order of 0.01.
    """
    pass
|
| 558 |
+
|
| 559 |
+
def _root_linearmixing_doc():
    # NOTE(review): empty docstring-holder; the docstring text is presumably
    # surfaced to users (e.g. via `show_options`) -- confirm before editing it.
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            initial guess for the jacobian is (-1/alpha).
    """
    pass
|
| 594 |
+
|
| 595 |
+
def _root_diagbroyden_doc():
    # NOTE(review): empty docstring-holder; the docstring text is presumably
    # surfaced to users (e.g. via `show_options`) -- confirm before editing it.
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            initial guess for the jacobian is (-1/alpha).
    """
    pass
|
| 630 |
+
|
| 631 |
+
def _root_excitingmixing_doc():
    # NOTE(review): empty docstring-holder; the docstring text is presumably
    # surfaced to users (e.g. via `show_options`) -- confirm before editing it.
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            Initial Jacobian approximation is (-1/alpha).
        alphamax : float, optional
            The entries of the diagonal Jacobian are kept in the range
            ``[alpha, alphamax]``.
    """
    pass
|
| 669 |
+
|
| 670 |
+
def _root_krylov_doc():
    # NOTE(review): empty docstring-holder; the docstring text is presumably
    # surfaced to users (e.g. via `show_options`) -- confirm before editing it.
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        rdiff : float, optional
            Relative step size to use in numerical differentiation.
        method : str or callable, optional
            Krylov method to use to approximate the Jacobian. Can be a string,
            or a function implementing the same interface as the iterative
            solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
            ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
            ``'tfqmr'``.

            The default is `scipy.sparse.linalg.lgmres`.
        inner_M : LinearOperator or InverseJacobian
            Preconditioner for the inner Krylov iteration.
            Note that you can use also inverse Jacobians as (adaptive)
            preconditioners. For example,

            >>> jac = BroydenFirst()
            >>> kjac = KrylovJacobian(inner_M=jac.inverse).

            If the preconditioner has a method named 'update', it will
            be called as ``update(x, f)`` after each nonlinear step,
            with ``x`` giving the current point, and ``f`` the current
            function value.
        inner_tol, inner_maxiter, ...
            Parameters to pass on to the "inner" Krylov solver.
            See `scipy.sparse.linalg.gmres` for details.
        outer_k : int, optional
            Size of the subspace kept across LGMRES nonlinear
            iterations.

            See `scipy.sparse.linalg.lgmres` for details.
    """
    pass
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_root_scalar.py
ADDED
|
@@ -0,0 +1,525 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unified interfaces to root finding algorithms for real or complex
|
| 3 |
+
scalar functions.
|
| 4 |
+
|
| 5 |
+
Functions
|
| 6 |
+
---------
|
| 7 |
+
- root : find a root of a scalar function.
|
| 8 |
+
"""
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from . import _zeros_py as optzeros
|
| 12 |
+
from ._numdiff import approx_derivative
|
| 13 |
+
|
| 14 |
+
__all__ = ['root_scalar']
|
| 15 |
+
|
| 16 |
+
ROOT_SCALAR_METHODS = ['bisect', 'brentq', 'brenth', 'ridder', 'toms748',
|
| 17 |
+
'newton', 'secant', 'halley']
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class MemoizeDer:
    """Cache a function that returns its value together with derivative(s).

    Wraps a callable ``fun(x, *args) -> (f, f'[, f''])`` so that the value
    and the derivatives at the same point ``x`` come from a single
    underlying evaluation.  Only the most recent point is cached, and
    ``args`` is assumed not to change between invocations -- exactly the
    access pattern of a scalar root finder, where ``x`` rarely, if ever,
    repeats."""

    def __init__(self, fun):
        self.fun = fun      # callable returning a value/derivative(s) tuple
        self.vals = None    # cached tuple from the most recent evaluation
        self.x = None       # point at which ``vals`` was computed
        self.n_calls = 0    # number of underlying function evaluations

    def _refresh(self, x, args):
        """Evaluate ``fun`` at ``x`` unless the cache already holds it."""
        # The derivative may be requested before the value, so every
        # accessor funnels through this check.
        if self.vals is None or x != self.x:
            result = self.fun(x, *args)
            self.x = x
            self.n_calls += 1
            self.vals = result[:]

    def __call__(self, x, *args):
        r"""Calculate f or use cached value if available"""
        self._refresh(x, args)
        return self.vals[0]

    def fprime(self, x, *args):
        r"""Calculate f' or use a cached value if available"""
        self._refresh(x, args)
        return self.vals[1]

    def fprime2(self, x, *args):
        r"""Calculate f'' or use a cached value if available"""
        self._refresh(x, args)
        return self.vals[2]

    def ncalls(self):
        """Number of times the underlying function has been evaluated."""
        return self.n_calls
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def root_scalar(f, args=(), method=None, bracket=None,
                fprime=None, fprime2=None,
                x0=None, x1=None,
                xtol=None, rtol=None, maxiter=None,
                options=None):
    """
    Find a root of a scalar function.

    Parameters
    ----------
    f : callable
        A function to find a root of.
    args : tuple, optional
        Extra arguments passed to the objective function and its derivative(s).
    method : str, optional
        Type of solver.  Should be one of

        - 'bisect'    :ref:`(see here) <optimize.root_scalar-bisect>`
        - 'brentq'    :ref:`(see here) <optimize.root_scalar-brentq>`
        - 'brenth'    :ref:`(see here) <optimize.root_scalar-brenth>`
        - 'ridder'    :ref:`(see here) <optimize.root_scalar-ridder>`
        - 'toms748'   :ref:`(see here) <optimize.root_scalar-toms748>`
        - 'newton'    :ref:`(see here) <optimize.root_scalar-newton>`
        - 'secant'    :ref:`(see here) <optimize.root_scalar-secant>`
        - 'halley'    :ref:`(see here) <optimize.root_scalar-halley>`

    bracket : A sequence of 2 floats, optional
        An interval bracketing a root. `f(x, *args)` must have different
        signs at the two endpoints.
    x0 : float, optional
        Initial guess.
    x1 : float, optional
        A second guess.
    fprime : bool or callable, optional
        If `fprime` is a boolean and is True, `f` is assumed to return the
        value of the objective function and of the derivative.
        `fprime` can also be a callable returning the derivative of `f`. In
        this case, it must accept the same arguments as `f`.
    fprime2 : bool or callable, optional
        If `fprime2` is a boolean and is True, `f` is assumed to return the
        value of the objective function and of the
        first and second derivatives.
        `fprime2` can also be a callable returning the second derivative of `f`.
        In this case, it must accept the same arguments as `f`.
    xtol : float, optional
        Tolerance (absolute) for termination.
    rtol : float, optional
        Tolerance (relative) for termination.
    maxiter : int, optional
        Maximum number of iterations.
    options : dict, optional
        A dictionary of solver options. E.g., ``k``, see
        :obj:`show_options()` for details.

    Returns
    -------
    sol : RootResults
        The solution represented as a ``RootResults`` object.
        Important attributes are: ``root`` the solution , ``converged`` a
        boolean flag indicating if the algorithm exited successfully and
        ``flag`` which describes the cause of the termination. See
        `RootResults` for a description of other attributes.

    See also
    --------
    show_options : Additional options accepted by the solvers
    root : Find a root of a vector function.

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter.

    The default is to use the best method available for the situation
    presented.
    If a bracket is provided, it may use one of the bracketing methods.
    If a derivative and an initial value are specified, it may
    select one of the derivative-based methods.
    If no method is judged applicable, it will raise an Exception.

    Arguments for each method are as follows (x=required, o=optional).

    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
    |                    method                     | f | args | bracket | x0 | x1 | fprime | fprime2 | xtol | rtol | maxiter | options |
    +===============================================+===+======+=========+====+====+========+=========+======+======+=========+=========+
    | :ref:`bisect <optimize.root_scalar-bisect>`   | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
    | :ref:`brentq <optimize.root_scalar-brentq>`   | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
    | :ref:`brenth <optimize.root_scalar-brenth>`   | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
    | :ref:`ridder <optimize.root_scalar-ridder>`   | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
    | :ref:`toms748 <optimize.root_scalar-toms748>` | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
    | :ref:`secant <optimize.root_scalar-secant>`   | x |  o   |         | x  | o  |        |         |  o   |  o   |    o    |   o     |
    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
    | :ref:`newton <optimize.root_scalar-newton>`   | x |  o   |         | x  |    |   o    |         |  o   |  o   |    o    |   o     |
    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
    | :ref:`halley <optimize.root_scalar-halley>`   | x |  o   |         | x  |    |   x    |    x    |  o   |  o   |    o    |   o     |
    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+

    Examples
    --------

    Find the root of a simple cubic

    >>> from scipy import optimize
    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    >>> def fprime(x):
    ...     return 3*x**2

    The `brentq` method takes as input a bracket

    >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq')
    >>> sol.root, sol.iterations, sol.function_calls
    (1.0, 10, 11)

    The `newton` method takes as input a single point and uses the
    derivative(s).

    >>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton')
    >>> sol.root, sol.iterations, sol.function_calls
    (1.0, 11, 22)

    The function can provide the value and derivative(s) in a single call.

    >>> def f_p_pp(x):
    ...     return (x**3 - 1), 3*x**2, 6*x

    >>> sol = optimize.root_scalar(
    ...     f_p_pp, x0=0.2, fprime=True, method='newton'
    ... )
    >>> sol.root, sol.iterations, sol.function_calls
    (1.0, 11, 11)

    >>> sol = optimize.root_scalar(
    ...     f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley'
    ... )
    >>> sol.root, sol.iterations, sol.function_calls
    (1.0, 7, 8)


    """  # noqa: E501
    if not isinstance(args, tuple):
        args = (args,)

    if options is None:
        options = {}

    # fun also returns the derivative(s): wrap in a memoizer so that one
    # user call yields the value and every requested derivative.
    is_memoized = False
    if fprime2 is not None and not callable(fprime2):
        if bool(fprime2):
            f = MemoizeDer(f)
            is_memoized = True
            fprime2 = f.fprime2
            fprime = f.fprime
        else:
            fprime2 = None
    if fprime is not None and not callable(fprime):
        if bool(fprime):
            f = MemoizeDer(f)
            is_memoized = True
            fprime = f.fprime
        else:
            fprime = None

    # respect solver-specific default tolerances - only pass in if actually set.
    # NOTE: previously gathered via locals().get(k), which is fragile
    # (silently breaks if a parameter is renamed and relies on locals()
    # snapshot behavior); an explicit name -> value mapping is equivalent.
    kwargs = {}
    for k, v in (('xtol', xtol), ('rtol', rtol), ('maxiter', maxiter)):
        if v is not None:
            kwargs[k] = v

    # Set any solver-specific options
    if options:
        kwargs.update(options)
    # Always request full_output from the underlying method as _root_scalar
    # always returns a RootResults object
    kwargs.update(full_output=True, disp=False)

    # Pick a method if not specified.
    # Use the "best" method available for the situation.
    if not method:
        if bracket:
            method = 'brentq'
        elif x0 is not None:
            if fprime:
                if fprime2:
                    method = 'halley'
                else:
                    method = 'newton'
            elif x1 is not None:
                method = 'secant'
            else:
                method = 'newton'
    if not method:
        raise ValueError('Unable to select a solver as neither bracket '
                         'nor starting point provided.')

    meth = method.lower()
    # 'halley' and 'secant' are both serviced by the underlying `newton`
    # solver; they differ only in which derivative arguments are passed.
    map2underlying = {'halley': 'newton', 'secant': 'newton'}

    try:
        methodc = getattr(optzeros, map2underlying.get(meth, meth))
    except AttributeError as e:
        raise ValueError('Unknown solver %s' % meth) from e

    if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']:
        if not isinstance(bracket, (list, tuple, np.ndarray)):
            raise ValueError('Bracket needed for %s' % method)

        a, b = bracket[:2]
        try:
            r, sol = methodc(f, a, b, args=args, **kwargs)
        except ValueError as e:
            # gh-17622 fixed some bugs in low-level solvers by raising an error
            # (rather than returning incorrect results) when the callable
            # returns a NaN. It did so by wrapping the callable rather than
            # modifying compiled code, so the iteration count is not available.
            if hasattr(e, "_x"):
                sol = optzeros.RootResults(root=e._x,
                                           iterations=np.nan,
                                           function_calls=e._function_calls,
                                           flag=str(e), method=method)
            else:
                raise

    elif meth in ['secant']:
        if x0 is None:
            raise ValueError('x0 must not be None for %s' % method)
        # The underlying `newton` solver spells the absolute tolerance `tol`.
        if 'xtol' in kwargs:
            kwargs['tol'] = kwargs.pop('xtol')
        r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None,
                         x1=x1, **kwargs)
    elif meth in ['newton']:
        if x0 is None:
            raise ValueError('x0 must not be None for %s' % method)
        if not fprime:
            # approximate fprime with finite differences

            def fprime(x, *args):
                # `root_scalar` doesn't actually seem to support vectorized
                # use of `newton`. In that case, `approx_derivative` will
                # always get scalar input. Nonetheless, it always returns an
                # array, so we extract the element to produce scalar output.
                return approx_derivative(f, x, method='2-point', args=args)[0]

        if 'xtol' in kwargs:
            kwargs['tol'] = kwargs.pop('xtol')
        r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None,
                         **kwargs)
    elif meth in ['halley']:
        if x0 is None:
            raise ValueError('x0 must not be None for %s' % method)
        if not fprime:
            raise ValueError('fprime must be specified for %s' % method)
        if not fprime2:
            raise ValueError('fprime2 must be specified for %s' % method)
        if 'xtol' in kwargs:
            kwargs['tol'] = kwargs.pop('xtol')
        r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs)
    else:
        raise ValueError('Unknown solver %s' % method)

    if is_memoized:
        # Replace the function_calls count with the memoized count.
        # Avoids double and triple-counting.
        n_calls = f.n_calls
        sol.function_calls = n_calls

    return sol
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def _root_scalar_brentq_doc():
|
| 340 |
+
r"""
|
| 341 |
+
Options
|
| 342 |
+
-------
|
| 343 |
+
args : tuple, optional
|
| 344 |
+
Extra arguments passed to the objective function.
|
| 345 |
+
bracket: A sequence of 2 floats, optional
|
| 346 |
+
An interval bracketing a root. `f(x, *args)` must have different
|
| 347 |
+
signs at the two endpoints.
|
| 348 |
+
xtol : float, optional
|
| 349 |
+
Tolerance (absolute) for termination.
|
| 350 |
+
rtol : float, optional
|
| 351 |
+
Tolerance (relative) for termination.
|
| 352 |
+
maxiter : int, optional
|
| 353 |
+
Maximum number of iterations.
|
| 354 |
+
options: dict, optional
|
| 355 |
+
Specifies any method-specific options not covered above
|
| 356 |
+
|
| 357 |
+
"""
|
| 358 |
+
pass
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def _root_scalar_brenth_doc():
|
| 362 |
+
r"""
|
| 363 |
+
Options
|
| 364 |
+
-------
|
| 365 |
+
args : tuple, optional
|
| 366 |
+
Extra arguments passed to the objective function.
|
| 367 |
+
bracket: A sequence of 2 floats, optional
|
| 368 |
+
An interval bracketing a root. `f(x, *args)` must have different
|
| 369 |
+
signs at the two endpoints.
|
| 370 |
+
xtol : float, optional
|
| 371 |
+
Tolerance (absolute) for termination.
|
| 372 |
+
rtol : float, optional
|
| 373 |
+
Tolerance (relative) for termination.
|
| 374 |
+
maxiter : int, optional
|
| 375 |
+
Maximum number of iterations.
|
| 376 |
+
options: dict, optional
|
| 377 |
+
Specifies any method-specific options not covered above.
|
| 378 |
+
|
| 379 |
+
"""
|
| 380 |
+
pass
|
| 381 |
+
|
| 382 |
+
def _root_scalar_toms748_doc():
|
| 383 |
+
r"""
|
| 384 |
+
Options
|
| 385 |
+
-------
|
| 386 |
+
args : tuple, optional
|
| 387 |
+
Extra arguments passed to the objective function.
|
| 388 |
+
bracket: A sequence of 2 floats, optional
|
| 389 |
+
An interval bracketing a root. `f(x, *args)` must have different
|
| 390 |
+
signs at the two endpoints.
|
| 391 |
+
xtol : float, optional
|
| 392 |
+
Tolerance (absolute) for termination.
|
| 393 |
+
rtol : float, optional
|
| 394 |
+
Tolerance (relative) for termination.
|
| 395 |
+
maxiter : int, optional
|
| 396 |
+
Maximum number of iterations.
|
| 397 |
+
options: dict, optional
|
| 398 |
+
Specifies any method-specific options not covered above.
|
| 399 |
+
|
| 400 |
+
"""
|
| 401 |
+
pass
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def _root_scalar_secant_doc():
|
| 405 |
+
r"""
|
| 406 |
+
Options
|
| 407 |
+
-------
|
| 408 |
+
args : tuple, optional
|
| 409 |
+
Extra arguments passed to the objective function.
|
| 410 |
+
xtol : float, optional
|
| 411 |
+
Tolerance (absolute) for termination.
|
| 412 |
+
rtol : float, optional
|
| 413 |
+
Tolerance (relative) for termination.
|
| 414 |
+
maxiter : int, optional
|
| 415 |
+
Maximum number of iterations.
|
| 416 |
+
x0 : float, required
|
| 417 |
+
Initial guess.
|
| 418 |
+
x1 : float, required
|
| 419 |
+
A second guess.
|
| 420 |
+
options: dict, optional
|
| 421 |
+
Specifies any method-specific options not covered above.
|
| 422 |
+
|
| 423 |
+
"""
|
| 424 |
+
pass
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
def _root_scalar_newton_doc():
|
| 428 |
+
r"""
|
| 429 |
+
Options
|
| 430 |
+
-------
|
| 431 |
+
args : tuple, optional
|
| 432 |
+
Extra arguments passed to the objective function and its derivative.
|
| 433 |
+
xtol : float, optional
|
| 434 |
+
Tolerance (absolute) for termination.
|
| 435 |
+
rtol : float, optional
|
| 436 |
+
Tolerance (relative) for termination.
|
| 437 |
+
maxiter : int, optional
|
| 438 |
+
Maximum number of iterations.
|
| 439 |
+
x0 : float, required
|
| 440 |
+
Initial guess.
|
| 441 |
+
fprime : bool or callable, optional
|
| 442 |
+
If `fprime` is a boolean and is True, `f` is assumed to return the
|
| 443 |
+
value of derivative along with the objective function.
|
| 444 |
+
`fprime` can also be a callable returning the derivative of `f`. In
|
| 445 |
+
this case, it must accept the same arguments as `f`.
|
| 446 |
+
options: dict, optional
|
| 447 |
+
Specifies any method-specific options not covered above.
|
| 448 |
+
|
| 449 |
+
"""
|
| 450 |
+
pass
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def _root_scalar_halley_doc():
|
| 454 |
+
r"""
|
| 455 |
+
Options
|
| 456 |
+
-------
|
| 457 |
+
args : tuple, optional
|
| 458 |
+
Extra arguments passed to the objective function and its derivatives.
|
| 459 |
+
xtol : float, optional
|
| 460 |
+
Tolerance (absolute) for termination.
|
| 461 |
+
rtol : float, optional
|
| 462 |
+
Tolerance (relative) for termination.
|
| 463 |
+
maxiter : int, optional
|
| 464 |
+
Maximum number of iterations.
|
| 465 |
+
x0 : float, required
|
| 466 |
+
Initial guess.
|
| 467 |
+
fprime : bool or callable, required
|
| 468 |
+
If `fprime` is a boolean and is True, `f` is assumed to return the
|
| 469 |
+
value of derivative along with the objective function.
|
| 470 |
+
`fprime` can also be a callable returning the derivative of `f`. In
|
| 471 |
+
this case, it must accept the same arguments as `f`.
|
| 472 |
+
fprime2 : bool or callable, required
|
| 473 |
+
If `fprime2` is a boolean and is True, `f` is assumed to return the
|
| 474 |
+
value of 1st and 2nd derivatives along with the objective function.
|
| 475 |
+
`fprime2` can also be a callable returning the 2nd derivative of `f`.
|
| 476 |
+
In this case, it must accept the same arguments as `f`.
|
| 477 |
+
options: dict, optional
|
| 478 |
+
Specifies any method-specific options not covered above.
|
| 479 |
+
|
| 480 |
+
"""
|
| 481 |
+
pass
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
def _root_scalar_ridder_doc():
|
| 485 |
+
r"""
|
| 486 |
+
Options
|
| 487 |
+
-------
|
| 488 |
+
args : tuple, optional
|
| 489 |
+
Extra arguments passed to the objective function.
|
| 490 |
+
bracket: A sequence of 2 floats, optional
|
| 491 |
+
An interval bracketing a root. `f(x, *args)` must have different
|
| 492 |
+
signs at the two endpoints.
|
| 493 |
+
xtol : float, optional
|
| 494 |
+
Tolerance (absolute) for termination.
|
| 495 |
+
rtol : float, optional
|
| 496 |
+
Tolerance (relative) for termination.
|
| 497 |
+
maxiter : int, optional
|
| 498 |
+
Maximum number of iterations.
|
| 499 |
+
options: dict, optional
|
| 500 |
+
Specifies any method-specific options not covered above.
|
| 501 |
+
|
| 502 |
+
"""
|
| 503 |
+
pass
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def _root_scalar_bisect_doc():
|
| 507 |
+
r"""
|
| 508 |
+
Options
|
| 509 |
+
-------
|
| 510 |
+
args : tuple, optional
|
| 511 |
+
Extra arguments passed to the objective function.
|
| 512 |
+
bracket: A sequence of 2 floats, optional
|
| 513 |
+
An interval bracketing a root. `f(x, *args)` must have different
|
| 514 |
+
signs at the two endpoints.
|
| 515 |
+
xtol : float, optional
|
| 516 |
+
Tolerance (absolute) for termination.
|
| 517 |
+
rtol : float, optional
|
| 518 |
+
Tolerance (relative) for termination.
|
| 519 |
+
maxiter : int, optional
|
| 520 |
+
Maximum number of iterations.
|
| 521 |
+
options: dict, optional
|
| 522 |
+
Specifies any method-specific options not covered above.
|
| 523 |
+
|
| 524 |
+
"""
|
| 525 |
+
pass
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo.py
ADDED
|
@@ -0,0 +1,1598 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""shgo: The simplicial homology global optimisation algorithm."""
|
| 2 |
+
from collections import namedtuple
|
| 3 |
+
import time
|
| 4 |
+
import logging
|
| 5 |
+
import warnings
|
| 6 |
+
import sys
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from scipy import spatial
|
| 11 |
+
from scipy.optimize import OptimizeResult, minimize, Bounds
|
| 12 |
+
from scipy.optimize._optimize import MemoizeJac
|
| 13 |
+
from scipy.optimize._constraints import new_bounds_to_old
|
| 14 |
+
from scipy.optimize._minimize import standardize_constraints
|
| 15 |
+
from scipy._lib._util import _FunctionWrapper
|
| 16 |
+
|
| 17 |
+
from scipy.optimize._shgo_lib._complex import Complex
|
| 18 |
+
|
| 19 |
+
__all__ = ['shgo']
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def shgo(
|
| 23 |
+
func, bounds, args=(), constraints=None, n=100, iters=1, callback=None,
|
| 24 |
+
minimizer_kwargs=None, options=None, sampling_method='simplicial', *,
|
| 25 |
+
workers=1
|
| 26 |
+
):
|
| 27 |
+
"""
|
| 28 |
+
Finds the global minimum of a function using SHG optimization.
|
| 29 |
+
|
| 30 |
+
SHGO stands for "simplicial homology global optimization".
|
| 31 |
+
|
| 32 |
+
Parameters
|
| 33 |
+
----------
|
| 34 |
+
func : callable
|
| 35 |
+
The objective function to be minimized. Must be in the form
|
| 36 |
+
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
|
| 37 |
+
and ``args`` is a tuple of any additional fixed parameters needed to
|
| 38 |
+
completely specify the function.
|
| 39 |
+
bounds : sequence or `Bounds`
|
| 40 |
+
Bounds for variables. There are two ways to specify the bounds:
|
| 41 |
+
|
| 42 |
+
1. Instance of `Bounds` class.
|
| 43 |
+
2. Sequence of ``(min, max)`` pairs for each element in `x`.
|
| 44 |
+
|
| 45 |
+
args : tuple, optional
|
| 46 |
+
Any additional fixed parameters needed to completely specify the
|
| 47 |
+
objective function.
|
| 48 |
+
constraints : {Constraint, dict} or List of {Constraint, dict}, optional
|
| 49 |
+
Constraints definition. Only for COBYLA, COBYQA, SLSQP and trust-constr.
|
| 50 |
+
See the tutorial [5]_ for further details on specifying constraints.
|
| 51 |
+
|
| 52 |
+
.. note::
|
| 53 |
+
|
| 54 |
+
Only COBYLA, COBYQA, SLSQP, and trust-constr local minimize methods
|
| 55 |
+
currently support constraint arguments. If the ``constraints``
|
| 56 |
+
sequence used in the local optimization problem is not defined in
|
| 57 |
+
``minimizer_kwargs`` and a constrained method is used then the
|
| 58 |
+
global ``constraints`` will be used.
|
| 59 |
+
(Defining a ``constraints`` sequence in ``minimizer_kwargs``
|
| 60 |
+
means that ``constraints`` will not be added so if equality
|
| 61 |
+
constraints and so forth need to be added then the inequality
|
| 62 |
+
functions in ``constraints`` need to be added to
|
| 63 |
+
``minimizer_kwargs`` too).
|
| 64 |
+
COBYLA only supports inequality constraints.
|
| 65 |
+
|
| 66 |
+
.. versionchanged:: 1.11.0
|
| 67 |
+
|
| 68 |
+
``constraints`` accepts `NonlinearConstraint`, `LinearConstraint`.
|
| 69 |
+
|
| 70 |
+
n : int, optional
|
| 71 |
+
Number of sampling points used in the construction of the simplicial
|
| 72 |
+
complex. For the default ``simplicial`` sampling method 2**dim + 1
|
| 73 |
+
sampling points are generated instead of the default `n=100`. For all
|
| 74 |
+
other specified values `n` sampling points are generated. For
|
| 75 |
+
``sobol``, ``halton`` and other arbitrary `sampling_methods` `n=100` or
|
| 76 |
+
another specified number of sampling points are generated.
|
| 77 |
+
iters : int, optional
|
| 78 |
+
Number of iterations used in the construction of the simplicial
|
| 79 |
+
complex. Default is 1.
|
| 80 |
+
callback : callable, optional
|
| 81 |
+
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
|
| 82 |
+
current parameter vector.
|
| 83 |
+
minimizer_kwargs : dict, optional
|
| 84 |
+
Extra keyword arguments to be passed to the minimizer
|
| 85 |
+
``scipy.optimize.minimize`` Some important options could be:
|
| 86 |
+
|
| 87 |
+
* method : str
|
| 88 |
+
The minimization method. If not given, chosen to be one of
|
| 89 |
+
BFGS, L-BFGS-B, SLSQP, depending on whether or not the
|
| 90 |
+
problem has constraints or bounds.
|
| 91 |
+
* args : tuple
|
| 92 |
+
Extra arguments passed to the objective function (``func``) and
|
| 93 |
+
its derivatives (Jacobian, Hessian).
|
| 94 |
+
* options : dict, optional
|
| 95 |
+
Note that by default the tolerance is specified as
|
| 96 |
+
``{ftol: 1e-12}``
|
| 97 |
+
|
| 98 |
+
options : dict, optional
|
| 99 |
+
A dictionary of solver options. Many of the options specified for the
|
| 100 |
+
global routine are also passed to the scipy.optimize.minimize routine.
|
| 101 |
+
The options that are also passed to the local routine are marked with
|
| 102 |
+
"(L)".
|
| 103 |
+
|
| 104 |
+
Stopping criteria, the algorithm will terminate if any of the specified
|
| 105 |
+
criteria are met. However, the default algorithm does not require any
|
| 106 |
+
to be specified:
|
| 107 |
+
|
| 108 |
+
* maxfev : int (L)
|
| 109 |
+
Maximum number of function evaluations in the feasible domain.
|
| 110 |
+
(Note only methods that support this option will terminate
|
| 111 |
+
the routine at precisely exact specified value. Otherwise the
|
| 112 |
+
criterion will only terminate during a global iteration)
|
| 113 |
+
* f_min
|
| 114 |
+
Specify the minimum objective function value, if it is known.
|
| 115 |
+
* f_tol : float
|
| 116 |
+
Precision goal for the value of f in the stopping
|
| 117 |
+
criterion. Note that the global routine will also
|
| 118 |
+
terminate if a sampling point in the global routine is
|
| 119 |
+
within this tolerance.
|
| 120 |
+
* maxiter : int
|
| 121 |
+
Maximum number of iterations to perform.
|
| 122 |
+
* maxev : int
|
| 123 |
+
Maximum number of sampling evaluations to perform (includes
|
| 124 |
+
searching in infeasible points).
|
| 125 |
+
* maxtime : float
|
| 126 |
+
Maximum processing runtime allowed
|
| 127 |
+
* minhgrd : int
|
| 128 |
+
Minimum homology group rank differential. The homology group of the
|
| 129 |
+
objective function is calculated (approximately) during every
|
| 130 |
+
iteration. The rank of this group has a one-to-one correspondence
|
| 131 |
+
with the number of locally convex subdomains in the objective
|
| 132 |
+
function (after adequate sampling points each of these subdomains
|
| 133 |
+
contain a unique global minimum). If the difference in the hgr is 0
|
| 134 |
+
between iterations for ``maxhgrd`` specified iterations the
|
| 135 |
+
algorithm will terminate.
|
| 136 |
+
|
| 137 |
+
Objective function knowledge:
|
| 138 |
+
|
| 139 |
+
* symmetry : list or bool
|
| 140 |
+
Specify if the objective function contains symmetric variables.
|
| 141 |
+
The search space (and therefore performance) is decreased by up to
|
| 142 |
+
O(n!) times in the fully symmetric case. If `True` is specified
|
| 143 |
+
then all variables will be set symmetric to the first variable.
|
| 144 |
+
Default
|
| 145 |
+
is set to False.
|
| 146 |
+
|
| 147 |
+
E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
|
| 148 |
+
|
| 149 |
+
In this equation x_2 and x_3 are symmetric to x_1, while x_5 and
|
| 150 |
+
x_6 are symmetric to x_4, this can be specified to the solver as:
|
| 151 |
+
|
| 152 |
+
symmetry = [0, # Variable 1
|
| 153 |
+
0, # symmetric to variable 1
|
| 154 |
+
0, # symmetric to variable 1
|
| 155 |
+
3, # Variable 4
|
| 156 |
+
3, # symmetric to variable 4
|
| 157 |
+
3, # symmetric to variable 4
|
| 158 |
+
]
|
| 159 |
+
|
| 160 |
+
* jac : bool or callable, optional
|
| 161 |
+
Jacobian (gradient) of objective function. Only for CG, BFGS,
|
| 162 |
+
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If ``jac`` is a
|
| 163 |
+
boolean and is True, ``fun`` is assumed to return the gradient
|
| 164 |
+
along with the objective function. If False, the gradient will be
|
| 165 |
+
estimated numerically. ``jac`` can also be a callable returning the
|
| 166 |
+
gradient of the objective. In this case, it must accept the same
|
| 167 |
+
arguments as ``fun``. (Passed to `scipy.optimize.minimize`
|
| 168 |
+
automatically)
|
| 169 |
+
|
| 170 |
+
* hess, hessp : callable, optional
|
| 171 |
+
Hessian (matrix of second-order derivatives) of objective function
|
| 172 |
+
or Hessian of objective function times an arbitrary vector p.
|
| 173 |
+
Only for Newton-CG, dogleg, trust-ncg. Only one of ``hessp`` or
|
| 174 |
+
``hess`` needs to be given. If ``hess`` is provided, then
|
| 175 |
+
``hessp`` will be ignored. If neither ``hess`` nor ``hessp`` is
|
| 176 |
+
provided, then the Hessian product will be approximated using
|
| 177 |
+
finite differences on ``jac``. ``hessp`` must compute the Hessian
|
| 178 |
+
times an arbitrary vector. (Passed to `scipy.optimize.minimize`
|
| 179 |
+
automatically)
|
| 180 |
+
|
| 181 |
+
Algorithm settings:
|
| 182 |
+
|
| 183 |
+
* minimize_every_iter : bool
|
| 184 |
+
If True then promising global sampling points will be passed to a
|
| 185 |
+
local minimization routine every iteration. If True then only the
|
| 186 |
+
final minimizer pool will be run. Defaults to True.
|
| 187 |
+
* local_iter : int
|
| 188 |
+
Only evaluate a few of the best minimizer pool candidates every
|
| 189 |
+
iteration. If False all potential points are passed to the local
|
| 190 |
+
minimization routine.
|
| 191 |
+
* infty_constraints : bool
|
| 192 |
+
If True then any sampling points generated which are outside will
|
| 193 |
+
the feasible domain will be saved and given an objective function
|
| 194 |
+
value of ``inf``. If False then these points will be discarded.
|
| 195 |
+
Using this functionality could lead to higher performance with
|
| 196 |
+
respect to function evaluations before the global minimum is found,
|
| 197 |
+
specifying False will use less memory at the cost of a slight
|
| 198 |
+
decrease in performance. Defaults to True.
|
| 199 |
+
|
| 200 |
+
Feedback:
|
| 201 |
+
|
| 202 |
+
* disp : bool (L)
|
| 203 |
+
Set to True to print convergence messages.
|
| 204 |
+
|
| 205 |
+
sampling_method : str or function, optional
|
| 206 |
+
Current built in sampling method options are ``halton``, ``sobol`` and
|
| 207 |
+
``simplicial``. The default ``simplicial`` provides
|
| 208 |
+
the theoretical guarantee of convergence to the global minimum in
|
| 209 |
+
finite time. ``halton`` and ``sobol`` method are faster in terms of
|
| 210 |
+
sampling point generation at the cost of the loss of
|
| 211 |
+
guaranteed convergence. It is more appropriate for most "easier"
|
| 212 |
+
problems where the convergence is relatively fast.
|
| 213 |
+
User defined sampling functions must accept two arguments of ``n``
|
| 214 |
+
sampling points of dimension ``dim`` per call and output an array of
|
| 215 |
+
sampling points with shape `n x dim`.
|
| 216 |
+
|
| 217 |
+
workers : int or map-like callable, optional
|
| 218 |
+
Sample and run the local serial minimizations in parallel.
|
| 219 |
+
Supply -1 to use all available CPU cores, or an int to use
|
| 220 |
+
that many Processes (uses `multiprocessing.Pool <multiprocessing>`).
|
| 221 |
+
|
| 222 |
+
Alternatively supply a map-like callable, such as
|
| 223 |
+
`multiprocessing.Pool.map` for parallel evaluation.
|
| 224 |
+
This evaluation is carried out as ``workers(func, iterable)``.
|
| 225 |
+
Requires that `func` be pickleable.
|
| 226 |
+
|
| 227 |
+
.. versionadded:: 1.11.0
|
| 228 |
+
|
| 229 |
+
Returns
|
| 230 |
+
-------
|
| 231 |
+
res : OptimizeResult
|
| 232 |
+
The optimization result represented as a `OptimizeResult` object.
|
| 233 |
+
Important attributes are:
|
| 234 |
+
``x`` the solution array corresponding to the global minimum,
|
| 235 |
+
``fun`` the function output at the global solution,
|
| 236 |
+
``xl`` an ordered list of local minima solutions,
|
| 237 |
+
``funl`` the function output at the corresponding local solutions,
|
| 238 |
+
``success`` a Boolean flag indicating if the optimizer exited
|
| 239 |
+
successfully,
|
| 240 |
+
``message`` which describes the cause of the termination,
|
| 241 |
+
``nfev`` the total number of objective function evaluations including
|
| 242 |
+
the sampling calls,
|
| 243 |
+
``nlfev`` the total number of objective function evaluations
|
| 244 |
+
culminating from all local search optimizations,
|
| 245 |
+
``nit`` number of iterations performed by the global routine.
|
| 246 |
+
|
| 247 |
+
Notes
|
| 248 |
+
-----
|
| 249 |
+
Global optimization using simplicial homology global optimization [1]_.
|
| 250 |
+
Appropriate for solving general purpose NLP and blackbox optimization
|
| 251 |
+
problems to global optimality (low-dimensional problems).
|
| 252 |
+
|
| 253 |
+
In general, the optimization problems are of the form::
|
| 254 |
+
|
| 255 |
+
minimize f(x) subject to
|
| 256 |
+
|
| 257 |
+
g_i(x) >= 0, i = 1,...,m
|
| 258 |
+
h_j(x) = 0, j = 1,...,p
|
| 259 |
+
|
| 260 |
+
where x is a vector of one or more variables. ``f(x)`` is the objective
|
| 261 |
+
function ``R^n -> R``, ``g_i(x)`` are the inequality constraints, and
|
| 262 |
+
``h_j(x)`` are the equality constraints.
|
| 263 |
+
|
| 264 |
+
Optionally, the lower and upper bounds for each element in x can also be
|
| 265 |
+
specified using the `bounds` argument.
|
| 266 |
+
|
| 267 |
+
While most of the theoretical advantages of SHGO are only proven for when
|
| 268 |
+
``f(x)`` is a Lipschitz smooth function, the algorithm is also proven to
|
| 269 |
+
converge to the global optimum for the more general case where ``f(x)`` is
|
| 270 |
+
non-continuous, non-convex and non-smooth, if the default sampling method
|
| 271 |
+
is used [1]_.
|
| 272 |
+
|
| 273 |
+
The local search method may be specified using the ``minimizer_kwargs``
|
| 274 |
+
parameter which is passed on to ``scipy.optimize.minimize``. By default,
|
| 275 |
+
the ``SLSQP`` method is used. In general, it is recommended to use the
|
| 276 |
+
``SLSQP``, ``COBYLA``, or ``COBYQA`` local minimization if inequality
|
| 277 |
+
constraints are defined for the problem since the other methods do not use
|
| 278 |
+
constraints.
|
| 279 |
+
|
| 280 |
+
The ``halton`` and ``sobol`` method points are generated using
|
| 281 |
+
`scipy.stats.qmc`. Any other QMC method could be used.
|
| 282 |
+
|
| 283 |
+
References
|
| 284 |
+
----------
|
| 285 |
+
.. [1] Endres, SC, Sandrock, C, Focke, WW (2018) "A simplicial homology
|
| 286 |
+
algorithm for lipschitz optimisation", Journal of Global
|
| 287 |
+
Optimization.
|
| 288 |
+
.. [2] Joe, SW and Kuo, FY (2008) "Constructing Sobol' sequences with
|
| 289 |
+
better two-dimensional projections", SIAM J. Sci. Comput. 30,
|
| 290 |
+
2635-2654.
|
| 291 |
+
.. [3] Hock, W and Schittkowski, K (1981) "Test examples for nonlinear
|
| 292 |
+
programming codes", Lecture Notes in Economics and Mathematical
|
| 293 |
+
Systems, 187. Springer-Verlag, New York.
|
| 294 |
+
http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
|
| 295 |
+
.. [4] Wales, DJ (2015) "Perspective: Insight into reaction coordinates and
|
| 296 |
+
dynamics from the potential energy landscape",
|
| 297 |
+
Journal of Chemical Physics, 142(13), 2015.
|
| 298 |
+
.. [5] https://docs.scipy.org/doc/scipy/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize
|
| 299 |
+
|
| 300 |
+
Examples
|
| 301 |
+
--------
|
| 302 |
+
First consider the problem of minimizing the Rosenbrock function, `rosen`:
|
| 303 |
+
|
| 304 |
+
>>> from scipy.optimize import rosen, shgo
|
| 305 |
+
>>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
|
| 306 |
+
>>> result = shgo(rosen, bounds)
|
| 307 |
+
>>> result.x, result.fun
|
| 308 |
+
(array([1., 1., 1., 1., 1.]), 2.920392374190081e-18)
|
| 309 |
+
|
| 310 |
+
Note that bounds determine the dimensionality of the objective
|
| 311 |
+
function and is therefore a required input, however you can specify
|
| 312 |
+
empty bounds using ``None`` or objects like ``np.inf`` which will be
|
| 313 |
+
converted to large float numbers.
|
| 314 |
+
|
| 315 |
+
>>> bounds = [(None, None), ]*4
|
| 316 |
+
>>> result = shgo(rosen, bounds)
|
| 317 |
+
>>> result.x
|
| 318 |
+
array([0.99999851, 0.99999704, 0.99999411, 0.9999882 ])
|
| 319 |
+
|
| 320 |
+
Next, we consider the Eggholder function, a problem with several local
|
| 321 |
+
minima and one global minimum. We will demonstrate the use of arguments and
|
| 322 |
+
the capabilities of `shgo`.
|
| 323 |
+
(https://en.wikipedia.org/wiki/Test_functions_for_optimization)
|
| 324 |
+
|
| 325 |
+
>>> import numpy as np
|
| 326 |
+
>>> def eggholder(x):
|
| 327 |
+
... return (-(x[1] + 47.0)
|
| 328 |
+
... * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
|
| 329 |
+
... - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
|
| 330 |
+
... )
|
| 331 |
+
...
|
| 332 |
+
>>> bounds = [(-512, 512), (-512, 512)]
|
| 333 |
+
|
| 334 |
+
`shgo` has built-in low discrepancy sampling sequences. First, we will
|
| 335 |
+
input 64 initial sampling points of the *Sobol'* sequence:
|
| 336 |
+
|
| 337 |
+
>>> result = shgo(eggholder, bounds, n=64, sampling_method='sobol')
|
| 338 |
+
>>> result.x, result.fun
|
| 339 |
+
(array([512. , 404.23180824]), -959.6406627208397)
|
| 340 |
+
|
| 341 |
+
`shgo` also has a return for any other local minima that was found, these
|
| 342 |
+
can be called using:
|
| 343 |
+
|
| 344 |
+
>>> result.xl
|
| 345 |
+
array([[ 512. , 404.23180824],
|
| 346 |
+
[ 283.0759062 , -487.12565635],
|
| 347 |
+
[-294.66820039, -462.01964031],
|
| 348 |
+
[-105.87688911, 423.15323845],
|
| 349 |
+
[-242.97926 , 274.38030925],
|
| 350 |
+
[-506.25823477, 6.3131022 ],
|
| 351 |
+
[-408.71980731, -156.10116949],
|
| 352 |
+
[ 150.23207937, 301.31376595],
|
| 353 |
+
[ 91.00920901, -391.283763 ],
|
| 354 |
+
[ 202.89662724, -269.38043241],
|
| 355 |
+
[ 361.66623976, -106.96493868],
|
| 356 |
+
[-219.40612786, -244.06020508]])
|
| 357 |
+
|
| 358 |
+
>>> result.funl
|
| 359 |
+
array([-959.64066272, -718.16745962, -704.80659592, -565.99778097,
|
| 360 |
+
-559.78685655, -557.36868733, -507.87385942, -493.9605115 ,
|
| 361 |
+
-426.48799655, -421.15571437, -419.31194957, -410.98477763])
|
| 362 |
+
|
| 363 |
+
These results are useful in applications where there are many global minima
|
| 364 |
+
and the values of other global minima are desired or where the local minima
|
| 365 |
+
can provide insight into the system (for example morphologies
|
| 366 |
+
in physical chemistry [4]_).
|
| 367 |
+
|
| 368 |
+
If we want to find a larger number of local minima, we can increase the
|
| 369 |
+
number of sampling points or the number of iterations. We'll increase the
|
| 370 |
+
number of sampling points to 64 and the number of iterations from the
|
| 371 |
+
default of 1 to 3. Using ``simplicial`` this would have given us
|
| 372 |
+
64 x 3 = 192 initial sampling points.
|
| 373 |
+
|
| 374 |
+
>>> result_2 = shgo(eggholder,
|
| 375 |
+
... bounds, n=64, iters=3, sampling_method='sobol')
|
| 376 |
+
>>> len(result.xl), len(result_2.xl)
|
| 377 |
+
(12, 23)
|
| 378 |
+
|
| 379 |
+
Note the difference between, e.g., ``n=192, iters=1`` and ``n=64,
|
| 380 |
+
iters=3``.
|
| 381 |
+
In the first case the promising points contained in the minimiser pool
|
| 382 |
+
are processed only once. In the latter case it is processed every 64
|
| 383 |
+
sampling points for a total of 3 times.
|
| 384 |
+
|
| 385 |
+
To demonstrate solving problems with non-linear constraints consider the
|
| 386 |
+
following example from Hock and Schittkowski problem 73 (cattle-feed)
|
| 387 |
+
[3]_::
|
| 388 |
+
|
| 389 |
+
minimize: f = 24.55 * x_1 + 26.75 * x_2 + 39 * x_3 + 40.50 * x_4
|
| 390 |
+
|
| 391 |
+
subject to: 2.3 * x_1 + 5.6 * x_2 + 11.1 * x_3 + 1.3 * x_4 - 5 >= 0,
|
| 392 |
+
|
| 393 |
+
12 * x_1 + 11.9 * x_2 + 41.8 * x_3 + 52.1 * x_4 - 21
|
| 394 |
+
-1.645 * sqrt(0.28 * x_1**2 + 0.19 * x_2**2 +
|
| 395 |
+
20.5 * x_3**2 + 0.62 * x_4**2) >= 0,
|
| 396 |
+
|
| 397 |
+
x_1 + x_2 + x_3 + x_4 - 1 == 0,
|
| 398 |
+
|
| 399 |
+
1 >= x_i >= 0 for all i
|
| 400 |
+
|
| 401 |
+
The approximate answer given in [3]_ is::
|
| 402 |
+
|
| 403 |
+
f([0.6355216, -0.12e-11, 0.3127019, 0.05177655]) = 29.894378
|
| 404 |
+
|
| 405 |
+
>>> def f(x): # (cattle-feed)
|
| 406 |
+
... return 24.55*x[0] + 26.75*x[1] + 39*x[2] + 40.50*x[3]
|
| 407 |
+
...
|
| 408 |
+
>>> def g1(x):
|
| 409 |
+
... return 2.3*x[0] + 5.6*x[1] + 11.1*x[2] + 1.3*x[3] - 5 # >=0
|
| 410 |
+
...
|
| 411 |
+
>>> def g2(x):
|
| 412 |
+
... return (12*x[0] + 11.9*x[1] +41.8*x[2] + 52.1*x[3] - 21
|
| 413 |
+
... - 1.645 * np.sqrt(0.28*x[0]**2 + 0.19*x[1]**2
|
| 414 |
+
... + 20.5*x[2]**2 + 0.62*x[3]**2)
|
| 415 |
+
... ) # >=0
|
| 416 |
+
...
|
| 417 |
+
>>> def h1(x):
|
| 418 |
+
... return x[0] + x[1] + x[2] + x[3] - 1 # == 0
|
| 419 |
+
...
|
| 420 |
+
>>> cons = ({'type': 'ineq', 'fun': g1},
|
| 421 |
+
... {'type': 'ineq', 'fun': g2},
|
| 422 |
+
... {'type': 'eq', 'fun': h1})
|
| 423 |
+
>>> bounds = [(0, 1.0),]*4
|
| 424 |
+
>>> res = shgo(f, bounds, n=150, constraints=cons)
|
| 425 |
+
>>> res
|
| 426 |
+
message: Optimization terminated successfully.
|
| 427 |
+
success: True
|
| 428 |
+
fun: 29.894378159142136
|
| 429 |
+
funl: [ 2.989e+01]
|
| 430 |
+
x: [ 6.355e-01 1.137e-13 3.127e-01 5.178e-02] # may vary
|
| 431 |
+
xl: [[ 6.355e-01 1.137e-13 3.127e-01 5.178e-02]] # may vary
|
| 432 |
+
nit: 1
|
| 433 |
+
nfev: 142 # may vary
|
| 434 |
+
nlfev: 35 # may vary
|
| 435 |
+
nljev: 5
|
| 436 |
+
nlhev: 0
|
| 437 |
+
|
| 438 |
+
>>> g1(res.x), g2(res.x), h1(res.x)
|
| 439 |
+
(-5.062616992290714e-14, -2.9594104944408173e-12, 0.0)
|
| 440 |
+
|
| 441 |
+
"""
|
| 442 |
+
# if necessary, convert bounds class to old bounds
|
| 443 |
+
if isinstance(bounds, Bounds):
|
| 444 |
+
bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
|
| 445 |
+
|
| 446 |
+
# Initiate SHGO class
|
| 447 |
+
# use in context manager to make sure that any parallelization
|
| 448 |
+
# resources are freed.
|
| 449 |
+
with SHGO(func, bounds, args=args, constraints=constraints, n=n,
|
| 450 |
+
iters=iters, callback=callback,
|
| 451 |
+
minimizer_kwargs=minimizer_kwargs,
|
| 452 |
+
options=options, sampling_method=sampling_method,
|
| 453 |
+
workers=workers) as shc:
|
| 454 |
+
# Run the algorithm, process results and test success
|
| 455 |
+
shc.iterate_all()
|
| 456 |
+
|
| 457 |
+
if not shc.break_routine:
|
| 458 |
+
if shc.disp:
|
| 459 |
+
logging.info("Successfully completed construction of complex.")
|
| 460 |
+
|
| 461 |
+
# Test post iterations success
|
| 462 |
+
if len(shc.LMC.xl_maps) == 0:
|
| 463 |
+
# If sampling failed to find pool, return lowest sampled point
|
| 464 |
+
# with a warning
|
| 465 |
+
shc.find_lowest_vertex()
|
| 466 |
+
shc.break_routine = True
|
| 467 |
+
shc.fail_routine(mes="Failed to find a feasible minimizer point. "
|
| 468 |
+
f"Lowest sampling point = {shc.f_lowest}")
|
| 469 |
+
shc.res.fun = shc.f_lowest
|
| 470 |
+
shc.res.x = shc.x_lowest
|
| 471 |
+
shc.res.nfev = shc.fn
|
| 472 |
+
shc.res.tnev = shc.n_sampled
|
| 473 |
+
else:
|
| 474 |
+
# Test that the optimal solutions do not violate any constraints
|
| 475 |
+
pass # TODO
|
| 476 |
+
|
| 477 |
+
# Confirm the routine ran successfully
|
| 478 |
+
if not shc.break_routine:
|
| 479 |
+
shc.res.message = 'Optimization terminated successfully.'
|
| 480 |
+
shc.res.success = True
|
| 481 |
+
|
| 482 |
+
# Return the final results
|
| 483 |
+
return shc.res
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
class SHGO:
|
| 487 |
+
def __init__(self, func, bounds, args=(), constraints=None, n=None,
|
| 488 |
+
iters=None, callback=None, minimizer_kwargs=None,
|
| 489 |
+
options=None, sampling_method='simplicial', workers=1):
|
| 490 |
+
from scipy.stats import qmc
|
| 491 |
+
# Input checks
|
| 492 |
+
methods = ['halton', 'sobol', 'simplicial']
|
| 493 |
+
if isinstance(sampling_method, str) and sampling_method not in methods:
|
| 494 |
+
raise ValueError(("Unknown sampling_method specified."
|
| 495 |
+
" Valid methods: {}").format(', '.join(methods)))
|
| 496 |
+
|
| 497 |
+
# Split obj func if given with Jac
|
| 498 |
+
try:
|
| 499 |
+
if ((minimizer_kwargs['jac'] is True) and
|
| 500 |
+
(not callable(minimizer_kwargs['jac']))):
|
| 501 |
+
self.func = MemoizeJac(func)
|
| 502 |
+
jac = self.func.derivative
|
| 503 |
+
minimizer_kwargs['jac'] = jac
|
| 504 |
+
func = self.func # .fun
|
| 505 |
+
else:
|
| 506 |
+
self.func = func # Normal definition of objective function
|
| 507 |
+
except (TypeError, KeyError):
|
| 508 |
+
self.func = func # Normal definition of objective function
|
| 509 |
+
|
| 510 |
+
# Initiate class
|
| 511 |
+
self.func = _FunctionWrapper(func, args)
|
| 512 |
+
self.bounds = bounds
|
| 513 |
+
self.args = args
|
| 514 |
+
self.callback = callback
|
| 515 |
+
|
| 516 |
+
# Bounds
|
| 517 |
+
abound = np.array(bounds, float)
|
| 518 |
+
self.dim = np.shape(abound)[0] # Dimensionality of problem
|
| 519 |
+
|
| 520 |
+
# Set none finite values to large floats
|
| 521 |
+
infind = ~np.isfinite(abound)
|
| 522 |
+
abound[infind[:, 0], 0] = -1e50
|
| 523 |
+
abound[infind[:, 1], 1] = 1e50
|
| 524 |
+
|
| 525 |
+
# Check if bounds are correctly specified
|
| 526 |
+
bnderr = abound[:, 0] > abound[:, 1]
|
| 527 |
+
if bnderr.any():
|
| 528 |
+
raise ValueError('Error: lb > ub in bounds {}.'
|
| 529 |
+
.format(', '.join(str(b) for b in bnderr)))
|
| 530 |
+
|
| 531 |
+
self.bounds = abound
|
| 532 |
+
|
| 533 |
+
# Constraints
|
| 534 |
+
# Process constraint dict sequence:
|
| 535 |
+
self.constraints = constraints
|
| 536 |
+
if constraints is not None:
|
| 537 |
+
self.min_cons = constraints
|
| 538 |
+
self.g_cons = []
|
| 539 |
+
self.g_args = []
|
| 540 |
+
|
| 541 |
+
# shgo internals deals with old-style constraints
|
| 542 |
+
# self.constraints is used to create Complex, so need
|
| 543 |
+
# to be stored internally in old-style.
|
| 544 |
+
# `minimize` takes care of normalising these constraints
|
| 545 |
+
# for slsqp/cobyla/cobyqa/trust-constr.
|
| 546 |
+
self.constraints = standardize_constraints(
|
| 547 |
+
constraints,
|
| 548 |
+
np.empty(self.dim, float),
|
| 549 |
+
'old'
|
| 550 |
+
)
|
| 551 |
+
for cons in self.constraints:
|
| 552 |
+
if cons['type'] in ('ineq'):
|
| 553 |
+
self.g_cons.append(cons['fun'])
|
| 554 |
+
try:
|
| 555 |
+
self.g_args.append(cons['args'])
|
| 556 |
+
except KeyError:
|
| 557 |
+
self.g_args.append(())
|
| 558 |
+
self.g_cons = tuple(self.g_cons)
|
| 559 |
+
self.g_args = tuple(self.g_args)
|
| 560 |
+
else:
|
| 561 |
+
self.g_cons = None
|
| 562 |
+
self.g_args = None
|
| 563 |
+
|
| 564 |
+
# Define local minimization keyword arguments
|
| 565 |
+
# Start with defaults
|
| 566 |
+
self.minimizer_kwargs = {'method': 'SLSQP',
|
| 567 |
+
'bounds': self.bounds,
|
| 568 |
+
'options': {},
|
| 569 |
+
'callback': self.callback
|
| 570 |
+
}
|
| 571 |
+
if minimizer_kwargs is not None:
|
| 572 |
+
# Overwrite with supplied values
|
| 573 |
+
self.minimizer_kwargs.update(minimizer_kwargs)
|
| 574 |
+
|
| 575 |
+
else:
|
| 576 |
+
self.minimizer_kwargs['options'] = {'ftol': 1e-12}
|
| 577 |
+
|
| 578 |
+
if (
|
| 579 |
+
self.minimizer_kwargs['method'].lower() in ('slsqp', 'cobyla',
|
| 580 |
+
'cobyqa',
|
| 581 |
+
'trust-constr')
|
| 582 |
+
and (
|
| 583 |
+
minimizer_kwargs is not None and
|
| 584 |
+
'constraints' not in minimizer_kwargs and
|
| 585 |
+
constraints is not None
|
| 586 |
+
) or
|
| 587 |
+
(self.g_cons is not None)
|
| 588 |
+
):
|
| 589 |
+
self.minimizer_kwargs['constraints'] = self.min_cons
|
| 590 |
+
|
| 591 |
+
# Process options dict
|
| 592 |
+
if options is not None:
|
| 593 |
+
self.init_options(options)
|
| 594 |
+
else: # Default settings:
|
| 595 |
+
self.f_min_true = None
|
| 596 |
+
self.minimize_every_iter = True
|
| 597 |
+
|
| 598 |
+
# Algorithm limits
|
| 599 |
+
self.maxiter = None
|
| 600 |
+
self.maxfev = None
|
| 601 |
+
self.maxev = None
|
| 602 |
+
self.maxtime = None
|
| 603 |
+
self.f_min_true = None
|
| 604 |
+
self.minhgrd = None
|
| 605 |
+
|
| 606 |
+
# Objective function knowledge
|
| 607 |
+
self.symmetry = None
|
| 608 |
+
|
| 609 |
+
# Algorithm functionality
|
| 610 |
+
self.infty_cons_sampl = True
|
| 611 |
+
self.local_iter = False
|
| 612 |
+
|
| 613 |
+
# Feedback
|
| 614 |
+
self.disp = False
|
| 615 |
+
|
| 616 |
+
# Remove unknown arguments in self.minimizer_kwargs
|
| 617 |
+
# Start with arguments all the solvers have in common
|
| 618 |
+
self.min_solver_args = ['fun', 'x0', 'args',
|
| 619 |
+
'callback', 'options', 'method']
|
| 620 |
+
# then add the ones unique to specific solvers
|
| 621 |
+
solver_args = {
|
| 622 |
+
'_custom': ['jac', 'hess', 'hessp', 'bounds', 'constraints'],
|
| 623 |
+
'nelder-mead': [],
|
| 624 |
+
'powell': [],
|
| 625 |
+
'cg': ['jac'],
|
| 626 |
+
'bfgs': ['jac'],
|
| 627 |
+
'newton-cg': ['jac', 'hess', 'hessp'],
|
| 628 |
+
'l-bfgs-b': ['jac', 'bounds'],
|
| 629 |
+
'tnc': ['jac', 'bounds'],
|
| 630 |
+
'cobyla': ['constraints', 'catol'],
|
| 631 |
+
'cobyqa': ['bounds', 'constraints', 'feasibility_tol'],
|
| 632 |
+
'slsqp': ['jac', 'bounds', 'constraints'],
|
| 633 |
+
'dogleg': ['jac', 'hess'],
|
| 634 |
+
'trust-ncg': ['jac', 'hess', 'hessp'],
|
| 635 |
+
'trust-krylov': ['jac', 'hess', 'hessp'],
|
| 636 |
+
'trust-exact': ['jac', 'hess'],
|
| 637 |
+
'trust-constr': ['jac', 'hess', 'hessp', 'constraints'],
|
| 638 |
+
}
|
| 639 |
+
method = self.minimizer_kwargs['method']
|
| 640 |
+
self.min_solver_args += solver_args[method.lower()]
|
| 641 |
+
|
| 642 |
+
# Only retain the known arguments
|
| 643 |
+
def _restrict_to_keys(dictionary, goodkeys):
|
| 644 |
+
"""Remove keys from dictionary if not in goodkeys - inplace"""
|
| 645 |
+
existingkeys = set(dictionary)
|
| 646 |
+
for key in existingkeys - set(goodkeys):
|
| 647 |
+
dictionary.pop(key, None)
|
| 648 |
+
|
| 649 |
+
_restrict_to_keys(self.minimizer_kwargs, self.min_solver_args)
|
| 650 |
+
_restrict_to_keys(self.minimizer_kwargs['options'],
|
| 651 |
+
self.min_solver_args + ['ftol'])
|
| 652 |
+
|
| 653 |
+
# Algorithm controls
|
| 654 |
+
# Global controls
|
| 655 |
+
self.stop_global = False # Used in the stopping_criteria method
|
| 656 |
+
self.break_routine = False # Break the algorithm globally
|
| 657 |
+
self.iters = iters # Iterations to be ran
|
| 658 |
+
self.iters_done = 0 # Iterations completed
|
| 659 |
+
self.n = n # Sampling points per iteration
|
| 660 |
+
self.nc = 0 # n # Sampling points to sample in current iteration
|
| 661 |
+
self.n_prc = 0 # Processed points (used to track Delaunay iters)
|
| 662 |
+
self.n_sampled = 0 # To track no. of sampling points already generated
|
| 663 |
+
self.fn = 0 # Number of feasible sampling points evaluations performed
|
| 664 |
+
self.hgr = 0 # Homology group rank
|
| 665 |
+
# Initially attempt to build the triangulation incrementally:
|
| 666 |
+
self.qhull_incremental = True
|
| 667 |
+
|
| 668 |
+
# Default settings if no sampling criteria.
|
| 669 |
+
if (self.n is None) and (self.iters is None) \
|
| 670 |
+
and (sampling_method == 'simplicial'):
|
| 671 |
+
self.n = 2 ** self.dim + 1
|
| 672 |
+
self.nc = 0 # self.n
|
| 673 |
+
if self.iters is None:
|
| 674 |
+
self.iters = 1
|
| 675 |
+
if (self.n is None) and not (sampling_method == 'simplicial'):
|
| 676 |
+
self.n = self.n = 100
|
| 677 |
+
self.nc = 0 # self.n
|
| 678 |
+
if (self.n == 100) and (sampling_method == 'simplicial'):
|
| 679 |
+
self.n = 2 ** self.dim + 1
|
| 680 |
+
|
| 681 |
+
if not ((self.maxiter is None) and (self.maxfev is None) and (
|
| 682 |
+
self.maxev is None)
|
| 683 |
+
and (self.minhgrd is None) and (self.f_min_true is None)):
|
| 684 |
+
self.iters = None
|
| 685 |
+
|
| 686 |
+
# Set complex construction mode based on a provided stopping criteria:
|
| 687 |
+
# Initialise sampling Complex and function cache
|
| 688 |
+
# Note that sfield_args=() since args are already wrapped in self.func
|
| 689 |
+
# using the_FunctionWrapper class.
|
| 690 |
+
self.HC = Complex(dim=self.dim, domain=self.bounds,
|
| 691 |
+
sfield=self.func, sfield_args=(),
|
| 692 |
+
symmetry=self.symmetry,
|
| 693 |
+
constraints=self.constraints,
|
| 694 |
+
workers=workers)
|
| 695 |
+
|
| 696 |
+
# Choose complex constructor
|
| 697 |
+
if sampling_method == 'simplicial':
|
| 698 |
+
self.iterate_complex = self.iterate_hypercube
|
| 699 |
+
self.sampling_method = sampling_method
|
| 700 |
+
|
| 701 |
+
elif sampling_method in ['halton', 'sobol'] or \
|
| 702 |
+
not isinstance(sampling_method, str):
|
| 703 |
+
self.iterate_complex = self.iterate_delaunay
|
| 704 |
+
# Sampling method used
|
| 705 |
+
if sampling_method in ['halton', 'sobol']:
|
| 706 |
+
if sampling_method == 'sobol':
|
| 707 |
+
self.n = int(2 ** np.ceil(np.log2(self.n)))
|
| 708 |
+
# self.n #TODO: Should always be self.n, this is
|
| 709 |
+
# unacceptable for shgo, check that nfev behaves as
|
| 710 |
+
# expected.
|
| 711 |
+
self.nc = 0
|
| 712 |
+
self.sampling_method = 'sobol'
|
| 713 |
+
self.qmc_engine = qmc.Sobol(d=self.dim, scramble=False,
|
| 714 |
+
seed=0)
|
| 715 |
+
else:
|
| 716 |
+
self.sampling_method = 'halton'
|
| 717 |
+
self.qmc_engine = qmc.Halton(d=self.dim, scramble=True,
|
| 718 |
+
seed=0)
|
| 719 |
+
|
| 720 |
+
def sampling_method(n, d):
|
| 721 |
+
return self.qmc_engine.random(n)
|
| 722 |
+
|
| 723 |
+
else:
|
| 724 |
+
# A user defined sampling method:
|
| 725 |
+
self.sampling_method = 'custom'
|
| 726 |
+
|
| 727 |
+
self.sampling = self.sampling_custom
|
| 728 |
+
self.sampling_function = sampling_method # F(n, d)
|
| 729 |
+
|
| 730 |
+
# Local controls
|
| 731 |
+
self.stop_l_iter = False # Local minimisation iterations
|
| 732 |
+
self.stop_complex_iter = False # Sampling iterations
|
| 733 |
+
|
| 734 |
+
# Initiate storage objects used in algorithm classes
|
| 735 |
+
self.minimizer_pool = []
|
| 736 |
+
|
| 737 |
+
# Cache of local minimizers mapped
|
| 738 |
+
self.LMC = LMapCache()
|
| 739 |
+
|
| 740 |
+
# Initialize return object
|
| 741 |
+
self.res = OptimizeResult() # scipy.optimize.OptimizeResult object
|
| 742 |
+
self.res.nfev = 0 # Includes each sampling point as func evaluation
|
| 743 |
+
self.res.nlfev = 0 # Local function evals for all minimisers
|
| 744 |
+
self.res.nljev = 0 # Local Jacobian evals for all minimisers
|
| 745 |
+
self.res.nlhev = 0 # Local Hessian evals for all minimisers
|
| 746 |
+
|
| 747 |
+
# Initiation aids
|
| 748 |
+
def init_options(self, options):
|
| 749 |
+
"""
|
| 750 |
+
Initiates the options.
|
| 751 |
+
|
| 752 |
+
Can also be useful to change parameters after class initiation.
|
| 753 |
+
|
| 754 |
+
Parameters
|
| 755 |
+
----------
|
| 756 |
+
options : dict
|
| 757 |
+
|
| 758 |
+
Returns
|
| 759 |
+
-------
|
| 760 |
+
None
|
| 761 |
+
|
| 762 |
+
"""
|
| 763 |
+
# Update 'options' dict passed to optimize.minimize
|
| 764 |
+
# Do this first so we don't mutate `options` below.
|
| 765 |
+
self.minimizer_kwargs['options'].update(options)
|
| 766 |
+
|
| 767 |
+
# Ensure that 'jac', 'hess', and 'hessp' are passed directly to
|
| 768 |
+
# `minimize` as keywords, not as part of its 'options' dictionary.
|
| 769 |
+
for opt in ['jac', 'hess', 'hessp']:
|
| 770 |
+
if opt in self.minimizer_kwargs['options']:
|
| 771 |
+
self.minimizer_kwargs[opt] = (
|
| 772 |
+
self.minimizer_kwargs['options'].pop(opt))
|
| 773 |
+
|
| 774 |
+
# Default settings:
|
| 775 |
+
self.minimize_every_iter = options.get('minimize_every_iter', True)
|
| 776 |
+
|
| 777 |
+
# Algorithm limits
|
| 778 |
+
# Maximum number of iterations to perform.
|
| 779 |
+
self.maxiter = options.get('maxiter', None)
|
| 780 |
+
# Maximum number of function evaluations in the feasible domain
|
| 781 |
+
self.maxfev = options.get('maxfev', None)
|
| 782 |
+
# Maximum number of sampling evaluations (includes searching in
|
| 783 |
+
# infeasible points
|
| 784 |
+
self.maxev = options.get('maxev', None)
|
| 785 |
+
# Maximum processing runtime allowed
|
| 786 |
+
self.init = time.time()
|
| 787 |
+
self.maxtime = options.get('maxtime', None)
|
| 788 |
+
if 'f_min' in options:
|
| 789 |
+
# Specify the minimum objective function value, if it is known.
|
| 790 |
+
self.f_min_true = options['f_min']
|
| 791 |
+
self.f_tol = options.get('f_tol', 1e-4)
|
| 792 |
+
else:
|
| 793 |
+
self.f_min_true = None
|
| 794 |
+
|
| 795 |
+
self.minhgrd = options.get('minhgrd', None)
|
| 796 |
+
|
| 797 |
+
# Objective function knowledge
|
| 798 |
+
self.symmetry = options.get('symmetry', False)
|
| 799 |
+
if self.symmetry:
|
| 800 |
+
self.symmetry = [0, ]*len(self.bounds)
|
| 801 |
+
else:
|
| 802 |
+
self.symmetry = None
|
| 803 |
+
# Algorithm functionality
|
| 804 |
+
# Only evaluate a few of the best candidates
|
| 805 |
+
self.local_iter = options.get('local_iter', False)
|
| 806 |
+
self.infty_cons_sampl = options.get('infty_constraints', True)
|
| 807 |
+
|
| 808 |
+
# Feedback
|
| 809 |
+
self.disp = options.get('disp', False)
|
| 810 |
+
|
| 811 |
+
def __enter__(self):
|
| 812 |
+
return self
|
| 813 |
+
|
| 814 |
+
def __exit__(self, *args):
|
| 815 |
+
return self.HC.V._mapwrapper.__exit__(*args)
|
| 816 |
+
|
| 817 |
+
# Iteration properties
|
| 818 |
+
# Main construction loop:
|
| 819 |
+
def iterate_all(self):
|
| 820 |
+
"""
|
| 821 |
+
Construct for `iters` iterations.
|
| 822 |
+
|
| 823 |
+
If uniform sampling is used, every iteration adds 'n' sampling points.
|
| 824 |
+
|
| 825 |
+
Iterations if a stopping criteria (e.g., sampling points or
|
| 826 |
+
processing time) has been met.
|
| 827 |
+
|
| 828 |
+
"""
|
| 829 |
+
if self.disp:
|
| 830 |
+
logging.info('Splitting first generation')
|
| 831 |
+
|
| 832 |
+
while not self.stop_global:
|
| 833 |
+
if self.break_routine:
|
| 834 |
+
break
|
| 835 |
+
# Iterate complex, process minimisers
|
| 836 |
+
self.iterate()
|
| 837 |
+
self.stopping_criteria()
|
| 838 |
+
|
| 839 |
+
# Build minimiser pool
|
| 840 |
+
# Final iteration only needed if pools weren't minimised every
|
| 841 |
+
# iteration
|
| 842 |
+
if not self.minimize_every_iter:
|
| 843 |
+
if not self.break_routine:
|
| 844 |
+
self.find_minima()
|
| 845 |
+
|
| 846 |
+
self.res.nit = self.iters_done # + 1
|
| 847 |
+
self.fn = self.HC.V.nfev
|
| 848 |
+
|
| 849 |
+
def find_minima(self):
|
| 850 |
+
"""
|
| 851 |
+
Construct the minimizer pool, map the minimizers to local minima
|
| 852 |
+
and sort the results into a global return object.
|
| 853 |
+
"""
|
| 854 |
+
if self.disp:
|
| 855 |
+
logging.info('Searching for minimizer pool...')
|
| 856 |
+
|
| 857 |
+
self.minimizers()
|
| 858 |
+
|
| 859 |
+
if len(self.X_min) != 0:
|
| 860 |
+
# Minimize the pool of minimizers with local minimization methods
|
| 861 |
+
# Note that if Options['local_iter'] is an `int` instead of default
|
| 862 |
+
# value False then only that number of candidates will be minimized
|
| 863 |
+
self.minimise_pool(self.local_iter)
|
| 864 |
+
# Sort results and build the global return object
|
| 865 |
+
self.sort_result()
|
| 866 |
+
|
| 867 |
+
# Lowest values used to report in case of failures
|
| 868 |
+
self.f_lowest = self.res.fun
|
| 869 |
+
self.x_lowest = self.res.x
|
| 870 |
+
else:
|
| 871 |
+
self.find_lowest_vertex()
|
| 872 |
+
|
| 873 |
+
if self.disp:
|
| 874 |
+
logging.info(f"Minimiser pool = SHGO.X_min = {self.X_min}")
|
| 875 |
+
|
| 876 |
+
def find_lowest_vertex(self):
|
| 877 |
+
# Find the lowest objective function value on one of
|
| 878 |
+
# the vertices of the simplicial complex
|
| 879 |
+
self.f_lowest = np.inf
|
| 880 |
+
for x in self.HC.V.cache:
|
| 881 |
+
if self.HC.V[x].f < self.f_lowest:
|
| 882 |
+
if self.disp:
|
| 883 |
+
logging.info(f'self.HC.V[x].f = {self.HC.V[x].f}')
|
| 884 |
+
self.f_lowest = self.HC.V[x].f
|
| 885 |
+
self.x_lowest = self.HC.V[x].x_a
|
| 886 |
+
for lmc in self.LMC.cache:
|
| 887 |
+
if self.LMC[lmc].f_min < self.f_lowest:
|
| 888 |
+
self.f_lowest = self.LMC[lmc].f_min
|
| 889 |
+
self.x_lowest = self.LMC[lmc].x_l
|
| 890 |
+
|
| 891 |
+
if self.f_lowest == np.inf: # no feasible point
|
| 892 |
+
self.f_lowest = None
|
| 893 |
+
self.x_lowest = None
|
| 894 |
+
|
| 895 |
+
# Stopping criteria functions:
|
| 896 |
+
def finite_iterations(self):
|
| 897 |
+
mi = min(x for x in [self.iters, self.maxiter] if x is not None)
|
| 898 |
+
if self.disp:
|
| 899 |
+
logging.info(f'Iterations done = {self.iters_done} / {mi}')
|
| 900 |
+
if self.iters is not None:
|
| 901 |
+
if self.iters_done >= (self.iters):
|
| 902 |
+
self.stop_global = True
|
| 903 |
+
|
| 904 |
+
if self.maxiter is not None: # Stop for infeasible sampling
|
| 905 |
+
if self.iters_done >= (self.maxiter):
|
| 906 |
+
self.stop_global = True
|
| 907 |
+
return self.stop_global
|
| 908 |
+
|
| 909 |
+
def finite_fev(self):
|
| 910 |
+
# Finite function evals in the feasible domain
|
| 911 |
+
if self.disp:
|
| 912 |
+
logging.info(f'Function evaluations done = {self.fn} / {self.maxfev}')
|
| 913 |
+
if self.fn >= self.maxfev:
|
| 914 |
+
self.stop_global = True
|
| 915 |
+
return self.stop_global
|
| 916 |
+
|
| 917 |
+
def finite_ev(self):
|
| 918 |
+
# Finite evaluations including infeasible sampling points
|
| 919 |
+
if self.disp:
|
| 920 |
+
logging.info(f'Sampling evaluations done = {self.n_sampled} '
|
| 921 |
+
f'/ {self.maxev}')
|
| 922 |
+
if self.n_sampled >= self.maxev:
|
| 923 |
+
self.stop_global = True
|
| 924 |
+
|
| 925 |
+
def finite_time(self):
|
| 926 |
+
if self.disp:
|
| 927 |
+
logging.info(f'Time elapsed = {time.time() - self.init} '
|
| 928 |
+
f'/ {self.maxtime}')
|
| 929 |
+
if (time.time() - self.init) >= self.maxtime:
|
| 930 |
+
self.stop_global = True
|
| 931 |
+
|
| 932 |
+
def finite_precision(self):
|
| 933 |
+
"""
|
| 934 |
+
Stop the algorithm if the final function value is known
|
| 935 |
+
|
| 936 |
+
Specify in options (with ``self.f_min_true = options['f_min']``)
|
| 937 |
+
and the tolerance with ``f_tol = options['f_tol']``
|
| 938 |
+
"""
|
| 939 |
+
# If no minimizer has been found use the lowest sampling value
|
| 940 |
+
self.find_lowest_vertex()
|
| 941 |
+
if self.disp:
|
| 942 |
+
logging.info(f'Lowest function evaluation = {self.f_lowest}')
|
| 943 |
+
logging.info(f'Specified minimum = {self.f_min_true}')
|
| 944 |
+
# If no feasible point was return from test
|
| 945 |
+
if self.f_lowest is None:
|
| 946 |
+
return self.stop_global
|
| 947 |
+
|
| 948 |
+
# Function to stop algorithm at specified percentage error:
|
| 949 |
+
if self.f_min_true == 0.0:
|
| 950 |
+
if self.f_lowest <= self.f_tol:
|
| 951 |
+
self.stop_global = True
|
| 952 |
+
else:
|
| 953 |
+
pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true)
|
| 954 |
+
if self.f_lowest <= self.f_min_true:
|
| 955 |
+
self.stop_global = True
|
| 956 |
+
# 2if (pe - self.f_tol) <= abs(1.0 / abs(self.f_min_true)):
|
| 957 |
+
if abs(pe) >= 2 * self.f_tol:
|
| 958 |
+
warnings.warn(
|
| 959 |
+
f"A much lower value than expected f* = {self.f_min_true} "
|
| 960 |
+
f"was found f_lowest = {self.f_lowest}",
|
| 961 |
+
stacklevel=3
|
| 962 |
+
)
|
| 963 |
+
if pe <= self.f_tol:
|
| 964 |
+
self.stop_global = True
|
| 965 |
+
|
| 966 |
+
return self.stop_global
|
| 967 |
+
|
| 968 |
+
def finite_homology_growth(self):
|
| 969 |
+
"""
|
| 970 |
+
Stop the algorithm if homology group rank did not grow in iteration.
|
| 971 |
+
"""
|
| 972 |
+
if self.LMC.size == 0:
|
| 973 |
+
return # pass on no reason to stop yet.
|
| 974 |
+
self.hgrd = self.LMC.size - self.hgr
|
| 975 |
+
|
| 976 |
+
self.hgr = self.LMC.size
|
| 977 |
+
if self.hgrd <= self.minhgrd:
|
| 978 |
+
self.stop_global = True
|
| 979 |
+
if self.disp:
|
| 980 |
+
logging.info(f'Current homology growth = {self.hgrd} '
|
| 981 |
+
f' (minimum growth = {self.minhgrd})')
|
| 982 |
+
return self.stop_global
|
| 983 |
+
|
| 984 |
+
def stopping_criteria(self):
|
| 985 |
+
"""
|
| 986 |
+
Various stopping criteria ran every iteration
|
| 987 |
+
|
| 988 |
+
Returns
|
| 989 |
+
-------
|
| 990 |
+
stop : bool
|
| 991 |
+
"""
|
| 992 |
+
if self.maxiter is not None:
|
| 993 |
+
self.finite_iterations()
|
| 994 |
+
if self.iters is not None:
|
| 995 |
+
self.finite_iterations()
|
| 996 |
+
if self.maxfev is not None:
|
| 997 |
+
self.finite_fev()
|
| 998 |
+
if self.maxev is not None:
|
| 999 |
+
self.finite_ev()
|
| 1000 |
+
if self.maxtime is not None:
|
| 1001 |
+
self.finite_time()
|
| 1002 |
+
if self.f_min_true is not None:
|
| 1003 |
+
self.finite_precision()
|
| 1004 |
+
if self.minhgrd is not None:
|
| 1005 |
+
self.finite_homology_growth()
|
| 1006 |
+
return self.stop_global
|
| 1007 |
+
|
| 1008 |
+
def iterate(self):
|
| 1009 |
+
self.iterate_complex()
|
| 1010 |
+
|
| 1011 |
+
# Build minimizer pool
|
| 1012 |
+
if self.minimize_every_iter:
|
| 1013 |
+
if not self.break_routine:
|
| 1014 |
+
self.find_minima() # Process minimizer pool
|
| 1015 |
+
|
| 1016 |
+
# Algorithm updates
|
| 1017 |
+
self.iters_done += 1
|
| 1018 |
+
|
| 1019 |
+
def iterate_hypercube(self):
|
| 1020 |
+
"""
|
| 1021 |
+
Iterate a subdivision of the complex
|
| 1022 |
+
|
| 1023 |
+
Note: called with ``self.iterate_complex()`` after class initiation
|
| 1024 |
+
"""
|
| 1025 |
+
# Iterate the complex
|
| 1026 |
+
if self.disp:
|
| 1027 |
+
logging.info('Constructing and refining simplicial complex graph '
|
| 1028 |
+
'structure')
|
| 1029 |
+
if self.n is None:
|
| 1030 |
+
self.HC.refine_all()
|
| 1031 |
+
self.n_sampled = self.HC.V.size() # nevs counted
|
| 1032 |
+
else:
|
| 1033 |
+
self.HC.refine(self.n)
|
| 1034 |
+
self.n_sampled += self.n
|
| 1035 |
+
|
| 1036 |
+
if self.disp:
|
| 1037 |
+
logging.info('Triangulation completed, evaluating all constraints '
|
| 1038 |
+
'and objective function values.')
|
| 1039 |
+
|
| 1040 |
+
# Re-add minimisers to complex
|
| 1041 |
+
if len(self.LMC.xl_maps) > 0:
|
| 1042 |
+
for xl in self.LMC.cache:
|
| 1043 |
+
v = self.HC.V[xl]
|
| 1044 |
+
v_near = v.star()
|
| 1045 |
+
for v in v.nn:
|
| 1046 |
+
v_near = v_near.union(v.nn)
|
| 1047 |
+
# Reconnect vertices to complex
|
| 1048 |
+
# if self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l),
|
| 1049 |
+
# near=v_near):
|
| 1050 |
+
# continue
|
| 1051 |
+
# else:
|
| 1052 |
+
# If failure to find in v_near, then search all vertices
|
| 1053 |
+
# (very expensive operation:
|
| 1054 |
+
# self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l)
|
| 1055 |
+
# )
|
| 1056 |
+
|
| 1057 |
+
# Evaluate all constraints and functions
|
| 1058 |
+
self.HC.V.process_pools()
|
| 1059 |
+
if self.disp:
|
| 1060 |
+
logging.info('Evaluations completed.')
|
| 1061 |
+
|
| 1062 |
+
# feasible sampling points counted by the triangulation.py routines
|
| 1063 |
+
self.fn = self.HC.V.nfev
|
| 1064 |
+
return
|
| 1065 |
+
|
| 1066 |
+
def iterate_delaunay(self):
|
| 1067 |
+
"""
|
| 1068 |
+
Build a complex of Delaunay triangulated points
|
| 1069 |
+
|
| 1070 |
+
Note: called with ``self.iterate_complex()`` after class initiation
|
| 1071 |
+
"""
|
| 1072 |
+
self.nc += self.n
|
| 1073 |
+
self.sampled_surface(infty_cons_sampl=self.infty_cons_sampl)
|
| 1074 |
+
|
| 1075 |
+
# Add sampled points to a triangulation, construct self.Tri
|
| 1076 |
+
if self.disp:
|
| 1077 |
+
logging.info(f'self.n = {self.n}')
|
| 1078 |
+
logging.info(f'self.nc = {self.nc}')
|
| 1079 |
+
logging.info('Constructing and refining simplicial complex graph '
|
| 1080 |
+
'structure from sampling points.')
|
| 1081 |
+
|
| 1082 |
+
if self.dim < 2:
|
| 1083 |
+
self.Ind_sorted = np.argsort(self.C, axis=0)
|
| 1084 |
+
self.Ind_sorted = self.Ind_sorted.flatten()
|
| 1085 |
+
tris = []
|
| 1086 |
+
for ind, ind_s in enumerate(self.Ind_sorted):
|
| 1087 |
+
if ind > 0:
|
| 1088 |
+
tris.append(self.Ind_sorted[ind - 1:ind + 1])
|
| 1089 |
+
|
| 1090 |
+
tris = np.array(tris)
|
| 1091 |
+
# Store 1D triangulation:
|
| 1092 |
+
self.Tri = namedtuple('Tri', ['points', 'simplices'])(self.C, tris)
|
| 1093 |
+
self.points = {}
|
| 1094 |
+
else:
|
| 1095 |
+
if self.C.shape[0] > self.dim + 1: # Ensure a simplex can be built
|
| 1096 |
+
self.delaunay_triangulation(n_prc=self.n_prc)
|
| 1097 |
+
self.n_prc = self.C.shape[0]
|
| 1098 |
+
|
| 1099 |
+
if self.disp:
|
| 1100 |
+
logging.info('Triangulation completed, evaluating all '
|
| 1101 |
+
'constraints and objective function values.')
|
| 1102 |
+
|
| 1103 |
+
if hasattr(self, 'Tri'):
|
| 1104 |
+
self.HC.vf_to_vv(self.Tri.points, self.Tri.simplices)
|
| 1105 |
+
|
| 1106 |
+
# Process all pools
|
| 1107 |
+
# Evaluate all constraints and functions
|
| 1108 |
+
if self.disp:
|
| 1109 |
+
logging.info('Triangulation completed, evaluating all constraints '
|
| 1110 |
+
'and objective function values.')
|
| 1111 |
+
|
| 1112 |
+
# Evaluate all constraints and functions
|
| 1113 |
+
self.HC.V.process_pools()
|
| 1114 |
+
if self.disp:
|
| 1115 |
+
logging.info('Evaluations completed.')
|
| 1116 |
+
|
| 1117 |
+
# feasible sampling points counted by the triangulation.py routines
|
| 1118 |
+
self.fn = self.HC.V.nfev
|
| 1119 |
+
self.n_sampled = self.nc # nevs counted in triangulation
|
| 1120 |
+
return
|
| 1121 |
+
|
| 1122 |
+
# Hypercube minimizers
|
| 1123 |
+
def minimizers(self):
|
| 1124 |
+
"""
|
| 1125 |
+
Returns the indexes of all minimizers
|
| 1126 |
+
"""
|
| 1127 |
+
self.minimizer_pool = []
|
| 1128 |
+
# Note: Can implement parallelization here
|
| 1129 |
+
for x in self.HC.V.cache:
|
| 1130 |
+
in_LMC = False
|
| 1131 |
+
if len(self.LMC.xl_maps) > 0:
|
| 1132 |
+
for xlmi in self.LMC.xl_maps:
|
| 1133 |
+
if np.all(np.array(x) == np.array(xlmi)):
|
| 1134 |
+
in_LMC = True
|
| 1135 |
+
if in_LMC:
|
| 1136 |
+
continue
|
| 1137 |
+
|
| 1138 |
+
if self.HC.V[x].minimiser():
|
| 1139 |
+
if self.disp:
|
| 1140 |
+
logging.info('=' * 60)
|
| 1141 |
+
logging.info(f'v.x = {self.HC.V[x].x_a} is minimizer')
|
| 1142 |
+
logging.info(f'v.f = {self.HC.V[x].f} is minimizer')
|
| 1143 |
+
logging.info('=' * 30)
|
| 1144 |
+
|
| 1145 |
+
if self.HC.V[x] not in self.minimizer_pool:
|
| 1146 |
+
self.minimizer_pool.append(self.HC.V[x])
|
| 1147 |
+
|
| 1148 |
+
if self.disp:
|
| 1149 |
+
logging.info('Neighbors:')
|
| 1150 |
+
logging.info('=' * 30)
|
| 1151 |
+
for vn in self.HC.V[x].nn:
|
| 1152 |
+
logging.info(f'x = {vn.x} || f = {vn.f}')
|
| 1153 |
+
|
| 1154 |
+
logging.info('=' * 60)
|
| 1155 |
+
self.minimizer_pool_F = []
|
| 1156 |
+
self.X_min = []
|
| 1157 |
+
# normalized tuple in the Vertex cache
|
| 1158 |
+
self.X_min_cache = {} # Cache used in hypercube sampling
|
| 1159 |
+
|
| 1160 |
+
for v in self.minimizer_pool:
|
| 1161 |
+
self.X_min.append(v.x_a)
|
| 1162 |
+
self.minimizer_pool_F.append(v.f)
|
| 1163 |
+
self.X_min_cache[tuple(v.x_a)] = v.x
|
| 1164 |
+
|
| 1165 |
+
self.minimizer_pool_F = np.array(self.minimizer_pool_F)
|
| 1166 |
+
self.X_min = np.array(self.X_min)
|
| 1167 |
+
|
| 1168 |
+
# TODO: Only do this if global mode
|
| 1169 |
+
self.sort_min_pool()
|
| 1170 |
+
|
| 1171 |
+
return self.X_min
|
| 1172 |
+
|
| 1173 |
+
# Local minimisation
|
| 1174 |
+
# Minimiser pool processing
|
| 1175 |
+
def minimise_pool(self, force_iter=False):
|
| 1176 |
+
"""
|
| 1177 |
+
This processing method can optionally minimise only the best candidate
|
| 1178 |
+
solutions in the minimiser pool
|
| 1179 |
+
|
| 1180 |
+
Parameters
|
| 1181 |
+
----------
|
| 1182 |
+
force_iter : int
|
| 1183 |
+
Number of starting minimizers to process (can be specified
|
| 1184 |
+
globally or locally)
|
| 1185 |
+
|
| 1186 |
+
"""
|
| 1187 |
+
# Find first local minimum
|
| 1188 |
+
# NOTE: Since we always minimize this value regardless it is a waste to
|
| 1189 |
+
# build the topograph first before minimizing
|
| 1190 |
+
lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0])
|
| 1191 |
+
|
| 1192 |
+
# Trim minimized point from current minimizer set
|
| 1193 |
+
self.trim_min_pool(0)
|
| 1194 |
+
|
| 1195 |
+
while not self.stop_l_iter:
|
| 1196 |
+
# Global stopping criteria:
|
| 1197 |
+
self.stopping_criteria()
|
| 1198 |
+
|
| 1199 |
+
# Note first iteration is outside loop:
|
| 1200 |
+
if force_iter:
|
| 1201 |
+
force_iter -= 1
|
| 1202 |
+
if force_iter == 0:
|
| 1203 |
+
self.stop_l_iter = True
|
| 1204 |
+
break
|
| 1205 |
+
|
| 1206 |
+
if np.shape(self.X_min)[0] == 0:
|
| 1207 |
+
self.stop_l_iter = True
|
| 1208 |
+
break
|
| 1209 |
+
|
| 1210 |
+
# Construct topograph from current minimizer set
|
| 1211 |
+
# (NOTE: This is a very small topograph using only the minizer pool
|
| 1212 |
+
# , it might be worth using some graph theory tools instead.
|
| 1213 |
+
self.g_topograph(lres_f_min.x, self.X_min)
|
| 1214 |
+
|
| 1215 |
+
# Find local minimum at the miniser with the greatest Euclidean
|
| 1216 |
+
# distance from the current solution
|
| 1217 |
+
ind_xmin_l = self.Z[:, -1]
|
| 1218 |
+
lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1])
|
| 1219 |
+
|
| 1220 |
+
# Trim minimised point from current minimizer set
|
| 1221 |
+
self.trim_min_pool(ind_xmin_l)
|
| 1222 |
+
|
| 1223 |
+
# Reset controls
|
| 1224 |
+
self.stop_l_iter = False
|
| 1225 |
+
return
|
| 1226 |
+
|
| 1227 |
+
def sort_min_pool(self):
|
| 1228 |
+
# Sort to find minimum func value in min_pool
|
| 1229 |
+
self.ind_f_min = np.argsort(self.minimizer_pool_F)
|
| 1230 |
+
self.minimizer_pool = np.array(self.minimizer_pool)[self.ind_f_min]
|
| 1231 |
+
self.minimizer_pool_F = np.array(self.minimizer_pool_F)[
|
| 1232 |
+
self.ind_f_min]
|
| 1233 |
+
return
|
| 1234 |
+
|
| 1235 |
+
def trim_min_pool(self, trim_ind):
|
| 1236 |
+
self.X_min = np.delete(self.X_min, trim_ind, axis=0)
|
| 1237 |
+
self.minimizer_pool_F = np.delete(self.minimizer_pool_F, trim_ind)
|
| 1238 |
+
self.minimizer_pool = np.delete(self.minimizer_pool, trim_ind)
|
| 1239 |
+
return
|
| 1240 |
+
|
| 1241 |
+
def g_topograph(self, x_min, X_min):
|
| 1242 |
+
"""
|
| 1243 |
+
Returns the topographical vector stemming from the specified value
|
| 1244 |
+
``x_min`` for the current feasible set ``X_min`` with True boolean
|
| 1245 |
+
values indicating positive entries and False values indicating
|
| 1246 |
+
negative entries.
|
| 1247 |
+
|
| 1248 |
+
"""
|
| 1249 |
+
x_min = np.array([x_min])
|
| 1250 |
+
self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean')
|
| 1251 |
+
# Find sorted indexes of spatial distances:
|
| 1252 |
+
self.Z = np.argsort(self.Y, axis=-1)
|
| 1253 |
+
|
| 1254 |
+
self.Ss = X_min[self.Z][0]
|
| 1255 |
+
self.minimizer_pool = self.minimizer_pool[self.Z]
|
| 1256 |
+
self.minimizer_pool = self.minimizer_pool[0]
|
| 1257 |
+
return self.Ss
|
| 1258 |
+
|
| 1259 |
+
# Local bound functions
|
| 1260 |
+
def construct_lcb_simplicial(self, v_min):
|
| 1261 |
+
"""
|
| 1262 |
+
Construct locally (approximately) convex bounds
|
| 1263 |
+
|
| 1264 |
+
Parameters
|
| 1265 |
+
----------
|
| 1266 |
+
v_min : Vertex object
|
| 1267 |
+
The minimizer vertex
|
| 1268 |
+
|
| 1269 |
+
Returns
|
| 1270 |
+
-------
|
| 1271 |
+
cbounds : list of lists
|
| 1272 |
+
List of size dimension with length-2 list of bounds for each
|
| 1273 |
+
dimension.
|
| 1274 |
+
|
| 1275 |
+
"""
|
| 1276 |
+
cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]
|
| 1277 |
+
# Loop over all bounds
|
| 1278 |
+
for vn in v_min.nn:
|
| 1279 |
+
for i, x_i in enumerate(vn.x_a):
|
| 1280 |
+
# Lower bound
|
| 1281 |
+
if (x_i < v_min.x_a[i]) and (x_i > cbounds[i][0]):
|
| 1282 |
+
cbounds[i][0] = x_i
|
| 1283 |
+
|
| 1284 |
+
# Upper bound
|
| 1285 |
+
if (x_i > v_min.x_a[i]) and (x_i < cbounds[i][1]):
|
| 1286 |
+
cbounds[i][1] = x_i
|
| 1287 |
+
|
| 1288 |
+
if self.disp:
|
| 1289 |
+
logging.info(f'cbounds found for v_min.x_a = {v_min.x_a}')
|
| 1290 |
+
logging.info(f'cbounds = {cbounds}')
|
| 1291 |
+
|
| 1292 |
+
return cbounds
|
| 1293 |
+
|
| 1294 |
+
def construct_lcb_delaunay(self, v_min, ind=None):
|
| 1295 |
+
"""
|
| 1296 |
+
Construct locally (approximately) convex bounds
|
| 1297 |
+
|
| 1298 |
+
Parameters
|
| 1299 |
+
----------
|
| 1300 |
+
v_min : Vertex object
|
| 1301 |
+
The minimizer vertex
|
| 1302 |
+
|
| 1303 |
+
Returns
|
| 1304 |
+
-------
|
| 1305 |
+
cbounds : list of lists
|
| 1306 |
+
List of size dimension with length-2 list of bounds for each
|
| 1307 |
+
dimension.
|
| 1308 |
+
"""
|
| 1309 |
+
cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]
|
| 1310 |
+
|
| 1311 |
+
return cbounds
|
| 1312 |
+
|
| 1313 |
+
# Minimize a starting point locally
|
| 1314 |
+
def minimize(self, x_min, ind=None):
|
| 1315 |
+
"""
|
| 1316 |
+
This function is used to calculate the local minima using the specified
|
| 1317 |
+
sampling point as a starting value.
|
| 1318 |
+
|
| 1319 |
+
Parameters
|
| 1320 |
+
----------
|
| 1321 |
+
x_min : vector of floats
|
| 1322 |
+
Current starting point to minimize.
|
| 1323 |
+
|
| 1324 |
+
Returns
|
| 1325 |
+
-------
|
| 1326 |
+
lres : OptimizeResult
|
| 1327 |
+
The local optimization result represented as a `OptimizeResult`
|
| 1328 |
+
object.
|
| 1329 |
+
"""
|
| 1330 |
+
# Use minima maps if vertex was already run
|
| 1331 |
+
if self.disp:
|
| 1332 |
+
logging.info(f'Vertex minimiser maps = {self.LMC.v_maps}')
|
| 1333 |
+
|
| 1334 |
+
if self.LMC[x_min].lres is not None:
|
| 1335 |
+
logging.info(f'Found self.LMC[x_min].lres = '
|
| 1336 |
+
f'{self.LMC[x_min].lres}')
|
| 1337 |
+
return self.LMC[x_min].lres
|
| 1338 |
+
|
| 1339 |
+
if self.callback is not None:
|
| 1340 |
+
logging.info(f'Callback for minimizer starting at {x_min}:')
|
| 1341 |
+
|
| 1342 |
+
if self.disp:
|
| 1343 |
+
logging.info(f'Starting minimization at {x_min}...')
|
| 1344 |
+
|
| 1345 |
+
if self.sampling_method == 'simplicial':
|
| 1346 |
+
x_min_t = tuple(x_min)
|
| 1347 |
+
# Find the normalized tuple in the Vertex cache:
|
| 1348 |
+
x_min_t_norm = self.X_min_cache[tuple(x_min_t)]
|
| 1349 |
+
x_min_t_norm = tuple(x_min_t_norm)
|
| 1350 |
+
g_bounds = self.construct_lcb_simplicial(self.HC.V[x_min_t_norm])
|
| 1351 |
+
if 'bounds' in self.min_solver_args:
|
| 1352 |
+
self.minimizer_kwargs['bounds'] = g_bounds
|
| 1353 |
+
logging.info(self.minimizer_kwargs['bounds'])
|
| 1354 |
+
|
| 1355 |
+
else:
|
| 1356 |
+
g_bounds = self.construct_lcb_delaunay(x_min, ind=ind)
|
| 1357 |
+
if 'bounds' in self.min_solver_args:
|
| 1358 |
+
self.minimizer_kwargs['bounds'] = g_bounds
|
| 1359 |
+
logging.info(self.minimizer_kwargs['bounds'])
|
| 1360 |
+
|
| 1361 |
+
if self.disp and 'bounds' in self.minimizer_kwargs:
|
| 1362 |
+
logging.info('bounds in kwarg:')
|
| 1363 |
+
logging.info(self.minimizer_kwargs['bounds'])
|
| 1364 |
+
|
| 1365 |
+
# Local minimization using scipy.optimize.minimize:
|
| 1366 |
+
lres = minimize(self.func, x_min, **self.minimizer_kwargs)
|
| 1367 |
+
|
| 1368 |
+
if self.disp:
|
| 1369 |
+
logging.info(f'lres = {lres}')
|
| 1370 |
+
|
| 1371 |
+
# Local function evals for all minimizers
|
| 1372 |
+
self.res.nlfev += lres.nfev
|
| 1373 |
+
if 'njev' in lres:
|
| 1374 |
+
self.res.nljev += lres.njev
|
| 1375 |
+
if 'nhev' in lres:
|
| 1376 |
+
self.res.nlhev += lres.nhev
|
| 1377 |
+
|
| 1378 |
+
try: # Needed because of the brain dead 1x1 NumPy arrays
|
| 1379 |
+
lres.fun = lres.fun[0]
|
| 1380 |
+
except (IndexError, TypeError):
|
| 1381 |
+
lres.fun
|
| 1382 |
+
|
| 1383 |
+
# Append minima maps
|
| 1384 |
+
self.LMC[x_min]
|
| 1385 |
+
self.LMC.add_res(x_min, lres, bounds=g_bounds)
|
| 1386 |
+
|
| 1387 |
+
return lres
|
| 1388 |
+
|
| 1389 |
+
# Post local minimization processing
|
| 1390 |
+
def sort_result(self):
|
| 1391 |
+
"""
|
| 1392 |
+
Sort results and build the global return object
|
| 1393 |
+
"""
|
| 1394 |
+
# Sort results in local minima cache
|
| 1395 |
+
results = self.LMC.sort_cache_result()
|
| 1396 |
+
self.res.xl = results['xl']
|
| 1397 |
+
self.res.funl = results['funl']
|
| 1398 |
+
self.res.x = results['x']
|
| 1399 |
+
self.res.fun = results['fun']
|
| 1400 |
+
|
| 1401 |
+
# Add local func evals to sampling func evals
|
| 1402 |
+
# Count the number of feasible vertices and add to local func evals:
|
| 1403 |
+
self.res.nfev = self.fn + self.res.nlfev
|
| 1404 |
+
return self.res
|
| 1405 |
+
|
| 1406 |
+
# Algorithm controls
|
| 1407 |
+
def fail_routine(self, mes=("Failed to converge")):
|
| 1408 |
+
self.break_routine = True
|
| 1409 |
+
self.res.success = False
|
| 1410 |
+
self.X_min = [None]
|
| 1411 |
+
self.res.message = mes
|
| 1412 |
+
|
| 1413 |
+
def sampled_surface(self, infty_cons_sampl=False):
|
| 1414 |
+
"""
|
| 1415 |
+
Sample the function surface.
|
| 1416 |
+
|
| 1417 |
+
There are 2 modes, if ``infty_cons_sampl`` is True then the sampled
|
| 1418 |
+
points that are generated outside the feasible domain will be
|
| 1419 |
+
assigned an ``inf`` value in accordance with SHGO rules.
|
| 1420 |
+
This guarantees convergence and usually requires less objective
|
| 1421 |
+
function evaluations at the computational costs of more Delaunay
|
| 1422 |
+
triangulation points.
|
| 1423 |
+
|
| 1424 |
+
If ``infty_cons_sampl`` is False, then the infeasible points are
|
| 1425 |
+
discarded and only a subspace of the sampled points are used. This
|
| 1426 |
+
comes at the cost of the loss of guaranteed convergence and usually
|
| 1427 |
+
requires more objective function evaluations.
|
| 1428 |
+
"""
|
| 1429 |
+
# Generate sampling points
|
| 1430 |
+
if self.disp:
|
| 1431 |
+
logging.info('Generating sampling points')
|
| 1432 |
+
self.sampling(self.nc, self.dim)
|
| 1433 |
+
if len(self.LMC.xl_maps) > 0:
|
| 1434 |
+
self.C = np.vstack((self.C, np.array(self.LMC.xl_maps)))
|
| 1435 |
+
if not infty_cons_sampl:
|
| 1436 |
+
# Find subspace of feasible points
|
| 1437 |
+
if self.g_cons is not None:
|
| 1438 |
+
self.sampling_subspace()
|
| 1439 |
+
|
| 1440 |
+
# Sort remaining samples
|
| 1441 |
+
self.sorted_samples()
|
| 1442 |
+
|
| 1443 |
+
# Find objective function references
|
| 1444 |
+
self.n_sampled = self.nc
|
| 1445 |
+
|
| 1446 |
+
def sampling_custom(self, n, dim):
    """
    Generate uniform sampling points in a hypercube and scale them to
    the bound limits.

    Parameters
    ----------
    n : int
        Number of points to generate.
    dim : int
        Dimensionality of the problem.

    Returns
    -------
    ndarray
        The scaled sample array, also stored as ``self.C``.
    """
    # Generate uniform sample points in [0, 1]^m \subset R^m.
    # NOTE: the original code branched on ``self.n_sampled == 0`` but both
    # branches performed exactly the same call, so the dead conditional
    # has been removed.
    self.C = self.sampling_function(n, dim)
    # Affinely map each coordinate from [0, 1] onto [lb_i, ub_i].
    for i in range(len(self.bounds)):
        lb, ub = self.bounds[i][0], self.bounds[i][1]
        self.C[:, i] = self.C[:, i] * (ub - lb) + lb
    return self.C
|
| 1463 |
+
|
| 1464 |
+
def sampling_subspace(self):
    """Restrict ``self.C`` to the subspace of points that satisfy every
    constraint function in ``self.g_cons``."""
    for ind, g in enumerate(self.g_cons):
        # C.shape = (Z, dim): Z sampling points of dimensionality dim.
        # A constraint function may not be vectorised, so evaluate each
        # sampling point (row of C) one at a time.
        keep = np.array([np.all(g(pt, *self.g_args[ind]) >= 0.0)
                         for pt in self.C], dtype=bool)
        self.C = self.C[keep]

        if self.C.size == 0:
            self.res.message = ('No sampling point found within the '
                                'feasible set. Increasing sampling '
                                'size.')
            # sampling correctly for both 1-D and >1-D cases
            if self.disp:
                logging.info(self.res.message)
|
| 1485 |
+
|
| 1486 |
+
def sorted_samples(self):  # Validated
    """Compute per-column sort indices of the sampling points.

    Returns
    -------
    Ind_sorted : ndarray
        Indices that sort ``self.C`` along axis 0, column by column.
    Xs : ndarray
        ``self.C`` fancy-indexed with those indices.
    """
    order = np.argsort(self.C, axis=0)
    self.Ind_sorted = order
    self.Xs = self.C[order]
    return self.Ind_sorted, self.Xs
|
| 1491 |
+
|
| 1492 |
+
def delaunay_triangulation(self, n_prc=0):
    """Build, or incrementally extend, the Delaunay triangulation of
    ``self.C``.

    Parameters
    ----------
    n_prc : int, optional
        Index of the first not-yet-triangulated row of ``self.C``; in
        incremental mode only rows from this index onward are added.

    Returns
    -------
    scipy.spatial.Delaunay
        The (possibly updated) triangulation, also stored as ``self.Tri``.
    """
    if hasattr(self, 'Tri') and self.qhull_incremental:
        # TODO: Uncertain if n_prc needs to add len(self.LMC.xl_maps)
        # in self.sampled_surface
        self.Tri.add_points(self.C[n_prc:, :])
        return self.Tri

    try:
        self.Tri = spatial.Delaunay(self.C,
                                    incremental=self.qhull_incremental)
    except spatial.QhullError as err:
        # Only the QH6239 precision error is recoverable (by disabling
        # incremental mode); anything else propagates.
        if not str(err).startswith('QH6239'):
            raise
        logging.warning('QH6239 Qhull precision error detected, '
                        'this usually occurs when no bounds are '
                        'specified, Qhull can only run with '
                        'handling cocircular/cospherical points'
                        ' and in this case incremental mode is '
                        'switched off. The performance of shgo '
                        'will be reduced in this mode.')
        self.qhull_incremental = False
        self.Tri = spatial.Delaunay(self.C,
                                    incremental=self.qhull_incremental)

    return self.Tri
|
| 1519 |
+
|
| 1520 |
+
|
| 1521 |
+
class LMap:
    """Cache record for one local minimisation started from vertex ``v``.

    The result fields are filled in later by ``LMapCache.add_res``.
    """

    def __init__(self, v):
        # Starting vertex (a hashable tuple of coordinates).
        self.v = v
        # Local minimiser, full local result object and objective value;
        # None until a local search from this vertex has finished.
        self.x_l = None
        self.lres = None
        self.f_min = None
        # Bounds used for the local search.
        self.lbounds = []
|
| 1528 |
+
|
| 1529 |
+
|
| 1530 |
+
class LMapCache:
    """Cache of local minimisation results keyed by starting vertex."""

    def __init__(self):
        # Map: tuple(vertex coords) -> LMap record.
        self.cache = {}

        # Parallel lists for search queries.
        self.v_maps = []
        self.xl_maps = []
        self.xl_maps_set = set()
        self.f_maps = []
        self.lbound_maps = []
        # Number of stored results (incremented by add_res).
        self.size = 0

    def __getitem__(self, v):
        """Return the LMap record for vertex ``v``, creating it on demand."""
        try:
            v = np.ndarray.tolist(v)
        except TypeError:
            # ``v`` is already a plain sequence.
            pass
        v = tuple(v)
        try:
            return self.cache[v]
        except KeyError:
            xval = LMap(v)
            self.cache[v] = xval
            return self.cache[v]

    def add_res(self, v, lres, bounds=None):
        """Store the finished local result ``lres`` for starting vertex ``v``.

        ``v`` must already have a record in the cache (created via
        ``self[v]``).  ``lres`` is expected to expose ``x`` and ``fun``.
        """
        v = np.ndarray.tolist(v)
        v = tuple(v)
        self.cache[v].x_l = lres.x
        self.cache[v].lres = lres
        self.cache[v].f_min = lres.fun
        self.cache[v].lbounds = bounds

        # Update cache size
        self.size += 1

        # Cache lists for search queries
        self.v_maps.append(v)
        self.xl_maps.append(lres.x)
        self.xl_maps_set.add(tuple(lres.x))
        self.f_maps.append(lres.fun)
        self.lbound_maps.append(bounds)

    def sort_cache_result(self):
        """
        Sort results by objective value and build the global return object.
        """
        results = {}
        # Temporarily promote the lists to arrays for fancy indexing.
        self.xl_maps = np.array(self.xl_maps)
        self.f_maps = np.array(self.f_maps)

        # Sorted indexes in Func_min
        ind_sorted = np.argsort(self.f_maps)

        # Save ordered list of minima
        results['xl'] = self.xl_maps[ind_sorted]  # Ordered x vals
        # NOTE: the original re-ran ``np.array(self.f_maps)`` here; the
        # conversion above already did that, so the duplicate is removed.
        results['funl'] = self.f_maps[ind_sorted]
        results['funl'] = results['funl'].T

        # Find global of all minimizers
        results['x'] = self.xl_maps[ind_sorted[0]]  # Save global minima
        results['fun'] = self.f_maps[ind_sorted[0]]  # Save global fun value

        # Restore list storage so later add_res calls can keep appending.
        self.xl_maps = np.ndarray.tolist(self.xl_maps)
        self.f_maps = np.ndarray.tolist(self.f_maps)
        return results
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_slsqp_py.py
ADDED
|
@@ -0,0 +1,510 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module implements the Sequential Least Squares Programming optimization
|
| 3 |
+
algorithm (SLSQP), originally developed by Dieter Kraft.
|
| 4 |
+
See http://www.netlib.org/toms/733
|
| 5 |
+
|
| 6 |
+
Functions
|
| 7 |
+
---------
|
| 8 |
+
.. autosummary::
|
| 9 |
+
:toctree: generated/
|
| 10 |
+
|
| 11 |
+
approx_jacobian
|
| 12 |
+
fmin_slsqp
|
| 13 |
+
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
__all__ = ['approx_jacobian', 'fmin_slsqp']
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
from scipy.optimize._slsqp import slsqp
|
| 20 |
+
from numpy import (zeros, array, linalg, append, concatenate, finfo,
|
| 21 |
+
sqrt, vstack, isfinite, atleast_1d)
|
| 22 |
+
from ._optimize import (OptimizeResult, _check_unknown_options,
|
| 23 |
+
_prepare_scalar_function, _clip_x_for_func,
|
| 24 |
+
_check_clip_x)
|
| 25 |
+
from ._numdiff import approx_derivative
|
| 26 |
+
from ._constraints import old_bound_to_new, _arr_to_scalar
|
| 27 |
+
from scipy._lib._array_api import atleast_nd, array_namespace
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
__docformat__ = "restructuredtext en"
|
| 31 |
+
|
| 32 |
+
_epsilon = sqrt(finfo(float).eps)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def approx_jacobian(x, func, epsilon, *args):
    """
    Approximate the Jacobian matrix of a callable function.

    Parameters
    ----------
    x : array_like
        The state vector at which to compute the Jacobian matrix.
    func : callable f(x,*args)
        The vector-valued function.
    epsilon : float
        The perturbation used to determine the partial derivatives.
    args : sequence
        Additional arguments passed to func.

    Returns
    -------
    An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length
    of the outputs of `func`, and ``lenx`` is the number of elements in
    `x`.

    Notes
    -----
    The approximation is done using forward differences.

    """
    # Forward differences with a fixed absolute step; approx_derivative
    # returns shape (m, n) == (lenf, lenx).
    jacobian = approx_derivative(func, x, method='2-point',
                                 abs_step=epsilon, args=args)
    # A scalar-valued func yields shape (lenx,); promote to 2-D so the
    # documented (lenf, lenx) shape always holds.
    return np.atleast_2d(jacobian)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None,
               bounds=(), fprime=None, fprime_eqcons=None,
               fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6,
               iprint=1, disp=None, full_output=0, epsilon=_epsilon,
               callback=None):
    """
    Minimize a function using Sequential Least Squares Programming.

    Python interface function for the SLSQP Optimization subroutine
    originally implemented by Dieter Kraft.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function. Must return a scalar.
    x0 : 1-D ndarray of float
        Initial guess for the independent variable(s).
    eqcons : list, optional
        Functions such that ``eqcons[j](x,*args) == 0.0`` in a
        successfully optimized problem.
    f_eqcons : callable f(x,*args), optional
        Returns a 1-D array in which each element must equal 0.0 in a
        successfully optimized problem. If given, `eqcons` is ignored.
    ieqcons : list, optional
        Functions such that ``ieqcons[j](x,*args) >= 0.0`` in a
        successfully optimized problem.
    f_ieqcons : callable f(x,*args), optional
        Returns a 1-D ndarray in which each element must be >= 0.0 in a
        successfully optimized problem. If given, `ieqcons` is ignored.
    bounds : list, optional
        Tuples ``(xl, xu)`` of lower and upper bounds for each
        independent variable. Infinite values are interpreted as large
        floating values.
    fprime : callable `f(x,*args)`, optional
        Evaluates the partial derivatives of `func`.
    fprime_eqcons : callable `f(x,*args)`, optional
        Returns the ``(len(eqcons), len(x0))`` array of equality
        constraint normals; approximated if not provided.
    fprime_ieqcons : callable `f(x,*args)`, optional
        Returns the ``(len(ieqcons), len(x0))`` array of inequality
        constraint normals; approximated if not provided.
    args : sequence, optional
        Additional arguments passed to `func` and `fprime`.
    iter : int, optional
        Maximum number of iterations.
    acc : float, optional
        Requested accuracy.
    iprint : int, optional
        Verbosity: <= 0 silent, == 1 summary on completion (default),
        >= 2 per-iterate status and summary.
    disp : int, optional
        Overrides the `iprint` interface (preferred).
    full_output : bool, optional
        If False (default), return only the minimizer; otherwise also
        return the final objective value and summary information.
    epsilon : float, optional
        Step size for finite-difference derivative estimates.
    callback : callable, optional
        Called after each iteration as ``callback(x)`` with the current
        parameter vector.

    Returns
    -------
    out : ndarray of float
        The final minimizer of `func`.
    fx : ndarray of float, if full_output is true
        The final value of the objective function.
    its : int, if full_output is true
        The number of iterations.
    imode : int, if full_output is true
        The exit mode from the optimizer (see below).
    smode : string, if full_output is true
        Message describing the exit mode from the optimizer.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'SLSQP' `method` in particular.

    Notes
    -----
    Exit modes are defined as follows ::

        -1 : Gradient evaluation required (g & a)
         0 : Optimization terminated successfully
         1 : Function evaluation required (f & c)
         2 : More equality constraints than independent variables
         3 : More than 3*n iterations in LSQ subproblem
         4 : Inequality constraints incompatible
         5 : Singular matrix E in LSQ subproblem
         6 : Singular matrix C in LSQ subproblem
         7 : Rank-deficient equality constraint subproblem HFTI
         8 : Positive directional derivative for linesearch
         9 : Iteration limit reached

    Examples
    --------
    Examples are given :ref:`in the tutorial <tutorial-sqlsp>`.

    """
    # ``disp`` takes precedence over the legacy ``iprint`` argument.
    if disp is not None:
        iprint = disp

    options = {'maxiter': iter,
               'ftol': acc,
               'iprint': iprint,
               'disp': iprint != 0,
               'eps': epsilon,
               'callback': callback}

    # Translate the legacy constraint arguments into the
    # 'type'/'fun'/'jac'/'args' dictionaries that _minimize_slsqp expects.
    # Order matters and matches the original: eqcons, ieqcons, then the
    # vector-valued f_eqcons / f_ieqcons forms (with their Jacobians).
    constraints = []
    for fun in eqcons:
        constraints.append({'type': 'eq', 'fun': fun, 'args': args})
    for fun in ieqcons:
        constraints.append({'type': 'ineq', 'fun': fun, 'args': args})
    if f_eqcons:
        constraints.append({'type': 'eq', 'fun': f_eqcons,
                            'jac': fprime_eqcons, 'args': args})
    if f_ieqcons:
        constraints.append({'type': 'ineq', 'fun': f_ieqcons,
                            'jac': fprime_ieqcons, 'args': args})

    res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
                          constraints=tuple(constraints), **options)
    if full_output:
        return res['x'], res['fun'], res['nit'], res['status'], res['message']
    return res['x']
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None,
                    constraints=(),
                    maxiter=100, ftol=1.0E-6, iprint=1, disp=False,
                    eps=_epsilon, callback=None, finite_diff_rel_step=None,
                    **unknown_options):
    """
    Minimize a scalar function of one or more variables using Sequential
    Least Squares Programming (SLSQP).

    Options
    -------
    ftol : float
        Precision goal for the value of f in the stopping criterion.
    eps : float
        Step size used for numerical approximation of the Jacobian.
    disp : bool
        Set to True to print convergence messages. If False,
        `verbosity` is ignored and set to 0.
    maxiter : int
        Maximum number of iterations.
    finite_diff_rel_step : None or array_like, optional
        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
        use for numerical approximation of `jac`. The absolute step
        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
        possibly adjusted to fit into the bounds. For ``method='3-point'``
        the sign of `h` is ignored. If None (default) then step is selected
        automatically.

    Returns
    -------
    OptimizeResult
        With fields ``x``, ``fun``, ``jac``, ``nit``, ``nfev``, ``njev``,
        ``status``, ``message`` and ``success``.
    """
    _check_unknown_options(unknown_options)
    # NOTE(review): rebinding with an off-by-one; presumably the Fortran
    # routine counts from 0 -- confirm before changing.
    iter = maxiter - 1
    acc = ftol
    epsilon = eps

    if not disp:
        iprint = 0

    # Transform x0 into a 1-D float array (array-API aware).
    xp = array_namespace(x0)
    x0 = atleast_nd(x0, ndim=1, xp=xp)
    dtype = xp.float64
    if xp.isdtype(x0.dtype, "real floating"):
        dtype = x0.dtype
    x = xp.reshape(xp.astype(x0, dtype), -1)

    # SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by
    # ScalarFunction
    if bounds is None or len(bounds) == 0:
        new_bounds = (-np.inf, np.inf)
    else:
        new_bounds = old_bound_to_new(bounds)

    # clip the initial guess to bounds, otherwise ScalarFunction doesn't work
    x = np.clip(x, new_bounds[0], new_bounds[1])

    # Constraints are triaged per type into a dictionary of tuples
    if isinstance(constraints, dict):
        constraints = (constraints, )

    cons = {'eq': (), 'ineq': ()}
    for ic, con in enumerate(constraints):
        # check type: each constraint dict must carry a 'type' of 'eq'/'ineq'
        try:
            ctype = con['type'].lower()
        except KeyError as e:
            raise KeyError('Constraint %d has no type defined.' % ic) from e
        except TypeError as e:
            raise TypeError('Constraints must be defined using a '
                            'dictionary.') from e
        except AttributeError as e:
            raise TypeError("Constraint's type must be a string.") from e
        else:
            if ctype not in ['eq', 'ineq']:
                raise ValueError("Unknown constraint type '%s'." % con['type'])

        # check function
        if 'fun' not in con:
            raise ValueError('Constraint %d has no function defined.' % ic)

        # check Jacobian: build a finite-difference fallback when absent
        cjac = con.get('jac')
        if cjac is None:
            # approximate Jacobian function. The factory function is needed
            # to keep a reference to `fun`, see gh-4240.
            def cjac_factory(fun):
                def cjac(x, *args):
                    # keep x inside bounds before differencing
                    x = _check_clip_x(x, new_bounds)

                    if jac in ['2-point', '3-point', 'cs']:
                        return approx_derivative(fun, x, method=jac, args=args,
                                                 rel_step=finite_diff_rel_step,
                                                 bounds=new_bounds)
                    else:
                        return approx_derivative(fun, x, method='2-point',
                                                 abs_step=epsilon, args=args,
                                                 bounds=new_bounds)

                return cjac
            cjac = cjac_factory(con['fun'])

        # update constraints' dictionary
        cons[ctype] += ({'fun': con['fun'],
                         'jac': cjac,
                         'args': con.get('args', ())}, )

    # Human-readable messages for the Fortran routine's exit codes.
    exit_modes = {-1: "Gradient evaluation required (g & a)",
                  0: "Optimization terminated successfully",
                  1: "Function evaluation required (f & c)",
                  2: "More equality constraints than independent variables",
                  3: "More than 3*n iterations in LSQ subproblem",
                  4: "Inequality constraints incompatible",
                  5: "Singular matrix E in LSQ subproblem",
                  6: "Singular matrix C in LSQ subproblem",
                  7: "Rank-deficient equality constraint subproblem HFTI",
                  8: "Positive directional derivative for linesearch",
                  9: "Iteration limit reached"}

    # Set the parameters that SLSQP will need
    # meq, mieq: number of equality and inequality constraints
    # (each constraint function may be vector-valued, hence the len sums)
    meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args']))
                        for c in cons['eq']]))
    mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args']))
                         for c in cons['ineq']]))
    # m = The total number of constraints
    m = meq + mieq
    # la = The number of constraints, or 1 if there are no constraints
    la = array([1, m]).max()
    # n = The number of independent variables
    n = len(x)

    # Define the workspaces for SLSQP (sizes per the original Fortran docs)
    n1 = n + 1
    mineq = m - meq + n1 + n1
    len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \
            + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1
    len_jw = mineq
    w = zeros(len_w)
    jw = zeros(len_jw)

    # Decompose bounds into xl and xu
    if bounds is None or len(bounds) == 0:
        # no bounds: the Fortran code treats NaN as "unbounded"
        xl = np.empty(n, dtype=float)
        xu = np.empty(n, dtype=float)
        xl.fill(np.nan)
        xu.fill(np.nan)
    else:
        bnds = array([(_arr_to_scalar(l), _arr_to_scalar(u))
                      for (l, u) in bounds], float)
        if bnds.shape[0] != n:
            raise IndexError('SLSQP Error: the length of bounds is not '
                             'compatible with that of x0.')

        # NaN bounds would trip the comparison warning; suppress it here
        with np.errstate(invalid='ignore'):
            bnderr = bnds[:, 0] > bnds[:, 1]

        if bnderr.any():
            raise ValueError('SLSQP Error: lb > ub in bounds %s.' %
                             ', '.join(str(b) for b in bnderr))
        xl, xu = bnds[:, 0], bnds[:, 1]

        # Mark infinite bounds with nans; the Fortran code understands this
        infbnd = ~isfinite(bnds)
        xl[infbnd[:, 0]] = np.nan
        xu[infbnd[:, 1]] = np.nan

    # ScalarFunction provides function and gradient evaluation
    sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps,
                                  finite_diff_rel_step=finite_diff_rel_step,
                                  bounds=new_bounds)
    # gh11403 SLSQP sometimes exceeds bounds by 1 or 2 ULP, make sure this
    # doesn't get sent to the func/grad evaluator.
    wrapped_fun = _clip_x_for_func(sf.fun, new_bounds)
    wrapped_grad = _clip_x_for_func(sf.grad, new_bounds)

    # Initialize the iteration counter and the mode value
    # (0-d arrays: the Fortran routine mutates these in place)
    mode = array(0, int)
    acc = array(acc, float)
    majiter = array(iter, int)
    majiter_prev = 0

    # Initialize internal SLSQP state variables
    alpha = array(0, float)
    f0 = array(0, float)
    gs = array(0, float)
    h1 = array(0, float)
    h2 = array(0, float)
    h3 = array(0, float)
    h4 = array(0, float)
    t = array(0, float)
    t0 = array(0, float)
    tol = array(0, float)
    iexact = array(0, int)
    incons = array(0, int)
    ireset = array(0, int)
    itermx = array(0, int)
    line = array(0, int)
    n1 = array(0, int)
    n2 = array(0, int)
    n3 = array(0, int)

    # Print the header if iprint >= 2
    if iprint >= 2:
        print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM"))

    # mode is zero on entry, so call objective, constraints and gradients
    # there should be no func evaluations here because it's cached from
    # ScalarFunction
    fx = wrapped_fun(x)
    g = append(wrapped_grad(x), 0.0)
    c = _eval_constraint(x, cons)
    a = _eval_con_normals(x, cons, la, n, m, meq, mieq)

    # Reverse-communication loop: the Fortran routine sets ``mode`` to
    # request objective/constraint (1) or gradient (-1) evaluations.
    while 1:
        # Call SLSQP
        slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw,
              alpha, f0, gs, h1, h2, h3, h4, t, t0, tol,
              iexact, incons, ireset, itermx, line,
              n1, n2, n3)

        if mode == 1:  # objective and constraint evaluation required
            fx = wrapped_fun(x)
            c = _eval_constraint(x, cons)

        if mode == -1:  # gradient evaluation required
            g = append(wrapped_grad(x), 0.0)
            a = _eval_con_normals(x, cons, la, n, m, meq, mieq)

        if majiter > majiter_prev:
            # call callback if major iteration has incremented
            if callback is not None:
                callback(np.copy(x))

            # Print the status of the current iterate if iprint > 2
            if iprint >= 2:
                print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev,
                                                   fx, linalg.norm(g)))

        # If exit mode is not -1 or 1, slsqp has completed
        if abs(mode) != 1:
            break

        majiter_prev = int(majiter)

    # Optimization loop complete. Print status if requested
    if iprint >= 1:
        print(exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')')
        print(" Current function value:", fx)
        print(" Iterations:", majiter)
        print(" Function evaluations:", sf.nfev)
        print(" Gradient evaluations:", sf.ngev)

    # g carries a trailing workspace element; strip it from the reported jac
    return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter),
                          nfev=sf.nfev, njev=sf.ngev, status=int(mode),
                          message=exit_modes[int(mode)], success=(mode == 0))
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def _eval_constraint(x, cons):
|
| 471 |
+
# Compute constraints
|
| 472 |
+
if cons['eq']:
|
| 473 |
+
c_eq = concatenate([atleast_1d(con['fun'](x, *con['args']))
|
| 474 |
+
for con in cons['eq']])
|
| 475 |
+
else:
|
| 476 |
+
c_eq = zeros(0)
|
| 477 |
+
|
| 478 |
+
if cons['ineq']:
|
| 479 |
+
c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args']))
|
| 480 |
+
for con in cons['ineq']])
|
| 481 |
+
else:
|
| 482 |
+
c_ieq = zeros(0)
|
| 483 |
+
|
| 484 |
+
# Now combine c_eq and c_ieq into a single matrix
|
| 485 |
+
c = concatenate((c_eq, c_ieq))
|
| 486 |
+
return c
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def _eval_con_normals(x, cons, la, n, m, meq, mieq):
|
| 490 |
+
# Compute the normals of the constraints
|
| 491 |
+
if cons['eq']:
|
| 492 |
+
a_eq = vstack([con['jac'](x, *con['args'])
|
| 493 |
+
for con in cons['eq']])
|
| 494 |
+
else: # no equality constraint
|
| 495 |
+
a_eq = zeros((meq, n))
|
| 496 |
+
|
| 497 |
+
if cons['ineq']:
|
| 498 |
+
a_ieq = vstack([con['jac'](x, *con['args'])
|
| 499 |
+
for con in cons['ineq']])
|
| 500 |
+
else: # no inequality constraint
|
| 501 |
+
a_ieq = zeros((mieq, n))
|
| 502 |
+
|
| 503 |
+
# Now combine a_eq and a_ieq into a single a matrix
|
| 504 |
+
if m == 0: # no constraints
|
| 505 |
+
a = zeros((la, n))
|
| 506 |
+
else:
|
| 507 |
+
a = vstack((a_eq, a_ieq))
|
| 508 |
+
a = concatenate((a, zeros([la, 1])), 1)
|
| 509 |
+
|
| 510 |
+
return a
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Trust-region optimization."""
|
| 2 |
+
import math
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import scipy.linalg
|
| 7 |
+
from ._optimize import (_check_unknown_options, _status_message,
|
| 8 |
+
OptimizeResult, _prepare_scalar_function,
|
| 9 |
+
_call_callback_maybe_halt)
|
| 10 |
+
from scipy.optimize._hessian_update_strategy import HessianUpdateStrategy
|
| 11 |
+
from scipy.optimize._differentiable_functions import FD_METHODS
|
| 12 |
+
__all__ = []
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _wrap_function(function, args):
|
| 16 |
+
# wraps a minimizer function to count number of evaluations
|
| 17 |
+
# and to easily provide an args kwd.
|
| 18 |
+
ncalls = [0]
|
| 19 |
+
if function is None:
|
| 20 |
+
return ncalls, None
|
| 21 |
+
|
| 22 |
+
def function_wrapper(x, *wrapper_args):
|
| 23 |
+
ncalls[0] += 1
|
| 24 |
+
# A copy of x is sent to the user function (gh13740)
|
| 25 |
+
return function(np.copy(x), *(wrapper_args + args))
|
| 26 |
+
|
| 27 |
+
return ncalls, function_wrapper
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class BaseQuadraticSubproblem:
    """Abstract quadratic model of the objective around one iterate.

    Subclasses supply the ``solve`` method, which minimizes the model
    within a given trust radius.  The function value, gradient and
    Hessian at the iterate ``x`` are computed lazily on first access
    and cached in the ``fun``, ``jac`` and ``hess`` attributes.
    """

    def __init__(self, x, fun, jac, hess=None, hessp=None):
        # Callables supplied by the caller.
        self._fun = fun
        self._jac = jac
        self._hess = hess
        self._hessp = hessp
        # Current iterate and lazily-filled caches.
        self._x = x
        self._f = None
        self._g = None
        self._h = None
        self._g_mag = None
        self._cauchy_point = None
        self._newton_point = None

    def __call__(self, p):
        # Model value at step p: f + g.p + 0.5 * p.(H p)
        return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p))

    @property
    def fun(self):
        """Objective value at the current iterate (cached)."""
        if self._f is None:
            self._f = self._fun(self._x)
        return self._f

    @property
    def jac(self):
        """Gradient of the objective at the current iterate (cached)."""
        if self._g is None:
            self._g = self._jac(self._x)
        return self._g

    @property
    def hess(self):
        """Hessian of the objective at the current iterate (cached)."""
        if self._h is None:
            self._h = self._hess(self._x)
        return self._h

    def hessp(self, p):
        """Hessian-vector product; uses ``hessp`` if given, else ``hess @ p``."""
        if self._hessp is None:
            return np.dot(self.hess, p)
        return self._hessp(self._x, p)

    @property
    def jac_mag(self):
        """Euclidean norm of the gradient at the current iterate (cached)."""
        if self._g_mag is None:
            self._g_mag = scipy.linalg.norm(self.jac)
        return self._g_mag

    def get_boundaries_intersections(self, z, d, trust_radius):
        """Return both roots ``t`` of ``||z + t*d|| == trust_radius``.

        This is a line/sphere intersection; the two solutions are
        returned sorted from low to high.
        """
        a = np.dot(d, d)
        b = 2 * np.dot(z, d)
        c = np.dot(z, z) - trust_radius**2
        sqrt_discriminant = math.sqrt(b*b - 4*a*c)

        # Rather than the textbook quadratic formula, form the root of
        # larger magnitude first and recover the other from the product
        # of roots; this reduces cancellation error (Golub & Van Loan,
        # "Matrix Computations", p. 97).
        aux = b + math.copysign(sqrt_discriminant, b)
        ta = -aux / (2*a)
        tb = -2*c / aux
        return sorted([ta, tb])

    def solve(self, trust_radius):
        raise NotImplementedError('The solve method should be implemented by '
                                  'the child class')
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None,
                           subproblem=None, initial_trust_radius=1.0,
                           max_trust_radius=1000.0, eta=0.15, gtol=1e-4,
                           maxiter=None, disp=False, return_all=False,
                           callback=None, inexact=True, **unknown_options):
    """
    Minimization of scalar function of one or more variables using a
    trust-region algorithm.

    The concrete trust-region method is selected through ``subproblem``,
    a `BaseQuadraticSubproblem` subclass whose ``solve`` method proposes
    a step within the current trust radius; this driver only manages the
    accept/reject logic and the radius updates.

    Options for the trust-region algorithm are:
        initial_trust_radius : float
            Initial trust radius.
        max_trust_radius : float
            Never propose steps that are longer than this value.
        eta : float
            Trust region related acceptance stringency for proposed steps.
        gtol : float
            Gradient norm must be less than `gtol`
            before successful termination.
        maxiter : int
            Maximum number of iterations to perform.
        disp : bool
            If True, print convergence message.
        inexact : bool
            Accuracy to solve subproblems. If True requires less nonlinear
            iterations, but more vector products. Only effective for method
            trust-krylov.

    This function is called by the `minimize` function.
    It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)

    # Validate the inputs before doing any work.
    if jac is None:
        raise ValueError('Jacobian is currently required for trust-region '
                         'methods')
    if hess is None and hessp is None:
        raise ValueError('Either the Hessian or the Hessian-vector product '
                         'is currently required for trust-region methods')
    if subproblem is None:
        raise ValueError('A subproblem solving strategy is required for '
                         'trust-region methods')
    # eta must lie in [0, 1/4); see Nocedal & Wright, Algorithm 4.1.
    # NOTE(review): the next two checks raise bare Exception while the
    # surrounding checks raise ValueError — inconsistent, but changing the
    # type could break callers that catch Exception subclasses narrowly.
    if not (0 <= eta < 0.25):
        raise Exception('invalid acceptance stringency')
    if max_trust_radius <= 0:
        raise Exception('the max trust radius must be positive')
    if initial_trust_radius <= 0:
        raise ValueError('the initial trust radius must be positive')
    if initial_trust_radius >= max_trust_radius:
        raise ValueError('the initial trust radius must be less than the '
                         'max trust radius')

    # force the initial guess into a nice format
    x0 = np.asarray(x0).flatten()

    # A ScalarFunction representing the problem. This caches calls to fun, jac,
    # hess.
    sf = _prepare_scalar_function(fun, x0, jac=jac, hess=hess, args=args)
    fun = sf.fun
    jac = sf.grad
    if callable(hess):
        hess = sf.hess
    elif callable(hessp):
        # this elif statement must come before examining whether hess
        # is estimated by FD methods or a HessianUpdateStrategy
        pass
    elif (hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)):
        # If the Hessian is being estimated by finite differences or a
        # Hessian update strategy then ScalarFunction.hess returns a
        # LinearOperator or a HessianUpdateStrategy. This enables the
        # calculation/creation of a hessp. BUT you only want to do this
        # if the user *hasn't* provided a callable(hessp) function.
        hess = None

        def hessp(x, p, *args):
            return sf.hess(x).dot(p)
    else:
        raise ValueError('Either the Hessian or the Hessian-vector product '
                         'is currently required for trust-region methods')

    # ScalarFunction doesn't represent hessp
    nhessp, hessp = _wrap_function(hessp, args)

    # limit the number of iterations
    if maxiter is None:
        maxiter = len(x0)*200

    # init the search status
    warnflag = 0

    # initialize the search
    trust_radius = initial_trust_radius
    x = x0
    if return_all:
        allvecs = [x]
    # Quadratic model of the objective around the current iterate.
    m = subproblem(x, fun, jac, hess, hessp)
    k = 0

    # search for the function min
    # do not even start if the gradient is small enough
    while m.jac_mag >= gtol:

        # Solve the sub-problem.
        # This gives us the proposed step relative to the current position
        # and it tells us whether the proposed step
        # has reached the trust region boundary or not.
        try:
            p, hits_boundary = m.solve(trust_radius)
        except np.linalg.LinAlgError:
            # e.g. a Cholesky factorization failed inside the subproblem.
            warnflag = 3
            break

        # calculate the predicted value at the proposed point
        predicted_value = m(p)

        # define the local approximation at the proposed point
        x_proposed = x + p
        m_proposed = subproblem(x_proposed, fun, jac, hess, hessp)

        # evaluate the ratio defined in equation (4.4)
        actual_reduction = m.fun - m_proposed.fun
        predicted_reduction = m.fun - predicted_value
        if predicted_reduction <= 0:
            # The model predicts no improvement: the approximation is bad.
            warnflag = 2
            break
        rho = actual_reduction / predicted_reduction

        # update the trust radius according to the actual/predicted ratio
        if rho < 0.25:
            # Poor agreement: shrink the region.
            trust_radius *= 0.25
        elif rho > 0.75 and hits_boundary:
            # Very good agreement and the step was constrained: expand,
            # but never beyond max_trust_radius.
            trust_radius = min(2*trust_radius, max_trust_radius)

        # if the ratio is high enough then accept the proposed step
        if rho > eta:
            x = x_proposed
            m = m_proposed

        # append the best guess, call back, increment the iteration count
        if return_all:
            allvecs.append(np.copy(x))
        k += 1

        intermediate_result = OptimizeResult(x=x, fun=m.fun)
        if _call_callback_maybe_halt(callback, intermediate_result):
            break

        # check if the gradient is small enough to stop
        if m.jac_mag < gtol:
            warnflag = 0
            break

        # check if we have looked at enough iterations
        if k >= maxiter:
            warnflag = 1
            break

    # print some stuff if requested
    status_messages = (
            _status_message['success'],
            _status_message['maxiter'],
            'A bad approximation caused failure to predict improvement.',
            'A linalg error occurred, such as a non-psd Hessian.',
    )
    if disp:
        if warnflag == 0:
            print(status_messages[warnflag])
        else:
            warnings.warn(status_messages[warnflag], RuntimeWarning, stacklevel=3)
        print("         Current function value: %f" % m.fun)
        print("         Iterations: %d" % k)
        print("         Function evaluations: %d" % sf.nfev)
        print("         Gradient evaluations: %d" % sf.ngev)
        print("         Hessian evaluations: %d" % (sf.nhev + nhessp[0]))

    result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag,
                            fun=m.fun, jac=m.jac, nfev=sf.nfev, njev=sf.ngev,
                            nhev=sf.nhev + nhessp[0], nit=k,
                            message=status_messages[warnflag])

    if hess is not None:
        result['hess'] = m.hess

    if return_all:
        result['allvecs'] = allvecs

    return result
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_dogleg.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Dog-leg trust-region optimization."""
|
| 2 |
+
import numpy as np
|
| 3 |
+
import scipy.linalg
|
| 4 |
+
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
|
| 5 |
+
|
| 6 |
+
__all__ = []
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None,
|
| 10 |
+
**trust_region_options):
|
| 11 |
+
"""
|
| 12 |
+
Minimization of scalar function of one or more variables using
|
| 13 |
+
the dog-leg trust-region algorithm.
|
| 14 |
+
|
| 15 |
+
Options
|
| 16 |
+
-------
|
| 17 |
+
initial_trust_radius : float
|
| 18 |
+
Initial trust-region radius.
|
| 19 |
+
max_trust_radius : float
|
| 20 |
+
Maximum value of the trust-region radius. No steps that are longer
|
| 21 |
+
than this value will be proposed.
|
| 22 |
+
eta : float
|
| 23 |
+
Trust region related acceptance stringency for proposed steps.
|
| 24 |
+
gtol : float
|
| 25 |
+
Gradient norm must be less than `gtol` before successful
|
| 26 |
+
termination.
|
| 27 |
+
|
| 28 |
+
"""
|
| 29 |
+
if jac is None:
|
| 30 |
+
raise ValueError('Jacobian is required for dogleg minimization')
|
| 31 |
+
if not callable(hess):
|
| 32 |
+
raise ValueError('Hessian is required for dogleg minimization')
|
| 33 |
+
return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
|
| 34 |
+
subproblem=DoglegSubproblem,
|
| 35 |
+
**trust_region_options)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class DoglegSubproblem(BaseQuadraticSubproblem):
    """Dog-leg solver for the trust-region quadratic subproblem."""

    def cauchy_point(self):
        """Model minimizer along the steepest-descent direction (cached)."""
        if self._cauchy_point is None:
            grad = self.jac
            curvature = np.dot(grad, self.hessp(grad))
            self._cauchy_point = -(np.dot(grad, grad) / curvature) * grad
        return self._cauchy_point

    def newton_point(self):
        """Unconstrained minimizer of the quadratic model (cached)."""
        if self._newton_point is None:
            # Cholesky solve of H p = -g; raises LinAlgError if H is not
            # positive definite, which the driver treats as a failure.
            factor = scipy.linalg.cho_factor(self.hess)
            self._newton_point = -scipy.linalg.cho_solve(factor, self.jac)
        return self._newton_point

    def solve(self, trust_radius):
        """Propose a step using the dog-leg strategy.

        Parameters
        ----------
        trust_radius : float
            We are allowed to wander only this far away from the origin.

        Returns
        -------
        p : ndarray
            The proposed step.
        hits_boundary : bool
            True if the proposed step is on the boundary of the trust
            region.

        Notes
        -----
        Requires a positive-definite Hessian and performs a costly
        Cholesky factorization on most calls.  See Nocedal & Wright,
        "Numerical Optimization", 2nd ed., p. 73.
        """
        # Take the full Newton step whenever it stays strictly inside
        # the trust region: it is the global model minimum.
        p_newton = self.newton_point()
        if scipy.linalg.norm(p_newton) < trust_radius:
            return p_newton, False

        # Otherwise consider the steepest-descent (Cauchy) point; if it
        # already lies outside the region, clip it to the boundary.
        p_cauchy = self.cauchy_point()
        cauchy_norm = scipy.linalg.norm(p_cauchy)
        if cauchy_norm >= trust_radius:
            return p_cauchy * (trust_radius / cauchy_norm), True

        # Dog-leg segment: walk from the Cauchy point towards the Newton
        # point and stop where the segment pierces the boundary, i.e.
        # solve ||p_cauchy + t*(p_newton - p_cauchy)|| == trust_radius
        # and keep the larger (forward) root.
        _, t_hi = self.get_boundaries_intersections(
            p_cauchy, p_newton - p_cauchy, trust_radius)
        return p_cauchy + t_hi * (p_newton - p_cauchy), True
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_exact.py
ADDED
|
@@ -0,0 +1,438 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Nearly exact trust-region optimization subproblem."""
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy.linalg import (norm, get_lapack_funcs, solve_triangular,
|
| 4 |
+
cho_solve)
|
| 5 |
+
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
|
| 6 |
+
|
| 7 |
+
__all__ = ['_minimize_trustregion_exact',
|
| 8 |
+
'estimate_smallest_singular_value',
|
| 9 |
+
'singular_leading_submatrix',
|
| 10 |
+
'IterativeSubproblem']
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None,
|
| 14 |
+
**trust_region_options):
|
| 15 |
+
"""
|
| 16 |
+
Minimization of scalar function of one or more variables using
|
| 17 |
+
a nearly exact trust-region algorithm.
|
| 18 |
+
|
| 19 |
+
Options
|
| 20 |
+
-------
|
| 21 |
+
initial_trust_radius : float
|
| 22 |
+
Initial trust-region radius.
|
| 23 |
+
max_trust_radius : float
|
| 24 |
+
Maximum value of the trust-region radius. No steps that are longer
|
| 25 |
+
than this value will be proposed.
|
| 26 |
+
eta : float
|
| 27 |
+
Trust region related acceptance stringency for proposed steps.
|
| 28 |
+
gtol : float
|
| 29 |
+
Gradient norm must be less than ``gtol`` before successful
|
| 30 |
+
termination.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
if jac is None:
|
| 34 |
+
raise ValueError('Jacobian is required for trust region '
|
| 35 |
+
'exact minimization.')
|
| 36 |
+
if not callable(hess):
|
| 37 |
+
raise ValueError('Hessian matrix is required for trust region '
|
| 38 |
+
'exact minimization.')
|
| 39 |
+
return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
|
| 40 |
+
subproblem=IterativeSubproblem,
|
| 41 |
+
**trust_region_options)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def estimate_smallest_singular_value(U):
    """Estimate the smallest singular value of an upper triangular matrix.

    Runs in O(n**2) and also returns the corresponding right singular
    vector estimate.

    Parameters
    ----------
    U : ndarray
        Square upper triangular matrix.

    Returns
    -------
    s_min : float
        Estimated smallest singular value of the provided matrix.
    z_min : ndarray
        Estimated right singular vector (unit norm).

    Notes
    -----
    Two-step procedure from [1]_: first pick a vector ``e`` with entries
    in {+1, -1} so that the solution ``w`` of ``U.T w = e`` is large,
    then solve ``U v = w``.  The estimates are ``norm(w)/norm(v)`` and
    ``v/norm(v)``; they improve as the matrix becomes more
    ill-conditioned.

    References
    ----------
    .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H.
           An estimate for the condition number of a matrix.  1979.
           SIAM Journal on Numerical Analysis, 16(2), 368-375.
    """
    U = np.atleast_2d(U)
    m, n = U.shape

    if m != n:
        raise ValueError("A square triangular matrix should be provided.")

    # Greedy sign choice per component, adapted from algorithm 3.5.1
    # (p. 142) of Golub & Van Loan, "Matrix Computations", 4th ed.
    # ``partial`` accumulates the partial inner products of already
    # chosen components of w against later rows of U.T.
    partial = np.zeros(n)
    w = np.empty(n)

    for k in range(n):
        pivot = U[k, k]
        w_plus = (1 - partial[k]) / pivot
        w_minus = (-1 - partial[k]) / pivot
        # Column k of U.T below the diagonal is row k of U to the right.
        tail = U[k, k+1:]
        grow_plus = partial[k+1:] + tail * w_plus
        grow_minus = partial[k+1:] + tail * w_minus

        # Keep whichever sign promises the larger eventual solution.
        if abs(w_plus) + norm(grow_plus, 1) >= abs(w_minus) + norm(grow_minus, 1):
            w[k] = w_plus
            partial[k+1:] = grow_plus
        else:
            w[k] = w_minus
            partial[k+1:] = grow_minus

    # Backward substitution for U v = w.
    v = solve_triangular(U, w)

    v_norm = norm(v)

    # Smallest singular value estimate and associated unit vector.
    s_min = norm(w) / v_norm
    z_min = v / v_norm

    return s_min, z_min
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def gershgorin_bounds(H):
    """Lower and upper Gershgorin bounds on the eigenvalues of ``H``.

    Every eigenvalue of a square matrix lies in a disc centred at a
    diagonal entry whose radius is the off-diagonal absolute row sum;
    the returned pair brackets the entire spectrum.

    References
    ----------
    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
           Trust region methods. 2000. Siam. pp. 19.
    """
    diag = np.diag(H)
    abs_diag = np.abs(diag)
    row_sums = np.abs(H).sum(axis=1)
    # row_sums includes |H[i, i]| itself, hence the -/+ abs_diag terms
    # to recover centre minus/plus radius.
    lb = np.min(diag + abs_diag - row_sums)
    ub = np.max(diag - abs_diag + row_sums)
    return lb, ub
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def singular_leading_submatrix(A, U, k):
    """Compute the shift that makes the leading ``k`` x ``k`` block of ``A`` singular.

    Parameters
    ----------
    A : ndarray
        Symmetric matrix that is not positive definite.
    U : ndarray
        Upper triangular matrix resulting of an incomplete Cholesky
        decomposition of matrix ``A``.
    k : int
        Positive integer such that the leading k by k submatrix from
        `A` is the first non-positive definite leading submatrix.

    Returns
    -------
    delta : float
        Amount that should be added to the element (k, k) of the
        leading k by k submatrix of ``A`` to make it singular.
    v : ndarray
        A vector such that ``v.T B v = 0``, where B is the matrix A
        after ``delta`` is added to its element (k, k).
    """
    # The shift equals the sum of squares of the factor column above the
    # failed pivot minus the current diagonal entry: exactly the value
    # that would drive the k-th Cholesky pivot to zero.
    col = U[:k-1, k-1]
    delta = np.dot(col, col) - A[k-1, k-1]

    # Build the null direction of the shifted leading submatrix.
    v = np.zeros(len(A))
    v[k-1] = 1

    # Remaining entries come from a triangular solve (skipped for k == 1,
    # where the leading block is 1 x 1).
    if k > 1:
        v[:k-1] = solve_triangular(U[:k-1, :k-1], -col)

    return delta, v
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class IterativeSubproblem(BaseQuadraticSubproblem):
|
| 187 |
+
"""Quadratic subproblem solved by nearly exact iterative method.
|
| 188 |
+
|
| 189 |
+
Notes
|
| 190 |
+
-----
|
| 191 |
+
This subproblem solver was based on [1]_, [2]_ and [3]_,
|
| 192 |
+
which implement similar algorithms. The algorithm is basically
|
| 193 |
+
that of [1]_ but ideas from [2]_ and [3]_ were also used.
|
| 194 |
+
|
| 195 |
+
References
|
| 196 |
+
----------
|
| 197 |
+
.. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
|
| 198 |
+
Siam, pp. 169-200, 2000.
|
| 199 |
+
.. [2] J. Nocedal and S. Wright, "Numerical optimization",
|
| 200 |
+
Springer Science & Business Media. pp. 83-91, 2006.
|
| 201 |
+
.. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
|
| 202 |
+
SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
|
| 203 |
+
pp. 553-572, 1983.
|
| 204 |
+
"""
|
| 205 |
+
|
| 206 |
+
# UPDATE_COEFF appears in reference [1]_
|
| 207 |
+
# in formula 7.3.14 (p. 190) named as "theta".
|
| 208 |
+
# As recommended there it value is fixed in 0.01.
|
| 209 |
+
UPDATE_COEFF = 0.01
|
| 210 |
+
|
| 211 |
+
EPS = np.finfo(float).eps
|
| 212 |
+
|
| 213 |
+
def __init__(self, x, fun, jac, hess, hessp=None,
|
| 214 |
+
k_easy=0.1, k_hard=0.2):
|
| 215 |
+
|
| 216 |
+
super().__init__(x, fun, jac, hess)
|
| 217 |
+
|
| 218 |
+
# When the trust-region shrinks in two consecutive
|
| 219 |
+
# calculations (``tr_radius < previous_tr_radius``)
|
| 220 |
+
# the lower bound ``lambda_lb`` may be reused,
|
| 221 |
+
# facilitating the convergence. To indicate no
|
| 222 |
+
# previous value is known at first ``previous_tr_radius``
|
| 223 |
+
# is set to -1 and ``lambda_lb`` to None.
|
| 224 |
+
self.previous_tr_radius = -1
|
| 225 |
+
self.lambda_lb = None
|
| 226 |
+
|
| 227 |
+
self.niter = 0
|
| 228 |
+
|
| 229 |
+
# ``k_easy`` and ``k_hard`` are parameters used
|
| 230 |
+
# to determine the stop criteria to the iterative
|
| 231 |
+
# subproblem solver. Take a look at pp. 194-197
|
| 232 |
+
# from reference _[1] for a more detailed description.
|
| 233 |
+
self.k_easy = k_easy
|
| 234 |
+
self.k_hard = k_hard
|
| 235 |
+
|
| 236 |
+
# Get Lapack function for cholesky decomposition.
|
| 237 |
+
# The implemented SciPy wrapper does not return
|
| 238 |
+
# the incomplete factorization needed by the method.
|
| 239 |
+
self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))
|
| 240 |
+
|
| 241 |
+
# Get info about Hessian
|
| 242 |
+
self.dimension = len(self.hess)
|
| 243 |
+
self.hess_gershgorin_lb,\
|
| 244 |
+
self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
|
| 245 |
+
self.hess_inf = norm(self.hess, np.inf)
|
| 246 |
+
self.hess_fro = norm(self.hess, 'fro')
|
| 247 |
+
|
| 248 |
+
# A constant such that for vectors smaller than that
|
| 249 |
+
# backward substituition is not reliable. It was stabilished
|
| 250 |
+
# based on Golub, G. H., Van Loan, C. F. (2013).
|
| 251 |
+
# "Matrix computations". Forth Edition. JHU press., p.165.
|
| 252 |
+
self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf
|
| 253 |
+
|
| 254 |
+
def _initial_values(self, tr_radius):
|
| 255 |
+
"""Given a trust radius, return a good initial guess for
|
| 256 |
+
the damping factor, the lower bound and the upper bound.
|
| 257 |
+
The values were chosen accordingly to the guidelines on
|
| 258 |
+
section 7.3.8 (p. 192) from [1]_.
|
| 259 |
+
"""
|
| 260 |
+
|
| 261 |
+
# Upper bound for the damping factor
|
| 262 |
+
lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
|
| 263 |
+
self.hess_fro,
|
| 264 |
+
self.hess_inf))
|
| 265 |
+
|
| 266 |
+
# Lower bound for the damping factor
|
| 267 |
+
lambda_lb = max(0, -min(self.hess.diagonal()),
|
| 268 |
+
self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
|
| 269 |
+
self.hess_fro,
|
| 270 |
+
self.hess_inf))
|
| 271 |
+
|
| 272 |
+
# Improve bounds with previous info
|
| 273 |
+
if tr_radius < self.previous_tr_radius:
|
| 274 |
+
lambda_lb = max(self.lambda_lb, lambda_lb)
|
| 275 |
+
|
| 276 |
+
# Initial guess for the damping factor
|
| 277 |
+
if lambda_lb == 0:
|
| 278 |
+
lambda_initial = 0
|
| 279 |
+
else:
|
| 280 |
+
lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
|
| 281 |
+
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
|
| 282 |
+
|
| 283 |
+
return lambda_initial, lambda_lb, lambda_ub
|
| 284 |
+
|
| 285 |
+
def solve(self, tr_radius):
|
| 286 |
+
"""Solve quadratic subproblem"""
|
| 287 |
+
|
| 288 |
+
lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
|
| 289 |
+
n = self.dimension
|
| 290 |
+
hits_boundary = True
|
| 291 |
+
already_factorized = False
|
| 292 |
+
self.niter = 0
|
| 293 |
+
|
| 294 |
+
while True:
|
| 295 |
+
|
| 296 |
+
# Compute Cholesky factorization
|
| 297 |
+
if already_factorized:
|
| 298 |
+
already_factorized = False
|
| 299 |
+
else:
|
| 300 |
+
H = self.hess+lambda_current*np.eye(n)
|
| 301 |
+
U, info = self.cholesky(H, lower=False,
|
| 302 |
+
overwrite_a=False,
|
| 303 |
+
clean=True)
|
| 304 |
+
|
| 305 |
+
self.niter += 1
|
| 306 |
+
|
| 307 |
+
# Check if factorization succeeded
|
| 308 |
+
if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
|
| 309 |
+
# Successful factorization
|
| 310 |
+
|
| 311 |
+
# Solve `U.T U p = s`
|
| 312 |
+
p = cho_solve((U, False), -self.jac)
|
| 313 |
+
|
| 314 |
+
p_norm = norm(p)
|
| 315 |
+
|
| 316 |
+
# Check for interior convergence
|
| 317 |
+
if p_norm <= tr_radius and lambda_current == 0:
|
| 318 |
+
hits_boundary = False
|
| 319 |
+
break
|
| 320 |
+
|
| 321 |
+
# Solve `U.T w = p`
|
| 322 |
+
w = solve_triangular(U, p, trans='T')
|
| 323 |
+
|
| 324 |
+
w_norm = norm(w)
|
| 325 |
+
|
| 326 |
+
# Compute Newton step accordingly to
|
| 327 |
+
# formula (4.44) p.87 from ref [2]_.
|
| 328 |
+
delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
|
| 329 |
+
lambda_new = lambda_current + delta_lambda
|
| 330 |
+
|
| 331 |
+
if p_norm < tr_radius: # Inside boundary
|
| 332 |
+
s_min, z_min = estimate_smallest_singular_value(U)
|
| 333 |
+
|
| 334 |
+
ta, tb = self.get_boundaries_intersections(p, z_min,
|
| 335 |
+
tr_radius)
|
| 336 |
+
|
| 337 |
+
# Choose `step_len` with the smallest magnitude.
|
| 338 |
+
# The reason for this choice is explained at
|
| 339 |
+
# ref [3]_, p. 6 (Immediately before the formula
|
| 340 |
+
# for `tau`).
|
| 341 |
+
step_len = min([ta, tb], key=abs)
|
| 342 |
+
|
| 343 |
+
# Compute the quadratic term (p.T*H*p)
|
| 344 |
+
quadratic_term = np.dot(p, np.dot(H, p))
|
| 345 |
+
|
| 346 |
+
# Check stop criteria
|
| 347 |
+
relative_error = ((step_len**2 * s_min**2)
|
| 348 |
+
/ (quadratic_term + lambda_current*tr_radius**2))
|
| 349 |
+
if relative_error <= self.k_hard:
|
| 350 |
+
p += step_len * z_min
|
| 351 |
+
break
|
| 352 |
+
|
| 353 |
+
# Update uncertanty bounds
|
| 354 |
+
lambda_ub = lambda_current
|
| 355 |
+
lambda_lb = max(lambda_lb, lambda_current - s_min**2)
|
| 356 |
+
|
| 357 |
+
# Compute Cholesky factorization
|
| 358 |
+
H = self.hess + lambda_new*np.eye(n)
|
| 359 |
+
c, info = self.cholesky(H, lower=False,
|
| 360 |
+
overwrite_a=False,
|
| 361 |
+
clean=True)
|
| 362 |
+
|
| 363 |
+
# Check if the factorization have succeeded
|
| 364 |
+
#
|
| 365 |
+
if info == 0: # Successful factorization
|
| 366 |
+
# Update damping factor
|
| 367 |
+
lambda_current = lambda_new
|
| 368 |
+
already_factorized = True
|
| 369 |
+
else: # Unsuccessful factorization
|
| 370 |
+
# Update uncertanty bounds
|
| 371 |
+
lambda_lb = max(lambda_lb, lambda_new)
|
| 372 |
+
|
| 373 |
+
# Update damping factor
|
| 374 |
+
lambda_current = max(
|
| 375 |
+
np.sqrt(lambda_lb * lambda_ub),
|
| 376 |
+
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
|
| 377 |
+
)
|
| 378 |
+
|
| 379 |
+
else: # Outside boundary
|
| 380 |
+
# Check stop criteria
|
| 381 |
+
relative_error = abs(p_norm - tr_radius) / tr_radius
|
| 382 |
+
if relative_error <= self.k_easy:
|
| 383 |
+
break
|
| 384 |
+
|
| 385 |
+
# Update uncertanty bounds
|
| 386 |
+
lambda_lb = lambda_current
|
| 387 |
+
|
| 388 |
+
# Update damping factor
|
| 389 |
+
lambda_current = lambda_new
|
| 390 |
+
|
| 391 |
+
elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
|
| 392 |
+
# jac_mag very close to zero
|
| 393 |
+
|
| 394 |
+
# Check for interior convergence
|
| 395 |
+
if lambda_current == 0:
|
| 396 |
+
p = np.zeros(n)
|
| 397 |
+
hits_boundary = False
|
| 398 |
+
break
|
| 399 |
+
|
| 400 |
+
s_min, z_min = estimate_smallest_singular_value(U)
|
| 401 |
+
step_len = tr_radius
|
| 402 |
+
|
| 403 |
+
# Check stop criteria
|
| 404 |
+
if (step_len**2 * s_min**2
|
| 405 |
+
<= self.k_hard * lambda_current * tr_radius**2):
|
| 406 |
+
p = step_len * z_min
|
| 407 |
+
break
|
| 408 |
+
|
| 409 |
+
# Update uncertanty bounds
|
| 410 |
+
lambda_ub = lambda_current
|
| 411 |
+
lambda_lb = max(lambda_lb, lambda_current - s_min**2)
|
| 412 |
+
|
| 413 |
+
# Update damping factor
|
| 414 |
+
lambda_current = max(
|
| 415 |
+
np.sqrt(lambda_lb * lambda_ub),
|
| 416 |
+
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
|
| 417 |
+
)
|
| 418 |
+
|
| 419 |
+
else: # Unsuccessful factorization
|
| 420 |
+
|
| 421 |
+
# Compute auxiliary terms
|
| 422 |
+
delta, v = singular_leading_submatrix(H, U, info)
|
| 423 |
+
v_norm = norm(v)
|
| 424 |
+
|
| 425 |
+
# Update uncertanty interval
|
| 426 |
+
lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)
|
| 427 |
+
|
| 428 |
+
# Update damping factor
|
| 429 |
+
lambda_current = max(
|
| 430 |
+
np.sqrt(lambda_lb * lambda_ub),
|
| 431 |
+
lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
self.lambda_lb = lambda_lb
|
| 435 |
+
self.lambda_current = lambda_current
|
| 436 |
+
self.previous_tr_radius = tr_radius
|
| 437 |
+
|
| 438 |
+
return p, hits_boundary
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_krylov.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ._trustregion import (_minimize_trust_region)
|
| 2 |
+
from ._trlib import (get_trlib_quadratic_subproblem)
|
| 3 |
+
|
| 4 |
+
__all__ = ['_minimize_trust_krylov']
|
| 5 |
+
|
| 6 |
+
def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None,
|
| 7 |
+
inexact=True, **trust_region_options):
|
| 8 |
+
"""
|
| 9 |
+
Minimization of a scalar function of one or more variables using
|
| 10 |
+
a nearly exact trust-region algorithm that only requires matrix
|
| 11 |
+
vector products with the hessian matrix.
|
| 12 |
+
|
| 13 |
+
.. versionadded:: 1.0.0
|
| 14 |
+
|
| 15 |
+
Options
|
| 16 |
+
-------
|
| 17 |
+
inexact : bool, optional
|
| 18 |
+
Accuracy to solve subproblems. If True requires less nonlinear
|
| 19 |
+
iterations, but more vector products.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
if jac is None:
|
| 23 |
+
raise ValueError('Jacobian is required for trust region ',
|
| 24 |
+
'exact minimization.')
|
| 25 |
+
if hess is None and hessp is None:
|
| 26 |
+
raise ValueError('Either the Hessian or the Hessian-vector product '
|
| 27 |
+
'is required for Krylov trust-region minimization')
|
| 28 |
+
|
| 29 |
+
# tol_rel specifies the termination tolerance relative to the initial
|
| 30 |
+
# gradient norm in the Krylov subspace iteration.
|
| 31 |
+
|
| 32 |
+
# - tol_rel_i specifies the tolerance for interior convergence.
|
| 33 |
+
# - tol_rel_b specifies the tolerance for boundary convergence.
|
| 34 |
+
# in nonlinear programming applications it is not necessary to solve
|
| 35 |
+
# the boundary case as exact as the interior case.
|
| 36 |
+
|
| 37 |
+
# - setting tol_rel_i=-2 leads to a forcing sequence in the Krylov
|
| 38 |
+
# subspace iteration leading to quadratic convergence if eventually
|
| 39 |
+
# the trust region stays inactive.
|
| 40 |
+
# - setting tol_rel_b=-3 leads to a forcing sequence in the Krylov
|
| 41 |
+
# subspace iteration leading to superlinear convergence as long
|
| 42 |
+
# as the iterates hit the trust region boundary.
|
| 43 |
+
|
| 44 |
+
# For details consult the documentation of trlib_krylov_min
|
| 45 |
+
# in _trlib/trlib_krylov.h
|
| 46 |
+
#
|
| 47 |
+
# Optimality of this choice of parameters among a range of possibilities
|
| 48 |
+
# has been tested on the unconstrained subset of the CUTEst library.
|
| 49 |
+
|
| 50 |
+
if inexact:
|
| 51 |
+
return _minimize_trust_region(fun, x0, args=args, jac=jac,
|
| 52 |
+
hess=hess, hessp=hessp,
|
| 53 |
+
subproblem=get_trlib_quadratic_subproblem(
|
| 54 |
+
tol_rel_i=-2.0, tol_rel_b=-3.0,
|
| 55 |
+
disp=trust_region_options.get('disp', False)
|
| 56 |
+
),
|
| 57 |
+
**trust_region_options)
|
| 58 |
+
else:
|
| 59 |
+
return _minimize_trust_region(fun, x0, args=args, jac=jac,
|
| 60 |
+
hess=hess, hessp=hessp,
|
| 61 |
+
subproblem=get_trlib_quadratic_subproblem(
|
| 62 |
+
tol_rel_i=1e-8, tol_rel_b=1e-6,
|
| 63 |
+
disp=trust_region_options.get('disp', False)
|
| 64 |
+
),
|
| 65 |
+
**trust_region_options)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_zeros.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (21.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/cython_optimize.pxd
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Public Cython API declarations
|
| 2 |
+
#
|
| 3 |
+
# See doc/source/dev/contributor/public_cython_api.rst for guidelines
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# The following cimport statement provides legacy ABI
|
| 7 |
+
# support. Changing it causes an ABI forward-compatibility break
|
| 8 |
+
# (gh-11793), so we currently leave it as is (no further cimport
|
| 9 |
+
# statements should be used in this file).
|
| 10 |
+
from scipy.optimize.cython_optimize._zeros cimport (
|
| 11 |
+
brentq, brenth, ridder, bisect, zeros_full_output)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/linesearch.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.optimize` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = ["line_search"] # noqa: F822
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def __dir__():
|
| 12 |
+
return __all__
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def __getattr__(name):
|
| 16 |
+
return _sub_module_deprecation(sub_package="optimize", module="linesearch",
|
| 17 |
+
private_modules=["_linesearch"], all=__all__,
|
| 18 |
+
attribute=name)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/minpack.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.optimize` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'OptimizeResult',
|
| 10 |
+
'OptimizeWarning',
|
| 11 |
+
'curve_fit',
|
| 12 |
+
'fixed_point',
|
| 13 |
+
'fsolve',
|
| 14 |
+
'least_squares',
|
| 15 |
+
'leastsq',
|
| 16 |
+
'zeros',
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def __dir__():
|
| 21 |
+
return __all__
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def __getattr__(name):
|
| 25 |
+
return _sub_module_deprecation(sub_package="optimize", module="minpack",
|
| 26 |
+
private_modules=["_minpack_py"], all=__all__,
|
| 27 |
+
attribute=name)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/moduleTNC.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.optimize` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
__all__ = []
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def __dir__():
|
| 13 |
+
return __all__
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def __getattr__(name):
|
| 17 |
+
return _sub_module_deprecation(sub_package="optimize", module="moduleTNC",
|
| 18 |
+
private_modules=["_moduleTNC"], all=__all__,
|
| 19 |
+
attribute=name)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/nonlin.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.optimize` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'BroydenFirst',
|
| 10 |
+
'InverseJacobian',
|
| 11 |
+
'KrylovJacobian',
|
| 12 |
+
'anderson',
|
| 13 |
+
'broyden1',
|
| 14 |
+
'broyden2',
|
| 15 |
+
'diagbroyden',
|
| 16 |
+
'excitingmixing',
|
| 17 |
+
'linearmixing',
|
| 18 |
+
'newton_krylov',
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def __dir__():
|
| 23 |
+
return __all__
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def __getattr__(name):
|
| 27 |
+
return _sub_module_deprecation(sub_package="optimize", module="nonlin",
|
| 28 |
+
private_modules=["_nonlin"], all=__all__,
|
| 29 |
+
attribute=name)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/optimize.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.optimize` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'OptimizeResult',
|
| 10 |
+
'OptimizeWarning',
|
| 11 |
+
'approx_fprime',
|
| 12 |
+
'bracket',
|
| 13 |
+
'brent',
|
| 14 |
+
'brute',
|
| 15 |
+
'check_grad',
|
| 16 |
+
'fmin',
|
| 17 |
+
'fmin_bfgs',
|
| 18 |
+
'fmin_cg',
|
| 19 |
+
'fmin_ncg',
|
| 20 |
+
'fmin_powell',
|
| 21 |
+
'fminbound',
|
| 22 |
+
'golden',
|
| 23 |
+
'line_search',
|
| 24 |
+
'rosen',
|
| 25 |
+
'rosen_der',
|
| 26 |
+
'rosen_hess',
|
| 27 |
+
'rosen_hess_prod',
|
| 28 |
+
'show_options',
|
| 29 |
+
'zeros',
|
| 30 |
+
]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def __dir__():
|
| 34 |
+
return __all__
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def __getattr__(name):
|
| 38 |
+
return _sub_module_deprecation(sub_package="optimize", module="optimize",
|
| 39 |
+
private_modules=["_optimize"], all=__all__,
|
| 40 |
+
attribute=name)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tnc.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.optimize` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'OptimizeResult',
|
| 10 |
+
'fmin_tnc',
|
| 11 |
+
'zeros',
|
| 12 |
+
]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def __dir__():
|
| 16 |
+
return __all__
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def __getattr__(name):
|
| 20 |
+
return _sub_module_deprecation(sub_package="optimize", module="tnc",
|
| 21 |
+
private_modules=["_tnc"], all=__all__,
|
| 22 |
+
attribute=name)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/zeros.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.optimize` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'RootResults',
|
| 10 |
+
'bisect',
|
| 11 |
+
'brenth',
|
| 12 |
+
'brentq',
|
| 13 |
+
'newton',
|
| 14 |
+
'ridder',
|
| 15 |
+
'toms748',
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def __dir__():
|
| 20 |
+
return __all__
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def __getattr__(name):
|
| 24 |
+
return _sub_module_deprecation(sub_package="optimize", module="zeros",
|
| 25 |
+
private_modules=["_zeros_py"], all=__all__,
|
| 26 |
+
attribute=name)
|
llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (181 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/cosine_cdf.cpython-310.pyc
ADDED
|
Binary file (710 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/expn_asy.cpython-310.pyc
ADDED
|
Binary file (1.98 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/gammainc_asy.cpython-310.pyc
ADDED
|
Binary file (3.34 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/gammainc_data.cpython-310.pyc
ADDED
|
Binary file (3.87 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/special/_precompute/__pycache__/hyp2f1_data.cpython-310.pyc
ADDED
|
Binary file (12.9 kB). View file
|
|
|