Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +6 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_cython_nnls.cpython-310-x86_64-linux-gnu.so +3 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py +535 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py +1703 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py +841 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py +1156 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py +871 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py +984 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyqa.py +252 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py +255 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py +92 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py +805 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py +300 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py +167 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py +43 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py +122 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py +874 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py +116 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py +297 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py +287 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py +1194 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py +0 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py +40 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py +613 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py +110 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py +965 -0
- mantis_evalkit/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_basic.cpython-310.pyc +3 -0
- moondream/lib/python3.10/site-packages/numpy/random/_common.cpython-310-x86_64-linux-gnu.so +3 -0
- moondream/lib/python3.10/site-packages/numpy/random/_mt19937.cpython-310-x86_64-linux-gnu.so +3 -0
- moondream/lib/python3.10/site-packages/numpy/random/_philox.cpython-310-x86_64-linux-gnu.so +3 -0
- moondream/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so +3 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Adelaide +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/LHI +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lindeman +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Queensland +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Sydney +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Yancowinna +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Belfast +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Berlin +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Bratislava +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Brussels +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Busingen +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Copenhagen +0 -0
- moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Dublin +0 -0
.gitattributes
CHANGED
|
@@ -564,3 +564,9 @@ mantis_evalkit/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_
|
|
| 564 |
mantis_evalkit/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 565 |
mantis_evalkit/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 566 |
moondream/lib/python3.10/site-packages/numpy/ma/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 564 |
mantis_evalkit/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 565 |
mantis_evalkit/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 566 |
moondream/lib/python3.10/site-packages/numpy/ma/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 567 |
+
moondream/lib/python3.10/site-packages/numpy/random/_philox.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 568 |
+
moondream/lib/python3.10/site-packages/numpy/random/_mt19937.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 569 |
+
moondream/lib/python3.10/site-packages/numpy/random/_common.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 570 |
+
moondream/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 571 |
+
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_cython_nnls.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 572 |
+
mantis_evalkit/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_cython_nnls.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:15974c1a1e5260c3240814b98b6e2d18902e42a530891499d0743669a3ae9b98
|
| 3 |
+
size 121024
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (340 Bytes). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc
ADDED
|
Binary file (8.19 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc
ADDED
|
Binary file (13.9 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc
ADDED
|
Binary file (5.65 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py
ADDED
|
File without changes
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py
ADDED
|
@@ -0,0 +1,535 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for the basin hopping global minimization algorithm.
|
| 3 |
+
"""
|
| 4 |
+
import copy
|
| 5 |
+
|
| 6 |
+
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
|
| 7 |
+
assert_allclose)
|
| 8 |
+
import pytest
|
| 9 |
+
from pytest import raises as assert_raises
|
| 10 |
+
import numpy as np
|
| 11 |
+
from numpy import cos, sin
|
| 12 |
+
|
| 13 |
+
from scipy.optimize import basinhopping, OptimizeResult
|
| 14 |
+
from scipy.optimize._basinhopping import (
|
| 15 |
+
Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def func1d(x):
|
| 19 |
+
f = cos(14.5 * x - 0.3) + (x + 0.2) * x
|
| 20 |
+
df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
|
| 21 |
+
return f, df
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def func2d_nograd(x):
|
| 25 |
+
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
|
| 26 |
+
return f
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def func2d(x):
|
| 30 |
+
f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
|
| 31 |
+
df = np.zeros(2)
|
| 32 |
+
df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
|
| 33 |
+
df[1] = 2. * x[1] + 0.2
|
| 34 |
+
return f, df
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def func2d_easyderiv(x):
|
| 38 |
+
f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
|
| 39 |
+
df = np.zeros(2)
|
| 40 |
+
df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
|
| 41 |
+
df[1] = 2.0*x[0] + 4.0*x[1]
|
| 42 |
+
|
| 43 |
+
return f, df
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class MyTakeStep1(RandomDisplacement):
|
| 47 |
+
"""use a copy of displace, but have it set a special parameter to
|
| 48 |
+
make sure it's actually being used."""
|
| 49 |
+
def __init__(self):
|
| 50 |
+
self.been_called = False
|
| 51 |
+
super().__init__()
|
| 52 |
+
|
| 53 |
+
def __call__(self, x):
|
| 54 |
+
self.been_called = True
|
| 55 |
+
return super().__call__(x)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def myTakeStep2(x):
|
| 59 |
+
"""redo RandomDisplacement in function form without the attribute stepsize
|
| 60 |
+
to make sure everything still works ok
|
| 61 |
+
"""
|
| 62 |
+
s = 0.5
|
| 63 |
+
x += np.random.uniform(-s, s, np.shape(x))
|
| 64 |
+
return x
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class MyAcceptTest:
|
| 68 |
+
"""pass a custom accept test
|
| 69 |
+
|
| 70 |
+
This does nothing but make sure it's being used and ensure all the
|
| 71 |
+
possible return values are accepted
|
| 72 |
+
"""
|
| 73 |
+
def __init__(self):
|
| 74 |
+
self.been_called = False
|
| 75 |
+
self.ncalls = 0
|
| 76 |
+
self.testres = [False, 'force accept', True, np.bool_(True),
|
| 77 |
+
np.bool_(False), [], {}, 0, 1]
|
| 78 |
+
|
| 79 |
+
def __call__(self, **kwargs):
|
| 80 |
+
self.been_called = True
|
| 81 |
+
self.ncalls += 1
|
| 82 |
+
if self.ncalls - 1 < len(self.testres):
|
| 83 |
+
return self.testres[self.ncalls - 1]
|
| 84 |
+
else:
|
| 85 |
+
return True
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class MyCallBack:
|
| 89 |
+
"""pass a custom callback function
|
| 90 |
+
|
| 91 |
+
This makes sure it's being used. It also returns True after 10
|
| 92 |
+
steps to ensure that it's stopping early.
|
| 93 |
+
|
| 94 |
+
"""
|
| 95 |
+
def __init__(self):
|
| 96 |
+
self.been_called = False
|
| 97 |
+
self.ncalls = 0
|
| 98 |
+
|
| 99 |
+
def __call__(self, x, f, accepted):
|
| 100 |
+
self.been_called = True
|
| 101 |
+
self.ncalls += 1
|
| 102 |
+
if self.ncalls == 10:
|
| 103 |
+
return True
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class TestBasinHopping:
|
| 107 |
+
|
| 108 |
+
def setup_method(self):
|
| 109 |
+
""" Tests setup.
|
| 110 |
+
|
| 111 |
+
Run tests based on the 1-D and 2-D functions described above.
|
| 112 |
+
"""
|
| 113 |
+
self.x0 = (1.0, [1.0, 1.0])
|
| 114 |
+
self.sol = (-0.195, np.array([-0.195, -0.1]))
|
| 115 |
+
|
| 116 |
+
self.tol = 3 # number of decimal places
|
| 117 |
+
|
| 118 |
+
self.niter = 100
|
| 119 |
+
self.disp = False
|
| 120 |
+
|
| 121 |
+
self.kwargs = {"method": "L-BFGS-B", "jac": True}
|
| 122 |
+
self.kwargs_nograd = {"method": "L-BFGS-B"}
|
| 123 |
+
|
| 124 |
+
def test_TypeError(self):
|
| 125 |
+
# test the TypeErrors are raised on bad input
|
| 126 |
+
i = 1
|
| 127 |
+
# if take_step is passed, it must be callable
|
| 128 |
+
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
|
| 129 |
+
take_step=1)
|
| 130 |
+
# if accept_test is passed, it must be callable
|
| 131 |
+
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
|
| 132 |
+
accept_test=1)
|
| 133 |
+
|
| 134 |
+
def test_input_validation(self):
|
| 135 |
+
msg = 'target_accept_rate has to be in range \\(0, 1\\)'
|
| 136 |
+
with assert_raises(ValueError, match=msg):
|
| 137 |
+
basinhopping(func1d, self.x0[0], target_accept_rate=0.)
|
| 138 |
+
with assert_raises(ValueError, match=msg):
|
| 139 |
+
basinhopping(func1d, self.x0[0], target_accept_rate=1.)
|
| 140 |
+
|
| 141 |
+
msg = 'stepwise_factor has to be in range \\(0, 1\\)'
|
| 142 |
+
with assert_raises(ValueError, match=msg):
|
| 143 |
+
basinhopping(func1d, self.x0[0], stepwise_factor=0.)
|
| 144 |
+
with assert_raises(ValueError, match=msg):
|
| 145 |
+
basinhopping(func1d, self.x0[0], stepwise_factor=1.)
|
| 146 |
+
|
| 147 |
+
def test_1d_grad(self):
|
| 148 |
+
# test 1-D minimizations with gradient
|
| 149 |
+
i = 0
|
| 150 |
+
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
|
| 151 |
+
niter=self.niter, disp=self.disp)
|
| 152 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
| 153 |
+
|
| 154 |
+
def test_2d(self):
|
| 155 |
+
# test 2d minimizations with gradient
|
| 156 |
+
i = 1
|
| 157 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
| 158 |
+
niter=self.niter, disp=self.disp)
|
| 159 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
| 160 |
+
assert_(res.nfev > 0)
|
| 161 |
+
|
| 162 |
+
def test_njev(self):
|
| 163 |
+
# test njev is returned correctly
|
| 164 |
+
i = 1
|
| 165 |
+
minimizer_kwargs = self.kwargs.copy()
|
| 166 |
+
# L-BFGS-B doesn't use njev, but BFGS does
|
| 167 |
+
minimizer_kwargs["method"] = "BFGS"
|
| 168 |
+
res = basinhopping(func2d, self.x0[i],
|
| 169 |
+
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
|
| 170 |
+
disp=self.disp)
|
| 171 |
+
assert_(res.nfev > 0)
|
| 172 |
+
assert_equal(res.nfev, res.njev)
|
| 173 |
+
|
| 174 |
+
def test_jac(self):
|
| 175 |
+
# test Jacobian returned
|
| 176 |
+
minimizer_kwargs = self.kwargs.copy()
|
| 177 |
+
# BFGS returns a Jacobian
|
| 178 |
+
minimizer_kwargs["method"] = "BFGS"
|
| 179 |
+
|
| 180 |
+
res = basinhopping(func2d_easyderiv, [0.0, 0.0],
|
| 181 |
+
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
|
| 182 |
+
disp=self.disp)
|
| 183 |
+
|
| 184 |
+
assert_(hasattr(res.lowest_optimization_result, "jac"))
|
| 185 |
+
|
| 186 |
+
# in this case, the Jacobian is just [df/dx, df/dy]
|
| 187 |
+
_, jacobian = func2d_easyderiv(res.x)
|
| 188 |
+
assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
|
| 189 |
+
self.tol)
|
| 190 |
+
|
| 191 |
+
def test_2d_nograd(self):
|
| 192 |
+
# test 2-D minimizations without gradient
|
| 193 |
+
i = 1
|
| 194 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
| 195 |
+
minimizer_kwargs=self.kwargs_nograd,
|
| 196 |
+
niter=self.niter, disp=self.disp)
|
| 197 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
| 198 |
+
|
| 199 |
+
@pytest.mark.fail_slow(10)
|
| 200 |
+
def test_all_minimizers(self):
|
| 201 |
+
# Test 2-D minimizations with gradient. Nelder-Mead, Powell, COBYLA, and
|
| 202 |
+
# COBYQA don't accept jac=True, so aren't included here.
|
| 203 |
+
i = 1
|
| 204 |
+
methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
|
| 205 |
+
minimizer_kwargs = copy.copy(self.kwargs)
|
| 206 |
+
for method in methods:
|
| 207 |
+
minimizer_kwargs["method"] = method
|
| 208 |
+
res = basinhopping(func2d, self.x0[i],
|
| 209 |
+
minimizer_kwargs=minimizer_kwargs,
|
| 210 |
+
niter=self.niter, disp=self.disp)
|
| 211 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
| 212 |
+
|
| 213 |
+
@pytest.mark.fail_slow(20)
|
| 214 |
+
def test_all_nograd_minimizers(self):
|
| 215 |
+
# Test 2-D minimizations without gradient. Newton-CG requires jac=True,
|
| 216 |
+
# so not included here.
|
| 217 |
+
i = 1
|
| 218 |
+
methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
|
| 219 |
+
'Nelder-Mead', 'Powell', 'COBYLA', 'COBYQA']
|
| 220 |
+
minimizer_kwargs = copy.copy(self.kwargs_nograd)
|
| 221 |
+
for method in methods:
|
| 222 |
+
# COBYQA takes extensive amount of time on this problem
|
| 223 |
+
niter = 10 if method == 'COBYQA' else self.niter
|
| 224 |
+
minimizer_kwargs["method"] = method
|
| 225 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
| 226 |
+
minimizer_kwargs=minimizer_kwargs,
|
| 227 |
+
niter=niter, disp=self.disp, seed=1234)
|
| 228 |
+
tol = self.tol
|
| 229 |
+
if method == 'COBYLA':
|
| 230 |
+
tol = 2
|
| 231 |
+
assert_almost_equal(res.x, self.sol[i], decimal=tol)
|
| 232 |
+
|
| 233 |
+
def test_pass_takestep(self):
|
| 234 |
+
# test that passing a custom takestep works
|
| 235 |
+
# also test that the stepsize is being adjusted
|
| 236 |
+
takestep = MyTakeStep1()
|
| 237 |
+
initial_step_size = takestep.stepsize
|
| 238 |
+
i = 1
|
| 239 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
| 240 |
+
niter=self.niter, disp=self.disp,
|
| 241 |
+
take_step=takestep)
|
| 242 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
| 243 |
+
assert_(takestep.been_called)
|
| 244 |
+
# make sure that the build in adaptive step size has been used
|
| 245 |
+
assert_(initial_step_size != takestep.stepsize)
|
| 246 |
+
|
| 247 |
+
def test_pass_simple_takestep(self):
|
| 248 |
+
# test that passing a custom takestep without attribute stepsize
|
| 249 |
+
takestep = myTakeStep2
|
| 250 |
+
i = 1
|
| 251 |
+
res = basinhopping(func2d_nograd, self.x0[i],
|
| 252 |
+
minimizer_kwargs=self.kwargs_nograd,
|
| 253 |
+
niter=self.niter, disp=self.disp,
|
| 254 |
+
take_step=takestep)
|
| 255 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
| 256 |
+
|
| 257 |
+
def test_pass_accept_test(self):
|
| 258 |
+
# test passing a custom accept test
|
| 259 |
+
# makes sure it's being used and ensures all the possible return values
|
| 260 |
+
# are accepted.
|
| 261 |
+
accept_test = MyAcceptTest()
|
| 262 |
+
i = 1
|
| 263 |
+
# there's no point in running it more than a few steps.
|
| 264 |
+
basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
| 265 |
+
niter=10, disp=self.disp, accept_test=accept_test)
|
| 266 |
+
assert_(accept_test.been_called)
|
| 267 |
+
|
| 268 |
+
def test_pass_callback(self):
|
| 269 |
+
# test passing a custom callback function
|
| 270 |
+
# This makes sure it's being used. It also returns True after 10 steps
|
| 271 |
+
# to ensure that it's stopping early.
|
| 272 |
+
callback = MyCallBack()
|
| 273 |
+
i = 1
|
| 274 |
+
# there's no point in running it more than a few steps.
|
| 275 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
| 276 |
+
niter=30, disp=self.disp, callback=callback)
|
| 277 |
+
assert_(callback.been_called)
|
| 278 |
+
assert_("callback" in res.message[0])
|
| 279 |
+
# One of the calls of MyCallBack is during BasinHoppingRunner
|
| 280 |
+
# construction, so there are only 9 remaining before MyCallBack stops
|
| 281 |
+
# the minimization.
|
| 282 |
+
assert_equal(res.nit, 9)
|
| 283 |
+
|
| 284 |
+
def test_minimizer_fail(self):
|
| 285 |
+
# test if a minimizer fails
|
| 286 |
+
i = 1
|
| 287 |
+
self.kwargs["options"] = dict(maxiter=0)
|
| 288 |
+
self.niter = 10
|
| 289 |
+
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
|
| 290 |
+
niter=self.niter, disp=self.disp)
|
| 291 |
+
# the number of failed minimizations should be the number of
|
| 292 |
+
# iterations + 1
|
| 293 |
+
assert_equal(res.nit + 1, res.minimization_failures)
|
| 294 |
+
|
| 295 |
+
def test_niter_zero(self):
|
| 296 |
+
# gh5915, what happens if you call basinhopping with niter=0
|
| 297 |
+
i = 0
|
| 298 |
+
basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
|
| 299 |
+
niter=0, disp=self.disp)
|
| 300 |
+
|
| 301 |
+
def test_rng_reproducibility(self):
|
| 302 |
+
# rng should ensure reproducibility between runs
|
| 303 |
+
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
|
| 304 |
+
|
| 305 |
+
f_1 = []
|
| 306 |
+
|
| 307 |
+
def callback(x, f, accepted):
|
| 308 |
+
f_1.append(f)
|
| 309 |
+
|
| 310 |
+
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
|
| 311 |
+
niter=10, callback=callback, rng=10)
|
| 312 |
+
|
| 313 |
+
f_2 = []
|
| 314 |
+
|
| 315 |
+
def callback2(x, f, accepted):
|
| 316 |
+
f_2.append(f)
|
| 317 |
+
|
| 318 |
+
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
|
| 319 |
+
niter=10, callback=callback2, rng=10)
|
| 320 |
+
assert_equal(np.array(f_1), np.array(f_2))
|
| 321 |
+
|
| 322 |
+
def test_random_gen(self):
|
| 323 |
+
# check that np.random.Generator can be used (numpy >= 1.17)
|
| 324 |
+
rng = np.random.default_rng(1)
|
| 325 |
+
|
| 326 |
+
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
|
| 327 |
+
|
| 328 |
+
res1 = basinhopping(func2d, [1.0, 1.0],
|
| 329 |
+
minimizer_kwargs=minimizer_kwargs,
|
| 330 |
+
niter=10, rng=rng)
|
| 331 |
+
|
| 332 |
+
rng = np.random.default_rng(1)
|
| 333 |
+
res2 = basinhopping(func2d, [1.0, 1.0],
|
| 334 |
+
minimizer_kwargs=minimizer_kwargs,
|
| 335 |
+
niter=10, rng=rng)
|
| 336 |
+
assert_equal(res1.x, res2.x)
|
| 337 |
+
|
| 338 |
+
def test_monotonic_basin_hopping(self):
|
| 339 |
+
# test 1-D minimizations with gradient and T=0
|
| 340 |
+
i = 0
|
| 341 |
+
|
| 342 |
+
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
|
| 343 |
+
niter=self.niter, disp=self.disp, T=0)
|
| 344 |
+
assert_almost_equal(res.x, self.sol[i], self.tol)
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
@pytest.mark.thread_unsafe
|
| 348 |
+
class Test_Storage:
|
| 349 |
+
def setup_method(self):
|
| 350 |
+
self.x0 = np.array(1)
|
| 351 |
+
self.f0 = 0
|
| 352 |
+
|
| 353 |
+
minres = OptimizeResult(success=True)
|
| 354 |
+
minres.x = self.x0
|
| 355 |
+
minres.fun = self.f0
|
| 356 |
+
|
| 357 |
+
self.storage = Storage(minres)
|
| 358 |
+
|
| 359 |
+
def test_higher_f_rejected(self):
|
| 360 |
+
new_minres = OptimizeResult(success=True)
|
| 361 |
+
new_minres.x = self.x0 + 1
|
| 362 |
+
new_minres.fun = self.f0 + 1
|
| 363 |
+
|
| 364 |
+
ret = self.storage.update(new_minres)
|
| 365 |
+
minres = self.storage.get_lowest()
|
| 366 |
+
assert_equal(self.x0, minres.x)
|
| 367 |
+
assert_equal(self.f0, minres.fun)
|
| 368 |
+
assert_(not ret)
|
| 369 |
+
|
| 370 |
+
@pytest.mark.parametrize('success', [True, False])
|
| 371 |
+
def test_lower_f_accepted(self, success):
|
| 372 |
+
new_minres = OptimizeResult(success=success)
|
| 373 |
+
new_minres.x = self.x0 + 1
|
| 374 |
+
new_minres.fun = self.f0 - 1
|
| 375 |
+
|
| 376 |
+
ret = self.storage.update(new_minres)
|
| 377 |
+
minres = self.storage.get_lowest()
|
| 378 |
+
assert (self.x0 != minres.x) == success # can't use `is`
|
| 379 |
+
assert (self.f0 != minres.fun) == success # left side is NumPy bool
|
| 380 |
+
assert ret is success
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
class Test_RandomDisplacement:
|
| 384 |
+
def setup_method(self):
|
| 385 |
+
self.stepsize = 1.0
|
| 386 |
+
self.N = 300000
|
| 387 |
+
|
| 388 |
+
def test_random(self):
|
| 389 |
+
# the mean should be 0
|
| 390 |
+
# the variance should be (2*stepsize)**2 / 12
|
| 391 |
+
# note these tests are random, they will fail from time to time
|
| 392 |
+
rng = np.random.RandomState(0)
|
| 393 |
+
x0 = np.zeros([self.N])
|
| 394 |
+
displace = RandomDisplacement(stepsize=self.stepsize, rng=rng)
|
| 395 |
+
x = displace(x0)
|
| 396 |
+
v = (2. * self.stepsize) ** 2 / 12
|
| 397 |
+
assert_almost_equal(np.mean(x), 0., 1)
|
| 398 |
+
assert_almost_equal(np.var(x), v, 1)
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
class Test_Metropolis:
|
| 402 |
+
def setup_method(self):
|
| 403 |
+
self.T = 2.
|
| 404 |
+
self.met = Metropolis(self.T)
|
| 405 |
+
self.res_new = OptimizeResult(success=True, fun=0.)
|
| 406 |
+
self.res_old = OptimizeResult(success=True, fun=1.)
|
| 407 |
+
|
| 408 |
+
def test_boolean_return(self):
|
| 409 |
+
# the return must be a bool, else an error will be raised in
|
| 410 |
+
# basinhopping
|
| 411 |
+
ret = self.met(res_new=self.res_new, res_old=self.res_old)
|
| 412 |
+
assert isinstance(ret, bool)
|
| 413 |
+
|
| 414 |
+
def test_lower_f_accepted(self):
|
| 415 |
+
assert_(self.met(res_new=self.res_new, res_old=self.res_old))
|
| 416 |
+
|
| 417 |
+
def test_accept(self):
|
| 418 |
+
# test that steps are randomly accepted for f_new > f_old
|
| 419 |
+
one_accept = False
|
| 420 |
+
one_reject = False
|
| 421 |
+
for i in range(1000):
|
| 422 |
+
if one_accept and one_reject:
|
| 423 |
+
break
|
| 424 |
+
res_new = OptimizeResult(success=True, fun=1.)
|
| 425 |
+
res_old = OptimizeResult(success=True, fun=0.5)
|
| 426 |
+
ret = self.met(res_new=res_new, res_old=res_old)
|
| 427 |
+
if ret:
|
| 428 |
+
one_accept = True
|
| 429 |
+
else:
|
| 430 |
+
one_reject = True
|
| 431 |
+
assert_(one_accept)
|
| 432 |
+
assert_(one_reject)
|
| 433 |
+
|
| 434 |
+
def test_GH7495(self):
|
| 435 |
+
# an overflow in exp was producing a RuntimeWarning
|
| 436 |
+
# create own object here in case someone changes self.T
|
| 437 |
+
met = Metropolis(2)
|
| 438 |
+
res_new = OptimizeResult(success=True, fun=0.)
|
| 439 |
+
res_old = OptimizeResult(success=True, fun=2000)
|
| 440 |
+
with np.errstate(over='raise'):
|
| 441 |
+
met.accept_reject(res_new=res_new, res_old=res_old)
|
| 442 |
+
|
| 443 |
+
def test_gh7799(self):
|
| 444 |
+
# gh-7799 reported a problem in which local search was successful but
|
| 445 |
+
# basinhopping returned an invalid solution. Show that this is fixed.
|
| 446 |
+
def func(x):
|
| 447 |
+
return (x**2-8)**2+(x+2)**2
|
| 448 |
+
|
| 449 |
+
x0 = -4
|
| 450 |
+
limit = 50 # Constrain to func value >= 50
|
| 451 |
+
con = {'type': 'ineq', 'fun': lambda x: func(x) - limit},
|
| 452 |
+
res = basinhopping(
|
| 453 |
+
func,
|
| 454 |
+
x0,
|
| 455 |
+
30,
|
| 456 |
+
seed=np.random.RandomState(1234),
|
| 457 |
+
minimizer_kwargs={'constraints': con}
|
| 458 |
+
)
|
| 459 |
+
assert res.success
|
| 460 |
+
assert_allclose(res.fun, limit, rtol=1e-6)
|
| 461 |
+
|
| 462 |
+
def test_accept_gh7799(self):
|
| 463 |
+
# Metropolis should not accept the result of an unsuccessful new local
|
| 464 |
+
# search if the old local search was successful
|
| 465 |
+
|
| 466 |
+
met = Metropolis(0) # monotonic basin hopping
|
| 467 |
+
res_new = OptimizeResult(success=True, fun=0.)
|
| 468 |
+
res_old = OptimizeResult(success=True, fun=1.)
|
| 469 |
+
|
| 470 |
+
# if new local search was successful and energy is lower, accept
|
| 471 |
+
assert met(res_new=res_new, res_old=res_old)
|
| 472 |
+
# if new res is unsuccessful, don't accept - even if energy is lower
|
| 473 |
+
res_new.success = False
|
| 474 |
+
assert not met(res_new=res_new, res_old=res_old)
|
| 475 |
+
# ...unless the old res was unsuccessful, too. In that case, why not?
|
| 476 |
+
res_old.success = False
|
| 477 |
+
assert met(res_new=res_new, res_old=res_old)
|
| 478 |
+
|
| 479 |
+
def test_reject_all_gh7799(self):
|
| 480 |
+
# Test the behavior when there is no feasible solution
|
| 481 |
+
def fun(x):
|
| 482 |
+
return x@x
|
| 483 |
+
|
| 484 |
+
def constraint(x):
|
| 485 |
+
return x + 1
|
| 486 |
+
|
| 487 |
+
kwargs = {'constraints': {'type': 'eq', 'fun': constraint},
|
| 488 |
+
'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'}
|
| 489 |
+
res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs)
|
| 490 |
+
assert not res.success
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
class Test_AdaptiveStepsize:
|
| 494 |
+
def setup_method(self):
|
| 495 |
+
self.stepsize = 1.
|
| 496 |
+
self.ts = RandomDisplacement(stepsize=self.stepsize)
|
| 497 |
+
self.target_accept_rate = 0.5
|
| 498 |
+
self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
|
| 499 |
+
accept_rate=self.target_accept_rate)
|
| 500 |
+
|
| 501 |
+
def test_adaptive_increase(self):
|
| 502 |
+
# if few steps are rejected, the stepsize should increase
|
| 503 |
+
x = 0.
|
| 504 |
+
self.takestep(x)
|
| 505 |
+
self.takestep.report(False)
|
| 506 |
+
for i in range(self.takestep.interval):
|
| 507 |
+
self.takestep(x)
|
| 508 |
+
self.takestep.report(True)
|
| 509 |
+
assert_(self.ts.stepsize > self.stepsize)
|
| 510 |
+
|
| 511 |
+
def test_adaptive_decrease(self):
|
| 512 |
+
# if few steps are rejected, the stepsize should increase
|
| 513 |
+
x = 0.
|
| 514 |
+
self.takestep(x)
|
| 515 |
+
self.takestep.report(True)
|
| 516 |
+
for i in range(self.takestep.interval):
|
| 517 |
+
self.takestep(x)
|
| 518 |
+
self.takestep.report(False)
|
| 519 |
+
assert_(self.ts.stepsize < self.stepsize)
|
| 520 |
+
|
| 521 |
+
def test_all_accepted(self):
|
| 522 |
+
# test that everything works OK if all steps were accepted
|
| 523 |
+
x = 0.
|
| 524 |
+
for i in range(self.takestep.interval + 1):
|
| 525 |
+
self.takestep(x)
|
| 526 |
+
self.takestep.report(True)
|
| 527 |
+
assert_(self.ts.stepsize > self.stepsize)
|
| 528 |
+
|
| 529 |
+
def test_all_rejected(self):
|
| 530 |
+
# test that everything works OK if all steps were rejected
|
| 531 |
+
x = 0.
|
| 532 |
+
for i in range(self.takestep.interval + 1):
|
| 533 |
+
self.takestep(x)
|
| 534 |
+
self.takestep.report(False)
|
| 535 |
+
assert_(self.ts.stepsize < self.stepsize)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py
ADDED
|
@@ -0,0 +1,1703 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for the differential global minimization algorithm.
|
| 3 |
+
"""
|
| 4 |
+
import multiprocessing
|
| 5 |
+
from multiprocessing.dummy import Pool as ThreadPool
|
| 6 |
+
import platform
|
| 7 |
+
|
| 8 |
+
from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver,
|
| 9 |
+
_ConstraintWrapper)
|
| 10 |
+
from scipy.optimize import differential_evolution, OptimizeResult
|
| 11 |
+
from scipy.optimize._constraints import (Bounds, NonlinearConstraint,
|
| 12 |
+
LinearConstraint)
|
| 13 |
+
from scipy.optimize import rosen, minimize
|
| 14 |
+
from scipy.sparse import csr_matrix
|
| 15 |
+
from scipy import stats
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
|
| 19 |
+
assert_string_equal, assert_, suppress_warnings)
|
| 20 |
+
from pytest import raises as assert_raises, warns
|
| 21 |
+
import pytest
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class TestDifferentialEvolutionSolver:
|
| 25 |
+
|
| 26 |
+
def setup_method(self):
|
| 27 |
+
self.old_seterr = np.seterr(invalid='raise')
|
| 28 |
+
self.limits = np.array([[0., 0.],
|
| 29 |
+
[2., 2.]])
|
| 30 |
+
self.bounds = [(0., 2.), (0., 2.)]
|
| 31 |
+
|
| 32 |
+
self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
|
| 33 |
+
[(0, 100)])
|
| 34 |
+
|
| 35 |
+
# dummy_solver2 will be used to test mutation strategies
|
| 36 |
+
self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
|
| 37 |
+
[(0, 1)],
|
| 38 |
+
popsize=7,
|
| 39 |
+
mutation=0.5)
|
| 40 |
+
# create a population that's only 7 members long
|
| 41 |
+
# [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
|
| 42 |
+
population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T
|
| 43 |
+
self.dummy_solver2.population = population
|
| 44 |
+
|
| 45 |
+
def teardown_method(self):
|
| 46 |
+
np.seterr(**self.old_seterr)
|
| 47 |
+
|
| 48 |
+
def quadratic(self, x):
|
| 49 |
+
return x[0]**2
|
| 50 |
+
|
| 51 |
+
def test__strategy_resolves(self):
|
| 52 |
+
# test that the correct mutation function is resolved by
|
| 53 |
+
# different requested strategy arguments
|
| 54 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 55 |
+
self.bounds,
|
| 56 |
+
strategy='best1exp')
|
| 57 |
+
assert_equal(solver.strategy, 'best1exp')
|
| 58 |
+
assert_equal(solver.mutation_func.__name__, '_best1')
|
| 59 |
+
|
| 60 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 61 |
+
self.bounds,
|
| 62 |
+
strategy='best1bin')
|
| 63 |
+
assert_equal(solver.strategy, 'best1bin')
|
| 64 |
+
assert_equal(solver.mutation_func.__name__, '_best1')
|
| 65 |
+
|
| 66 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 67 |
+
self.bounds,
|
| 68 |
+
strategy='rand1bin')
|
| 69 |
+
assert_equal(solver.strategy, 'rand1bin')
|
| 70 |
+
assert_equal(solver.mutation_func.__name__, '_rand1')
|
| 71 |
+
|
| 72 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 73 |
+
self.bounds,
|
| 74 |
+
strategy='rand1exp')
|
| 75 |
+
assert_equal(solver.strategy, 'rand1exp')
|
| 76 |
+
assert_equal(solver.mutation_func.__name__, '_rand1')
|
| 77 |
+
|
| 78 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 79 |
+
self.bounds,
|
| 80 |
+
strategy='rand2exp')
|
| 81 |
+
assert_equal(solver.strategy, 'rand2exp')
|
| 82 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
| 83 |
+
|
| 84 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 85 |
+
self.bounds,
|
| 86 |
+
strategy='best2bin')
|
| 87 |
+
assert_equal(solver.strategy, 'best2bin')
|
| 88 |
+
assert_equal(solver.mutation_func.__name__, '_best2')
|
| 89 |
+
|
| 90 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 91 |
+
self.bounds,
|
| 92 |
+
strategy='rand2bin')
|
| 93 |
+
assert_equal(solver.strategy, 'rand2bin')
|
| 94 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
| 95 |
+
|
| 96 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 97 |
+
self.bounds,
|
| 98 |
+
strategy='rand2exp')
|
| 99 |
+
assert_equal(solver.strategy, 'rand2exp')
|
| 100 |
+
assert_equal(solver.mutation_func.__name__, '_rand2')
|
| 101 |
+
|
| 102 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 103 |
+
self.bounds,
|
| 104 |
+
strategy='randtobest1bin')
|
| 105 |
+
assert_equal(solver.strategy, 'randtobest1bin')
|
| 106 |
+
assert_equal(solver.mutation_func.__name__, '_randtobest1')
|
| 107 |
+
|
| 108 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 109 |
+
self.bounds,
|
| 110 |
+
strategy='randtobest1exp')
|
| 111 |
+
assert_equal(solver.strategy, 'randtobest1exp')
|
| 112 |
+
assert_equal(solver.mutation_func.__name__, '_randtobest1')
|
| 113 |
+
|
| 114 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 115 |
+
self.bounds,
|
| 116 |
+
strategy='currenttobest1bin')
|
| 117 |
+
assert_equal(solver.strategy, 'currenttobest1bin')
|
| 118 |
+
assert_equal(solver.mutation_func.__name__, '_currenttobest1')
|
| 119 |
+
|
| 120 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 121 |
+
self.bounds,
|
| 122 |
+
strategy='currenttobest1exp')
|
| 123 |
+
assert_equal(solver.strategy, 'currenttobest1exp')
|
| 124 |
+
assert_equal(solver.mutation_func.__name__, '_currenttobest1')
|
| 125 |
+
|
| 126 |
+
def test__mutate1(self):
|
| 127 |
+
# strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
|
| 128 |
+
result = np.array([0.05])
|
| 129 |
+
trial = self.dummy_solver2._best1(np.array([2, 3, 4, 5, 6]))
|
| 130 |
+
assert_allclose(trial, result)
|
| 131 |
+
|
| 132 |
+
result = np.array([0.25])
|
| 133 |
+
trial = self.dummy_solver2._rand1(np.array([2, 3, 4, 5, 6]))
|
| 134 |
+
assert_allclose(trial, result)
|
| 135 |
+
|
| 136 |
+
def test__mutate2(self):
|
| 137 |
+
# strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
|
| 138 |
+
# [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
|
| 139 |
+
|
| 140 |
+
result = np.array([-0.1])
|
| 141 |
+
trial = self.dummy_solver2._best2(np.array([2, 3, 4, 5, 6]))
|
| 142 |
+
assert_allclose(trial, result)
|
| 143 |
+
|
| 144 |
+
result = np.array([0.1])
|
| 145 |
+
trial = self.dummy_solver2._rand2(np.array([2, 3, 4, 5, 6]))
|
| 146 |
+
assert_allclose(trial, result)
|
| 147 |
+
|
| 148 |
+
def test__randtobest1(self):
|
| 149 |
+
# strategies randtobest/1/*
|
| 150 |
+
result = np.array([0.15])
|
| 151 |
+
trial = self.dummy_solver2._randtobest1(np.array([2, 3, 4, 5, 6]))
|
| 152 |
+
assert_allclose(trial, result)
|
| 153 |
+
|
| 154 |
+
def test__currenttobest1(self):
|
| 155 |
+
# strategies currenttobest/1/*
|
| 156 |
+
result = np.array([0.1])
|
| 157 |
+
trial = self.dummy_solver2._currenttobest1(
|
| 158 |
+
1,
|
| 159 |
+
np.array([2, 3, 4, 5, 6])
|
| 160 |
+
)
|
| 161 |
+
assert_allclose(trial, result)
|
| 162 |
+
|
| 163 |
+
def test_can_init_with_dithering(self):
|
| 164 |
+
mutation = (0.5, 1)
|
| 165 |
+
solver = DifferentialEvolutionSolver(self.quadratic,
|
| 166 |
+
self.bounds,
|
| 167 |
+
mutation=mutation)
|
| 168 |
+
|
| 169 |
+
assert_equal(solver.dither, list(mutation))
|
| 170 |
+
|
| 171 |
+
def test_invalid_mutation_values_arent_accepted(self):
|
| 172 |
+
func = rosen
|
| 173 |
+
mutation = (0.5, 3)
|
| 174 |
+
assert_raises(ValueError,
|
| 175 |
+
DifferentialEvolutionSolver,
|
| 176 |
+
func,
|
| 177 |
+
self.bounds,
|
| 178 |
+
mutation=mutation)
|
| 179 |
+
|
| 180 |
+
mutation = (-1, 1)
|
| 181 |
+
assert_raises(ValueError,
|
| 182 |
+
DifferentialEvolutionSolver,
|
| 183 |
+
func,
|
| 184 |
+
self.bounds,
|
| 185 |
+
mutation=mutation)
|
| 186 |
+
|
| 187 |
+
mutation = (0.1, np.nan)
|
| 188 |
+
assert_raises(ValueError,
|
| 189 |
+
DifferentialEvolutionSolver,
|
| 190 |
+
func,
|
| 191 |
+
self.bounds,
|
| 192 |
+
mutation=mutation)
|
| 193 |
+
|
| 194 |
+
mutation = 0.5
|
| 195 |
+
solver = DifferentialEvolutionSolver(func,
|
| 196 |
+
self.bounds,
|
| 197 |
+
mutation=mutation)
|
| 198 |
+
assert_equal(0.5, solver.scale)
|
| 199 |
+
assert_equal(None, solver.dither)
|
| 200 |
+
|
| 201 |
+
def test_invalid_functional(self):
|
| 202 |
+
def func(x):
|
| 203 |
+
return np.array([np.sum(x ** 2), np.sum(x)])
|
| 204 |
+
|
| 205 |
+
with assert_raises(
|
| 206 |
+
RuntimeError,
|
| 207 |
+
match=r"func\(x, \*args\) must return a scalar value"):
|
| 208 |
+
differential_evolution(func, [(-2, 2), (-2, 2)])
|
| 209 |
+
|
| 210 |
+
def test__scale_parameters(self):
|
| 211 |
+
trial = np.array([0.3])
|
| 212 |
+
assert_equal(30, self.dummy_solver._scale_parameters(trial))
|
| 213 |
+
|
| 214 |
+
# it should also work with the limits reversed
|
| 215 |
+
self.dummy_solver.limits = np.array([[100], [0.]])
|
| 216 |
+
assert_equal(30, self.dummy_solver._scale_parameters(trial))
|
| 217 |
+
|
| 218 |
+
def test__unscale_parameters(self):
|
| 219 |
+
trial = np.array([30])
|
| 220 |
+
assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
|
| 221 |
+
|
| 222 |
+
# it should also work with the limits reversed
|
| 223 |
+
self.dummy_solver.limits = np.array([[100], [0.]])
|
| 224 |
+
assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
|
| 225 |
+
|
| 226 |
+
def test_equal_bounds(self):
|
| 227 |
+
with np.errstate(invalid='raise'):
|
| 228 |
+
solver = DifferentialEvolutionSolver(
|
| 229 |
+
self.quadratic,
|
| 230 |
+
bounds=[(2.0, 2.0), (1.0, 3.0)]
|
| 231 |
+
)
|
| 232 |
+
v = solver._unscale_parameters([2.0, 2.0])
|
| 233 |
+
assert_allclose(v, 0.5)
|
| 234 |
+
|
| 235 |
+
res = differential_evolution(self.quadratic, [(2.0, 2.0), (3.0, 3.0)])
|
| 236 |
+
assert_equal(res.x, [2.0, 3.0])
|
| 237 |
+
|
| 238 |
+
def test__ensure_constraint(self):
|
| 239 |
+
trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])
|
| 240 |
+
self.dummy_solver._ensure_constraint(trial)
|
| 241 |
+
|
| 242 |
+
assert_equal(trial[2], 0.9)
|
| 243 |
+
assert_(np.logical_and(trial >= 0, trial <= 1).all())
|
| 244 |
+
|
| 245 |
+
def test_differential_evolution(self):
|
| 246 |
+
# test that the Jmin of DifferentialEvolutionSolver
|
| 247 |
+
# is the same as the function evaluation
|
| 248 |
+
solver = DifferentialEvolutionSolver(
|
| 249 |
+
self.quadratic, [(-2, 2)], maxiter=1, polish=False
|
| 250 |
+
)
|
| 251 |
+
result = solver.solve()
|
| 252 |
+
assert_equal(result.fun, self.quadratic(result.x))
|
| 253 |
+
|
| 254 |
+
solver = DifferentialEvolutionSolver(
|
| 255 |
+
self.quadratic, [(-2, 2)], maxiter=1, polish=True
|
| 256 |
+
)
|
| 257 |
+
result = solver.solve()
|
| 258 |
+
assert_equal(result.fun, self.quadratic(result.x))
|
| 259 |
+
|
| 260 |
+
def test_best_solution_retrieval(self):
|
| 261 |
+
# test that the getter property method for the best solution works.
|
| 262 |
+
solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
|
| 263 |
+
result = solver.solve()
|
| 264 |
+
assert_equal(result.x, solver.x)
|
| 265 |
+
|
| 266 |
+
def test_intermediate_result(self):
|
| 267 |
+
# Check that intermediate result object passed into the callback
|
| 268 |
+
# function contains the expected information and that raising
|
| 269 |
+
# `StopIteration` causes the expected behavior.
|
| 270 |
+
maxiter = 10
|
| 271 |
+
|
| 272 |
+
def func(x):
|
| 273 |
+
val = rosen(x)
|
| 274 |
+
if val < func.val:
|
| 275 |
+
func.x = x
|
| 276 |
+
func.val = val
|
| 277 |
+
return val
|
| 278 |
+
func.x = None
|
| 279 |
+
func.val = np.inf
|
| 280 |
+
|
| 281 |
+
def callback(intermediate_result):
|
| 282 |
+
callback.nit += 1
|
| 283 |
+
callback.intermediate_result = intermediate_result
|
| 284 |
+
assert intermediate_result.population.ndim == 2
|
| 285 |
+
assert intermediate_result.population.shape[1] == 2
|
| 286 |
+
assert intermediate_result.nit == callback.nit
|
| 287 |
+
|
| 288 |
+
# Check that `x` and `fun` attributes are the best found so far
|
| 289 |
+
assert_equal(intermediate_result.x, callback.func.x)
|
| 290 |
+
assert_equal(intermediate_result.fun, callback.func.val)
|
| 291 |
+
|
| 292 |
+
# Check for consistency between `fun`, `population_energies`,
|
| 293 |
+
# `x`, and `population`
|
| 294 |
+
assert_equal(intermediate_result.fun, rosen(intermediate_result.x))
|
| 295 |
+
for i in range(len(intermediate_result.population_energies)):
|
| 296 |
+
res = intermediate_result.population_energies[i]
|
| 297 |
+
ref = rosen(intermediate_result.population[i])
|
| 298 |
+
assert_equal(res, ref)
|
| 299 |
+
assert_equal(intermediate_result.x,
|
| 300 |
+
intermediate_result.population[0])
|
| 301 |
+
assert_equal(intermediate_result.fun,
|
| 302 |
+
intermediate_result.population_energies[0])
|
| 303 |
+
|
| 304 |
+
assert intermediate_result.message == 'in progress'
|
| 305 |
+
assert intermediate_result.success is True
|
| 306 |
+
assert isinstance(intermediate_result, OptimizeResult)
|
| 307 |
+
if callback.nit == maxiter:
|
| 308 |
+
raise StopIteration
|
| 309 |
+
callback.nit = 0
|
| 310 |
+
callback.intermediate_result = None
|
| 311 |
+
callback.func = func
|
| 312 |
+
|
| 313 |
+
bounds = [(0, 2), (0, 2)]
|
| 314 |
+
kwargs = dict(func=func, bounds=bounds, rng=838245, polish=False)
|
| 315 |
+
res = differential_evolution(**kwargs, callback=callback)
|
| 316 |
+
ref = differential_evolution(**kwargs, maxiter=maxiter)
|
| 317 |
+
|
| 318 |
+
# Check that final `intermediate_result` is equivalent to returned
|
| 319 |
+
# result object and that terminating with callback `StopIteration`
|
| 320 |
+
# after `maxiter` iterations is equivalent to terminating with
|
| 321 |
+
# `maxiter` parameter.
|
| 322 |
+
assert res.success is ref.success is False
|
| 323 |
+
assert callback.nit == res.nit == maxiter
|
| 324 |
+
assert res.message == 'callback function requested stop early'
|
| 325 |
+
assert ref.message == 'Maximum number of iterations has been exceeded.'
|
| 326 |
+
for field, val in ref.items():
|
| 327 |
+
if field in {'message', 'success'}: # checked separately
|
| 328 |
+
continue
|
| 329 |
+
assert_equal(callback.intermediate_result[field], val)
|
| 330 |
+
assert_equal(res[field], val)
|
| 331 |
+
|
| 332 |
+
# Check that polish occurs after `StopIteration` as advertised
|
| 333 |
+
callback.nit = 0
|
| 334 |
+
func.val = np.inf
|
| 335 |
+
kwargs['polish'] = True
|
| 336 |
+
res = differential_evolution(**kwargs, callback=callback)
|
| 337 |
+
assert res.fun < ref.fun
|
| 338 |
+
|
| 339 |
+
def test_callback_terminates(self):
|
| 340 |
+
# test that if the callback returns true, then the minimization halts
|
| 341 |
+
bounds = [(0, 2), (0, 2)]
|
| 342 |
+
expected_msg = 'callback function requested stop early'
|
| 343 |
+
def callback_python_true(param, convergence=0.):
|
| 344 |
+
return True
|
| 345 |
+
|
| 346 |
+
result = differential_evolution(
|
| 347 |
+
rosen, bounds, callback=callback_python_true
|
| 348 |
+
)
|
| 349 |
+
assert_string_equal(result.message, expected_msg)
|
| 350 |
+
|
| 351 |
+
# if callback raises StopIteration then solve should be interrupted
|
| 352 |
+
def callback_stop(intermediate_result):
|
| 353 |
+
raise StopIteration
|
| 354 |
+
|
| 355 |
+
result = differential_evolution(rosen, bounds, callback=callback_stop)
|
| 356 |
+
assert not result.success
|
| 357 |
+
|
| 358 |
+
def callback_evaluates_true(param, convergence=0.):
|
| 359 |
+
# DE should stop if bool(self.callback) is True
|
| 360 |
+
return [10]
|
| 361 |
+
|
| 362 |
+
result = differential_evolution(rosen, bounds, callback=callback_evaluates_true)
|
| 363 |
+
assert_string_equal(result.message, expected_msg)
|
| 364 |
+
assert not result.success
|
| 365 |
+
|
| 366 |
+
def callback_evaluates_false(param, convergence=0.):
|
| 367 |
+
return []
|
| 368 |
+
|
| 369 |
+
result = differential_evolution(rosen, bounds,
|
| 370 |
+
callback=callback_evaluates_false)
|
| 371 |
+
assert result.success
|
| 372 |
+
|
| 373 |
+
def test_args_tuple_is_passed(self):
|
| 374 |
+
# test that the args tuple is passed to the cost function properly.
|
| 375 |
+
bounds = [(-10, 10)]
|
| 376 |
+
args = (1., 2., 3.)
|
| 377 |
+
|
| 378 |
+
def quadratic(x, *args):
|
| 379 |
+
if not isinstance(args, tuple):
|
| 380 |
+
raise ValueError('args should be a tuple')
|
| 381 |
+
return args[0] + args[1] * x + args[2] * x**2.
|
| 382 |
+
|
| 383 |
+
result = differential_evolution(quadratic,
|
| 384 |
+
bounds,
|
| 385 |
+
args=args,
|
| 386 |
+
polish=True)
|
| 387 |
+
assert_almost_equal(result.fun, 2 / 3.)
|
| 388 |
+
|
| 389 |
+
def test_init_with_invalid_strategy(self):
|
| 390 |
+
# test that passing an invalid strategy raises ValueError
|
| 391 |
+
func = rosen
|
| 392 |
+
bounds = [(-3, 3)]
|
| 393 |
+
assert_raises(ValueError,
|
| 394 |
+
differential_evolution,
|
| 395 |
+
func,
|
| 396 |
+
bounds,
|
| 397 |
+
strategy='abc')
|
| 398 |
+
|
| 399 |
+
def test_bounds_checking(self):
|
| 400 |
+
# test that the bounds checking works
|
| 401 |
+
func = rosen
|
| 402 |
+
bounds = [(-3)]
|
| 403 |
+
assert_raises(ValueError,
|
| 404 |
+
differential_evolution,
|
| 405 |
+
func,
|
| 406 |
+
bounds)
|
| 407 |
+
bounds = [(-3, 3), (3, 4, 5)]
|
| 408 |
+
assert_raises(ValueError,
|
| 409 |
+
differential_evolution,
|
| 410 |
+
func,
|
| 411 |
+
bounds)
|
| 412 |
+
|
| 413 |
+
# test that we can use a new-type Bounds object
|
| 414 |
+
result = differential_evolution(rosen, Bounds([0, 0], [2, 2]))
|
| 415 |
+
assert_almost_equal(result.x, (1., 1.))
|
| 416 |
+
|
| 417 |
+
def test_select_samples(self):
|
| 418 |
+
# select_samples should return 5 separate random numbers.
|
| 419 |
+
limits = np.arange(12., dtype='float64').reshape(2, 6)
|
| 420 |
+
bounds = list(zip(limits[0, :], limits[1, :]))
|
| 421 |
+
solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
|
| 422 |
+
candidate = 0
|
| 423 |
+
r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
|
| 424 |
+
assert_equal(
|
| 425 |
+
len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
|
| 426 |
+
|
| 427 |
+
def test_maxiter_stops_solve(self):
|
| 428 |
+
# test that if the maximum number of iterations is exceeded
|
| 429 |
+
# the solver stops.
|
| 430 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
|
| 431 |
+
result = solver.solve()
|
| 432 |
+
assert_equal(result.success, False)
|
| 433 |
+
assert_equal(result.message,
|
| 434 |
+
'Maximum number of iterations has been exceeded.')
|
| 435 |
+
|
| 436 |
+
def test_maxfun_stops_solve(self):
|
| 437 |
+
# test that if the maximum number of function evaluations is exceeded
|
| 438 |
+
# during initialisation the solver stops
|
| 439 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
|
| 440 |
+
polish=False)
|
| 441 |
+
result = solver.solve()
|
| 442 |
+
|
| 443 |
+
assert_equal(result.nfev, 2)
|
| 444 |
+
assert_equal(result.success, False)
|
| 445 |
+
assert_equal(result.message,
|
| 446 |
+
'Maximum number of function evaluations has '
|
| 447 |
+
'been exceeded.')
|
| 448 |
+
|
| 449 |
+
# test that if the maximum number of function evaluations is exceeded
|
| 450 |
+
# during the actual minimisation, then the solver stops.
|
| 451 |
+
# Have to turn polishing off, as this will still occur even if maxfun
|
| 452 |
+
# is reached. For popsize=5 and len(bounds)=2, then there are only 10
|
| 453 |
+
# function evaluations during initialisation.
|
| 454 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 455 |
+
self.bounds,
|
| 456 |
+
popsize=5,
|
| 457 |
+
polish=False,
|
| 458 |
+
maxfun=40)
|
| 459 |
+
result = solver.solve()
|
| 460 |
+
|
| 461 |
+
assert_equal(result.nfev, 41)
|
| 462 |
+
assert_equal(result.success, False)
|
| 463 |
+
assert_equal(result.message,
|
| 464 |
+
'Maximum number of function evaluations has '
|
| 465 |
+
'been exceeded.')
|
| 466 |
+
|
| 467 |
+
# now repeat for updating='deferred version
|
| 468 |
+
# 47 function evaluations is not a multiple of the population size,
|
| 469 |
+
# so maxfun is reached partway through a population evaluation.
|
| 470 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 471 |
+
self.bounds,
|
| 472 |
+
popsize=5,
|
| 473 |
+
polish=False,
|
| 474 |
+
maxfun=47,
|
| 475 |
+
updating='deferred')
|
| 476 |
+
result = solver.solve()
|
| 477 |
+
|
| 478 |
+
assert_equal(result.nfev, 47)
|
| 479 |
+
assert_equal(result.success, False)
|
| 480 |
+
assert_equal(result.message,
|
| 481 |
+
'Maximum number of function evaluations has '
|
| 482 |
+
'been reached.')
|
| 483 |
+
|
| 484 |
+
def test_quadratic(self):
|
| 485 |
+
# test the quadratic function from object
|
| 486 |
+
solver = DifferentialEvolutionSolver(self.quadratic,
|
| 487 |
+
[(-100, 100)],
|
| 488 |
+
tol=0.02)
|
| 489 |
+
solver.solve()
|
| 490 |
+
assert_equal(np.argmin(solver.population_energies), 0)
|
| 491 |
+
|
| 492 |
+
def test_quadratic_from_diff_ev(self):
|
| 493 |
+
# test the quadratic function from differential_evolution function
|
| 494 |
+
differential_evolution(self.quadratic,
|
| 495 |
+
[(-100, 100)],
|
| 496 |
+
tol=0.02,
|
| 497 |
+
seed=1)
|
| 498 |
+
|
| 499 |
+
def test_rng_gives_repeatability(self):
|
| 500 |
+
result = differential_evolution(self.quadratic,
|
| 501 |
+
[(-100, 100)],
|
| 502 |
+
polish=False,
|
| 503 |
+
rng=1,
|
| 504 |
+
tol=0.5)
|
| 505 |
+
result2 = differential_evolution(self.quadratic,
|
| 506 |
+
[(-100, 100)],
|
| 507 |
+
polish=False,
|
| 508 |
+
rng=1,
|
| 509 |
+
tol=0.5)
|
| 510 |
+
assert_equal(result.x, result2.x)
|
| 511 |
+
assert_equal(result.nfev, result2.nfev)
|
| 512 |
+
|
| 513 |
+
def test_random_generator(self):
|
| 514 |
+
# check that np.random.Generator can be used (numpy >= 1.17)
|
| 515 |
+
# obtain a np.random.Generator object
|
| 516 |
+
rng = np.random.default_rng()
|
| 517 |
+
|
| 518 |
+
inits = ['random', 'latinhypercube', 'sobol', 'halton']
|
| 519 |
+
for init in inits:
|
| 520 |
+
differential_evolution(self.quadratic,
|
| 521 |
+
[(-100, 100)],
|
| 522 |
+
polish=False,
|
| 523 |
+
rng=rng,
|
| 524 |
+
tol=0.5,
|
| 525 |
+
init=init)
|
| 526 |
+
|
| 527 |
+
def test_exp_runs(self):
|
| 528 |
+
# test whether exponential mutation loop runs
|
| 529 |
+
solver = DifferentialEvolutionSolver(rosen,
|
| 530 |
+
self.bounds,
|
| 531 |
+
strategy='best1exp',
|
| 532 |
+
maxiter=1)
|
| 533 |
+
|
| 534 |
+
solver.solve()
|
| 535 |
+
|
| 536 |
+
def test_gh_4511_regression(self):
|
| 537 |
+
# This modification of the differential evolution docstring example
|
| 538 |
+
# uses a custom popsize that had triggered an off-by-one error.
|
| 539 |
+
# Because we do not care about solving the optimization problem in
|
| 540 |
+
# this test, we use maxiter=1 to reduce the testing time.
|
| 541 |
+
bounds = [(-5, 5), (-5, 5)]
|
| 542 |
+
# result = differential_evolution(rosen, bounds, popsize=1815,
|
| 543 |
+
# maxiter=1)
|
| 544 |
+
|
| 545 |
+
# the original issue arose because of rounding error in arange, with
|
| 546 |
+
# linspace being a much better solution. 1815 is quite a large popsize
|
| 547 |
+
# to use and results in a long test time (~13s). I used the original
|
| 548 |
+
# issue to figure out the lowest number of samples that would cause
|
| 549 |
+
# this rounding error to occur, 49.
|
| 550 |
+
differential_evolution(rosen, bounds, popsize=49, maxiter=1)
|
| 551 |
+
|
| 552 |
+
def test_calculate_population_energies(self):
|
| 553 |
+
# if popsize is 3, then the overall generation has size (6,)
|
| 554 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)
|
| 555 |
+
solver._calculate_population_energies(solver.population)
|
| 556 |
+
solver._promote_lowest_energy()
|
| 557 |
+
assert_equal(np.argmin(solver.population_energies), 0)
|
| 558 |
+
|
| 559 |
+
# initial calculation of the energies should require 6 nfev.
|
| 560 |
+
assert_equal(solver._nfev, 6)
|
| 561 |
+
|
| 562 |
+
def test_iteration(self):
|
| 563 |
+
# test that DifferentialEvolutionSolver is iterable
|
| 564 |
+
# if popsize is 3, then the overall generation has size (6,)
|
| 565 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,
|
| 566 |
+
maxfun=12)
|
| 567 |
+
x, fun = next(solver)
|
| 568 |
+
assert_equal(np.size(x, 0), 2)
|
| 569 |
+
|
| 570 |
+
# 6 nfev are required for initial calculation of energies, 6 nfev are
|
| 571 |
+
# required for the evolution of the 6 population members.
|
| 572 |
+
assert_equal(solver._nfev, 12)
|
| 573 |
+
|
| 574 |
+
# the next generation should halt because it exceeds maxfun
|
| 575 |
+
assert_raises(StopIteration, next, solver)
|
| 576 |
+
|
| 577 |
+
# check a proper minimisation can be done by an iterable solver
|
| 578 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds)
|
| 579 |
+
_, fun_prev = next(solver)
|
| 580 |
+
for i, soln in enumerate(solver):
|
| 581 |
+
x_current, fun_current = soln
|
| 582 |
+
assert fun_prev >= fun_current
|
| 583 |
+
_, fun_prev = x_current, fun_current
|
| 584 |
+
# need to have this otherwise the solver would never stop.
|
| 585 |
+
if i == 50:
|
| 586 |
+
break
|
| 587 |
+
|
| 588 |
+
def test_convergence(self):
|
| 589 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
|
| 590 |
+
polish=False)
|
| 591 |
+
solver.solve()
|
| 592 |
+
assert_(solver.convergence < 0.2)
|
| 593 |
+
|
| 594 |
+
def test_maxiter_none_GH5731(self):
|
| 595 |
+
# Pre 0.17 the previous default for maxiter and maxfun was None.
|
| 596 |
+
# the numerical defaults are now 1000 and np.inf. However, some scripts
|
| 597 |
+
# will still supply None for both of those, this will raise a TypeError
|
| 598 |
+
# in the solve method.
|
| 599 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
|
| 600 |
+
maxfun=None)
|
| 601 |
+
solver.solve()
|
| 602 |
+
|
| 603 |
+
def test_population_initiation(self):
|
| 604 |
+
# test the different modes of population initiation
|
| 605 |
+
|
| 606 |
+
# init must be either 'latinhypercube' or 'random'
|
| 607 |
+
# raising ValueError is something else is passed in
|
| 608 |
+
assert_raises(ValueError,
|
| 609 |
+
DifferentialEvolutionSolver,
|
| 610 |
+
*(rosen, self.bounds),
|
| 611 |
+
**{'init': 'rubbish'})
|
| 612 |
+
|
| 613 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds)
|
| 614 |
+
|
| 615 |
+
# check that population initiation:
|
| 616 |
+
# 1) resets _nfev to 0
|
| 617 |
+
# 2) all population energies are np.inf
|
| 618 |
+
solver.init_population_random()
|
| 619 |
+
assert_equal(solver._nfev, 0)
|
| 620 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
| 621 |
+
|
| 622 |
+
solver.init_population_lhs()
|
| 623 |
+
assert_equal(solver._nfev, 0)
|
| 624 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
| 625 |
+
|
| 626 |
+
solver.init_population_qmc(qmc_engine='halton')
|
| 627 |
+
assert_equal(solver._nfev, 0)
|
| 628 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
| 629 |
+
|
| 630 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol')
|
| 631 |
+
solver.init_population_qmc(qmc_engine='sobol')
|
| 632 |
+
assert_equal(solver._nfev, 0)
|
| 633 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
| 634 |
+
|
| 635 |
+
# we should be able to initialize with our own array
|
| 636 |
+
population = np.linspace(-1, 3, 10).reshape(5, 2)
|
| 637 |
+
solver = DifferentialEvolutionSolver(rosen, self.bounds,
|
| 638 |
+
init=population,
|
| 639 |
+
strategy='best2bin',
|
| 640 |
+
atol=0.01, rng=1, popsize=5)
|
| 641 |
+
|
| 642 |
+
assert_equal(solver._nfev, 0)
|
| 643 |
+
assert_(np.all(np.isinf(solver.population_energies)))
|
| 644 |
+
assert_(solver.num_population_members == 5)
|
| 645 |
+
assert_(solver.population_shape == (5, 2))
|
| 646 |
+
|
| 647 |
+
# check that the population was initialized correctly
|
| 648 |
+
unscaled_population = np.clip(solver._unscale_parameters(population),
|
| 649 |
+
0, 1)
|
| 650 |
+
assert_almost_equal(solver.population[:5], unscaled_population)
|
| 651 |
+
|
| 652 |
+
# population values need to be clipped to bounds
|
| 653 |
+
assert_almost_equal(np.min(solver.population[:5]), 0)
|
| 654 |
+
assert_almost_equal(np.max(solver.population[:5]), 1)
|
| 655 |
+
|
| 656 |
+
# shouldn't be able to initialize with an array if it's the wrong shape
|
| 657 |
+
# this would have too many parameters
|
| 658 |
+
population = np.linspace(-1, 3, 15).reshape(5, 3)
|
| 659 |
+
assert_raises(ValueError,
|
| 660 |
+
DifferentialEvolutionSolver,
|
| 661 |
+
*(rosen, self.bounds),
|
| 662 |
+
**{'init': population})
|
| 663 |
+
|
| 664 |
+
# provide an initial solution
|
| 665 |
+
# bounds are [(0, 2), (0, 2)]
|
| 666 |
+
x0 = np.random.uniform(low=0.0, high=2.0, size=2)
|
| 667 |
+
solver = DifferentialEvolutionSolver(
|
| 668 |
+
rosen, self.bounds, x0=x0
|
| 669 |
+
)
|
| 670 |
+
# parameters are scaled to unit interval
|
| 671 |
+
assert_allclose(solver.population[0], x0 / 2.0)
|
| 672 |
+
|
| 673 |
+
def test_x0(self):
|
| 674 |
+
# smoke test that checks that x0 is usable.
|
| 675 |
+
res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8])
|
| 676 |
+
assert res.success
|
| 677 |
+
|
| 678 |
+
# check what happens if some of the x0 lay outside the bounds
|
| 679 |
+
with assert_raises(ValueError):
|
| 680 |
+
differential_evolution(rosen, self.bounds, x0=[0.2, 2.1])
|
| 681 |
+
|
| 682 |
+
def test_infinite_objective_function(self):
|
| 683 |
+
# Test that there are no problems if the objective function
|
| 684 |
+
# returns inf on some runs
|
| 685 |
+
def sometimes_inf(x):
|
| 686 |
+
if x[0] < .5:
|
| 687 |
+
return np.inf
|
| 688 |
+
return x[1]
|
| 689 |
+
bounds = [(0, 1), (0, 1)]
|
| 690 |
+
differential_evolution(sometimes_inf, bounds=bounds, disp=False)
|
| 691 |
+
|
| 692 |
+
def test_deferred_updating(self):
|
| 693 |
+
# check setting of deferred updating, with default workers
|
| 694 |
+
bounds = [(0., 2.), (0., 2.)]
|
| 695 |
+
solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred')
|
| 696 |
+
assert_(solver._updating == 'deferred')
|
| 697 |
+
assert_(solver._mapwrapper._mapfunc is map)
|
| 698 |
+
res = solver.solve()
|
| 699 |
+
assert res.success
|
| 700 |
+
|
| 701 |
+
# check that deferred updating works with an exponential crossover
|
| 702 |
+
res = differential_evolution(
|
| 703 |
+
rosen, bounds, updating='deferred', strategy='best1exp'
|
| 704 |
+
)
|
| 705 |
+
assert res.success
|
| 706 |
+
|
| 707 |
+
@pytest.mark.thread_unsafe
|
| 708 |
+
def test_immediate_updating(self):
|
| 709 |
+
# check setting of immediate updating, with default workers
|
| 710 |
+
bounds = [(0., 2.), (0., 2.)]
|
| 711 |
+
solver = DifferentialEvolutionSolver(rosen, bounds)
|
| 712 |
+
assert_(solver._updating == 'immediate')
|
| 713 |
+
|
| 714 |
+
# Safely forking from a multithreaded process is
|
| 715 |
+
# problematic, and deprecated in Python 3.12, so
|
| 716 |
+
# we use a slower but portable alternative
|
| 717 |
+
# see gh-19848
|
| 718 |
+
ctx = multiprocessing.get_context("spawn")
|
| 719 |
+
with ctx.Pool(2) as p:
|
| 720 |
+
# should raise a UserWarning because the updating='immediate'
|
| 721 |
+
# is being overridden by the workers keyword
|
| 722 |
+
with warns(UserWarning):
|
| 723 |
+
with DifferentialEvolutionSolver(rosen, bounds, workers=p.map) as s:
|
| 724 |
+
solver.solve()
|
| 725 |
+
assert s._updating == 'deferred'
|
| 726 |
+
|
| 727 |
+
@pytest.mark.fail_slow(10)
|
| 728 |
+
def test_parallel(self):
|
| 729 |
+
# smoke test for parallelization with deferred updating
|
| 730 |
+
bounds = [(0., 2.), (0., 2.)]
|
| 731 |
+
# use threads instead of Process to speed things up for this simple example
|
| 732 |
+
with ThreadPool(2) as p, DifferentialEvolutionSolver(
|
| 733 |
+
rosen, bounds, updating='deferred', workers=p.map, tol=0.1, popsize=3
|
| 734 |
+
) as solver:
|
| 735 |
+
assert solver._mapwrapper.pool is not None
|
| 736 |
+
assert solver._updating == 'deferred'
|
| 737 |
+
solver.solve()
|
| 738 |
+
|
| 739 |
+
with DifferentialEvolutionSolver(
|
| 740 |
+
rosen, bounds, updating='deferred', workers=2, popsize=3, tol=0.1
|
| 741 |
+
) as solver:
|
| 742 |
+
assert solver._mapwrapper.pool is not None
|
| 743 |
+
assert solver._updating == 'deferred'
|
| 744 |
+
solver.solve()
|
| 745 |
+
|
| 746 |
+
def test_converged(self):
|
| 747 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])
|
| 748 |
+
solver.solve()
|
| 749 |
+
assert_(solver.converged())
|
| 750 |
+
|
| 751 |
+
def test_constraint_violation_fn(self):
|
| 752 |
+
def constr_f(x):
|
| 753 |
+
return [x[0] + x[1]]
|
| 754 |
+
|
| 755 |
+
def constr_f2(x):
|
| 756 |
+
return np.array([x[0]**2 + x[1], x[0] - x[1]])
|
| 757 |
+
|
| 758 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
| 759 |
+
|
| 760 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
| 761 |
+
constraints=(nlc,))
|
| 762 |
+
|
| 763 |
+
cv = solver._constraint_violation_fn(np.array([1.0, 1.0]))
|
| 764 |
+
assert_almost_equal(cv, 0.1)
|
| 765 |
+
|
| 766 |
+
nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
|
| 767 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
| 768 |
+
constraints=(nlc, nlc2))
|
| 769 |
+
|
| 770 |
+
# for multiple constraints the constraint violations should
|
| 771 |
+
# be concatenated.
|
| 772 |
+
xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)]
|
| 773 |
+
vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)]
|
| 774 |
+
|
| 775 |
+
for x, v in zip(xs, vs):
|
| 776 |
+
cv = solver._constraint_violation_fn(np.array(x))
|
| 777 |
+
assert_allclose(cv, np.atleast_2d(v))
|
| 778 |
+
|
| 779 |
+
# vectorized calculation of a series of solutions
|
| 780 |
+
assert_allclose(
|
| 781 |
+
solver._constraint_violation_fn(np.array(xs)), np.array(vs)
|
| 782 |
+
)
|
| 783 |
+
|
| 784 |
+
# the following line is used in _calculate_population_feasibilities.
|
| 785 |
+
# _constraint_violation_fn returns an (1, M) array when
|
| 786 |
+
# x.shape == (N,), i.e. a single solution. Therefore this list
|
| 787 |
+
# comprehension should generate (S, 1, M) array.
|
| 788 |
+
constraint_violation = np.array([solver._constraint_violation_fn(x)
|
| 789 |
+
for x in np.array(xs)])
|
| 790 |
+
assert constraint_violation.shape == (3, 1, 3)
|
| 791 |
+
|
| 792 |
+
# we need reasonable error messages if the constraint function doesn't
|
| 793 |
+
# return the right thing
|
| 794 |
+
def constr_f3(x):
|
| 795 |
+
# returns (S, M), rather than (M, S)
|
| 796 |
+
return constr_f2(x).T
|
| 797 |
+
|
| 798 |
+
nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8)
|
| 799 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
| 800 |
+
constraints=(nlc, nlc2),
|
| 801 |
+
vectorized=False)
|
| 802 |
+
solver.vectorized = True
|
| 803 |
+
with pytest.raises(
|
| 804 |
+
RuntimeError, match="An array returned from a Constraint"
|
| 805 |
+
):
|
| 806 |
+
solver._constraint_violation_fn(np.array(xs))
|
| 807 |
+
|
| 808 |
+
def test_constraint_population_feasibilities(self):
|
| 809 |
+
def constr_f(x):
|
| 810 |
+
return [x[0] + x[1]]
|
| 811 |
+
|
| 812 |
+
def constr_f2(x):
|
| 813 |
+
return [x[0]**2 + x[1], x[0] - x[1]]
|
| 814 |
+
|
| 815 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
| 816 |
+
|
| 817 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
| 818 |
+
constraints=(nlc,))
|
| 819 |
+
|
| 820 |
+
# are population feasibilities correct
|
| 821 |
+
# [0.5, 0.5] corresponds to scaled values of [1., 1.]
|
| 822 |
+
feas, cv = solver._calculate_population_feasibilities(
|
| 823 |
+
np.array([[0.5, 0.5], [1., 1.]]))
|
| 824 |
+
assert_equal(feas, [False, False])
|
| 825 |
+
assert_almost_equal(cv, np.array([[0.1], [2.1]]))
|
| 826 |
+
assert cv.shape == (2, 1)
|
| 827 |
+
|
| 828 |
+
nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
|
| 829 |
+
|
| 830 |
+
for vectorize in [False, True]:
|
| 831 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
| 832 |
+
constraints=(nlc, nlc2),
|
| 833 |
+
vectorized=vectorize,
|
| 834 |
+
updating='deferred')
|
| 835 |
+
|
| 836 |
+
feas, cv = solver._calculate_population_feasibilities(
|
| 837 |
+
np.array([[0.5, 0.5], [0.6, 0.5]]))
|
| 838 |
+
assert_equal(feas, [False, False])
|
| 839 |
+
assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]]))
|
| 840 |
+
|
| 841 |
+
feas, cv = solver._calculate_population_feasibilities(
|
| 842 |
+
np.array([[0.5, 0.5], [1., 1.]]))
|
| 843 |
+
assert_equal(feas, [False, False])
|
| 844 |
+
assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]]))
|
| 845 |
+
assert cv.shape == (2, 3)
|
| 846 |
+
|
| 847 |
+
feas, cv = solver._calculate_population_feasibilities(
|
| 848 |
+
np.array([[0.25, 0.25], [1., 1.]]))
|
| 849 |
+
assert_equal(feas, [True, False])
|
| 850 |
+
assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]]))
|
| 851 |
+
assert cv.shape == (2, 3)
|
| 852 |
+
|
| 853 |
+
@pytest.mark.thread_unsafe
|
| 854 |
+
def test_constraint_solve(self):
|
| 855 |
+
def constr_f(x):
|
| 856 |
+
return np.array([x[0] + x[1]])
|
| 857 |
+
|
| 858 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
| 859 |
+
|
| 860 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
| 861 |
+
constraints=(nlc,))
|
| 862 |
+
|
| 863 |
+
# trust-constr warns if the constraint function is linear
|
| 864 |
+
with warns(UserWarning):
|
| 865 |
+
res = solver.solve()
|
| 866 |
+
|
| 867 |
+
assert constr_f(res.x) <= 1.9
|
| 868 |
+
assert res.success
|
| 869 |
+
|
| 870 |
+
@pytest.mark.fail_slow(10)
|
| 871 |
+
@pytest.mark.thread_unsafe
|
| 872 |
+
def test_impossible_constraint(self):
|
| 873 |
+
def constr_f(x):
|
| 874 |
+
return np.array([x[0] + x[1]])
|
| 875 |
+
|
| 876 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, -1)
|
| 877 |
+
|
| 878 |
+
solver = DifferentialEvolutionSolver(
|
| 879 |
+
rosen, [(0, 2), (0, 2)], constraints=(nlc,), popsize=1, rng=1, maxiter=100
|
| 880 |
+
)
|
| 881 |
+
|
| 882 |
+
# a UserWarning is issued because the 'trust-constr' polishing is
|
| 883 |
+
# attempted on the least infeasible solution found.
|
| 884 |
+
with warns(UserWarning):
|
| 885 |
+
res = solver.solve()
|
| 886 |
+
|
| 887 |
+
assert res.maxcv > 0
|
| 888 |
+
assert not res.success
|
| 889 |
+
|
| 890 |
+
# test _promote_lowest_energy works when none of the population is
|
| 891 |
+
# feasible. In this case, the solution with the lowest constraint
|
| 892 |
+
# violation should be promoted.
|
| 893 |
+
solver = DifferentialEvolutionSolver(
|
| 894 |
+
rosen, [(0, 2), (0, 2)], constraints=(nlc,), polish=False)
|
| 895 |
+
next(solver)
|
| 896 |
+
assert not solver.feasible.all()
|
| 897 |
+
assert not np.isfinite(solver.population_energies).all()
|
| 898 |
+
|
| 899 |
+
# now swap two of the entries in the population
|
| 900 |
+
l = 20
|
| 901 |
+
cv = solver.constraint_violation[0]
|
| 902 |
+
|
| 903 |
+
solver.population_energies[[0, l]] = solver.population_energies[[l, 0]]
|
| 904 |
+
solver.population[[0, l], :] = solver.population[[l, 0], :]
|
| 905 |
+
solver.constraint_violation[[0, l], :] = (
|
| 906 |
+
solver.constraint_violation[[l, 0], :])
|
| 907 |
+
|
| 908 |
+
solver._promote_lowest_energy()
|
| 909 |
+
assert_equal(solver.constraint_violation[0], cv)
|
| 910 |
+
|
| 911 |
+
def test_accept_trial(self):
|
| 912 |
+
# _accept_trial(self, energy_trial, feasible_trial, cv_trial,
|
| 913 |
+
# energy_orig, feasible_orig, cv_orig)
|
| 914 |
+
def constr_f(x):
|
| 915 |
+
return [x[0] + x[1]]
|
| 916 |
+
nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
| 917 |
+
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
|
| 918 |
+
constraints=(nlc,))
|
| 919 |
+
fn = solver._accept_trial
|
| 920 |
+
# both solutions are feasible, select lower energy
|
| 921 |
+
assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.]))
|
| 922 |
+
assert (fn(1.0, True, np.array([0.0]), 0.1, True, np.array([0.0])) is False)
|
| 923 |
+
assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.]))
|
| 924 |
+
|
| 925 |
+
# trial is feasible, original is not
|
| 926 |
+
assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.]))
|
| 927 |
+
|
| 928 |
+
# trial and original are infeasible
|
| 929 |
+
# cv_trial have to be <= cv_original to be better
|
| 930 |
+
assert (fn(0.1, False, np.array([0.5, 0.5]),
|
| 931 |
+
1.0, False, np.array([1., 1.0])))
|
| 932 |
+
assert (fn(0.1, False, np.array([0.5, 0.5]),
|
| 933 |
+
1.0, False, np.array([1., 0.50])))
|
| 934 |
+
assert not (fn(1.0, False, np.array([0.5, 0.5]),
|
| 935 |
+
1.0, False, np.array([1.0, 0.4])))
|
| 936 |
+
|
| 937 |
+
def test_constraint_wrapper(self):
|
| 938 |
+
lb = np.array([0, 20, 30])
|
| 939 |
+
ub = np.array([0.5, np.inf, 70])
|
| 940 |
+
x0 = np.array([1, 2, 3])
|
| 941 |
+
pc = _ConstraintWrapper(Bounds(lb, ub), x0)
|
| 942 |
+
assert (pc.violation(x0) > 0).any()
|
| 943 |
+
assert (pc.violation([0.25, 21, 31]) == 0).all()
|
| 944 |
+
|
| 945 |
+
# check vectorized Bounds constraint
|
| 946 |
+
xs = np.arange(1, 16).reshape(5, 3)
|
| 947 |
+
violations = []
|
| 948 |
+
for x in xs:
|
| 949 |
+
violations.append(pc.violation(x))
|
| 950 |
+
np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
|
| 951 |
+
|
| 952 |
+
x0 = np.array([1, 2, 3, 4])
|
| 953 |
+
A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
|
| 954 |
+
pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0)
|
| 955 |
+
assert (pc.violation(x0) > 0).any()
|
| 956 |
+
assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
| 957 |
+
|
| 958 |
+
# check vectorized LinearConstraint, for 7 lots of parameter vectors
|
| 959 |
+
# with each parameter vector being 4 long, with 3 constraints
|
| 960 |
+
# xs is the same shape as stored in the differential evolution
|
| 961 |
+
# population, but it's sent to the violation function as (len(x), M)
|
| 962 |
+
xs = np.arange(1, 29).reshape(7, 4)
|
| 963 |
+
violations = []
|
| 964 |
+
for x in xs:
|
| 965 |
+
violations.append(pc.violation(x))
|
| 966 |
+
np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
|
| 967 |
+
|
| 968 |
+
pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0),
|
| 969 |
+
x0)
|
| 970 |
+
assert (pc.violation(x0) > 0).any()
|
| 971 |
+
assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
| 972 |
+
|
| 973 |
+
def fun(x):
|
| 974 |
+
return A.dot(x)
|
| 975 |
+
|
| 976 |
+
nonlinear = NonlinearConstraint(fun, -np.inf, 0)
|
| 977 |
+
pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4])
|
| 978 |
+
assert (pc.violation(x0) > 0).any()
|
| 979 |
+
assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
| 980 |
+
|
| 981 |
+
def test_constraint_wrapper_violation(self):
|
| 982 |
+
def cons_f(x):
|
| 983 |
+
# written in vectorised form to accept an array of (N, S)
|
| 984 |
+
# returning (M, S)
|
| 985 |
+
# where N is the number of parameters,
|
| 986 |
+
# S is the number of solution vectors to be examined,
|
| 987 |
+
# and M is the number of constraint components
|
| 988 |
+
return np.array([x[0] ** 2 + x[1],
|
| 989 |
+
x[0] ** 2 - x[1]])
|
| 990 |
+
|
| 991 |
+
nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
|
| 992 |
+
pc = _ConstraintWrapper(nlc, [0.5, 1])
|
| 993 |
+
assert np.size(pc.bounds[0]) == 2
|
| 994 |
+
|
| 995 |
+
xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)]
|
| 996 |
+
vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)]
|
| 997 |
+
|
| 998 |
+
for x, v in zip(xs, vs):
|
| 999 |
+
assert_allclose(pc.violation(x), v)
|
| 1000 |
+
|
| 1001 |
+
# now check that we can vectorize the constraint wrapper
|
| 1002 |
+
assert_allclose(pc.violation(np.array(xs).T),
|
| 1003 |
+
np.array(vs).T)
|
| 1004 |
+
assert pc.fun(np.array(xs).T).shape == (2, len(xs))
|
| 1005 |
+
assert pc.violation(np.array(xs).T).shape == (2, len(xs))
|
| 1006 |
+
assert pc.num_constr == 2
|
| 1007 |
+
assert pc.parameter_count == 2
|
| 1008 |
+
|
| 1009 |
+
def test_matrix_linear_constraint(self):
|
| 1010 |
+
# gh20041 supplying an np.matrix to construct a LinearConstraint caused
|
| 1011 |
+
# _ConstraintWrapper to start returning constraint violations of the
|
| 1012 |
+
# wrong shape.
|
| 1013 |
+
with suppress_warnings() as sup:
|
| 1014 |
+
sup.filter(PendingDeprecationWarning)
|
| 1015 |
+
matrix = np.matrix([[1, 1, 1, 1.],
|
| 1016 |
+
[2, 2, 2, 2.]])
|
| 1017 |
+
lc = LinearConstraint(matrix, 0, 1)
|
| 1018 |
+
x0 = np.ones(4)
|
| 1019 |
+
cw = _ConstraintWrapper(lc, x0)
|
| 1020 |
+
# the shape of the constraint violation should be the same as the number
|
| 1021 |
+
# of constraints applied.
|
| 1022 |
+
assert cw.violation(x0).shape == (2,)
|
| 1023 |
+
|
| 1024 |
+
# let's try a vectorised violation call.
|
| 1025 |
+
xtrial = np.arange(4 * 5).reshape(4, 5)
|
| 1026 |
+
assert cw.violation(xtrial).shape == (2, 5)
|
| 1027 |
+
|
| 1028 |
+
@pytest.mark.fail_slow(20)
|
| 1029 |
+
def test_L1(self):
|
| 1030 |
+
# Lampinen ([5]) test problem 1
|
| 1031 |
+
|
| 1032 |
+
def f(x):
|
| 1033 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1034 |
+
fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])
|
| 1035 |
+
return fun
|
| 1036 |
+
|
| 1037 |
+
A = np.zeros((10, 14)) # 1-indexed to match reference
|
| 1038 |
+
A[1, [1, 2, 10, 11]] = 2, 2, 1, 1
|
| 1039 |
+
A[2, [1, 10]] = -8, 1
|
| 1040 |
+
A[3, [4, 5, 10]] = -2, -1, 1
|
| 1041 |
+
A[4, [1, 3, 10, 11]] = 2, 2, 1, 1
|
| 1042 |
+
A[5, [2, 11]] = -8, 1
|
| 1043 |
+
A[6, [6, 7, 11]] = -2, -1, 1
|
| 1044 |
+
A[7, [2, 3, 11, 12]] = 2, 2, 1, 1
|
| 1045 |
+
A[8, [3, 12]] = -8, 1
|
| 1046 |
+
A[9, [8, 9, 12]] = -2, -1, 1
|
| 1047 |
+
A = A[1:, 1:]
|
| 1048 |
+
|
| 1049 |
+
b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])
|
| 1050 |
+
|
| 1051 |
+
L = LinearConstraint(A, -np.inf, b)
|
| 1052 |
+
|
| 1053 |
+
bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]
|
| 1054 |
+
|
| 1055 |
+
# using a lower popsize to speed the test up
|
| 1056 |
+
res = differential_evolution(
|
| 1057 |
+
f, bounds, strategy='best1bin', rng=12345, constraints=(L,),
|
| 1058 |
+
popsize=5, tol=0.01
|
| 1059 |
+
)
|
| 1060 |
+
|
| 1061 |
+
x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)
|
| 1062 |
+
f_opt = -15
|
| 1063 |
+
|
| 1064 |
+
assert_allclose(f(x_opt), f_opt, atol=6e-4)
|
| 1065 |
+
assert res.success
|
| 1066 |
+
assert_allclose(res.x, x_opt, atol=6e-4)
|
| 1067 |
+
assert_allclose(res.fun, f_opt, atol=5e-3)
|
| 1068 |
+
assert_(np.all(A@res.x <= b))
|
| 1069 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1070 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1071 |
+
|
| 1072 |
+
# now repeat the same solve, using the same overall constraints,
|
| 1073 |
+
# but using a sparse matrix for the LinearConstraint instead of an
|
| 1074 |
+
# array
|
| 1075 |
+
|
| 1076 |
+
L = LinearConstraint(csr_matrix(A), -np.inf, b)
|
| 1077 |
+
|
| 1078 |
+
# using a lower popsize to speed the test up
|
| 1079 |
+
res = differential_evolution(
|
| 1080 |
+
f, bounds, strategy='best1bin', rng=1211134, constraints=(L,),
|
| 1081 |
+
popsize=2, tol=0.05
|
| 1082 |
+
)
|
| 1083 |
+
|
| 1084 |
+
assert_allclose(f(x_opt), f_opt)
|
| 1085 |
+
assert res.success
|
| 1086 |
+
assert_allclose(res.x, x_opt, atol=5e-4)
|
| 1087 |
+
assert_allclose(res.fun, f_opt, atol=5e-3)
|
| 1088 |
+
assert_(np.all(A@res.x <= b))
|
| 1089 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1090 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1091 |
+
|
| 1092 |
+
# now repeat the same solve, using the same overall constraints,
|
| 1093 |
+
# but specify half the constraints in terms of LinearConstraint,
|
| 1094 |
+
# and the other half by NonlinearConstraint
|
| 1095 |
+
def c1(x):
|
| 1096 |
+
x = np.hstack(([0], x))
|
| 1097 |
+
return [2*x[2] + 2*x[3] + x[11] + x[12],
|
| 1098 |
+
-8*x[3] + x[12]]
|
| 1099 |
+
|
| 1100 |
+
def c2(x):
|
| 1101 |
+
x = np.hstack(([0], x))
|
| 1102 |
+
return -2*x[8] - x[9] + x[12]
|
| 1103 |
+
|
| 1104 |
+
L = LinearConstraint(A[:5, :], -np.inf, b[:5])
|
| 1105 |
+
L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])
|
| 1106 |
+
N = NonlinearConstraint(c1, -np.inf, b[6:8])
|
| 1107 |
+
N2 = NonlinearConstraint(c2, -np.inf, b[8:9])
|
| 1108 |
+
constraints = (L, N, L2, N2)
|
| 1109 |
+
|
| 1110 |
+
with suppress_warnings() as sup:
|
| 1111 |
+
sup.filter(UserWarning)
|
| 1112 |
+
res = differential_evolution(
|
| 1113 |
+
f, bounds, strategy='best1bin', rng=1211134,
|
| 1114 |
+
constraints=constraints, popsize=2, tol=0.05
|
| 1115 |
+
)
|
| 1116 |
+
|
| 1117 |
+
assert_allclose(res.x, x_opt, atol=6e-4)
|
| 1118 |
+
assert_allclose(res.fun, f_opt, atol=5e-3)
|
| 1119 |
+
assert_(np.all(A@res.x <= b))
|
| 1120 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1121 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1122 |
+
|
| 1123 |
+
@pytest.mark.fail_slow(10)
|
| 1124 |
+
def test_L2(self):
|
| 1125 |
+
# Lampinen ([5]) test problem 2
|
| 1126 |
+
|
| 1127 |
+
def f(x):
|
| 1128 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1129 |
+
fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 +
|
| 1130 |
+
10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] -
|
| 1131 |
+
8*x[7])
|
| 1132 |
+
return fun
|
| 1133 |
+
|
| 1134 |
+
def c1(x):
|
| 1135 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1136 |
+
return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5],
|
| 1137 |
+
196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7],
|
| 1138 |
+
282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5],
|
| 1139 |
+
-4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 -
|
| 1140 |
+
5*x[6] + 11*x[7]]
|
| 1141 |
+
|
| 1142 |
+
N = NonlinearConstraint(c1, 0, np.inf)
|
| 1143 |
+
bounds = [(-10, 10)]*7
|
| 1144 |
+
constraints = (N)
|
| 1145 |
+
|
| 1146 |
+
with suppress_warnings() as sup:
|
| 1147 |
+
sup.filter(UserWarning)
|
| 1148 |
+
res = differential_evolution(f, bounds, strategy='best1bin',
|
| 1149 |
+
rng=1234, constraints=constraints)
|
| 1150 |
+
|
| 1151 |
+
f_opt = 680.6300599487869
|
| 1152 |
+
x_opt = (2.330499, 1.951372, -0.4775414, 4.365726,
|
| 1153 |
+
-0.6244870, 1.038131, 1.594227)
|
| 1154 |
+
|
| 1155 |
+
assert_allclose(f(x_opt), f_opt)
|
| 1156 |
+
assert_allclose(res.fun, f_opt)
|
| 1157 |
+
assert_allclose(res.x, x_opt, atol=1e-5)
|
| 1158 |
+
assert res.success
|
| 1159 |
+
assert_(np.all(np.array(c1(res.x)) >= 0))
|
| 1160 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1161 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1162 |
+
|
| 1163 |
+
@pytest.mark.fail_slow(10)
|
| 1164 |
+
def test_L3(self):
|
| 1165 |
+
# Lampinen ([5]) test problem 3
|
| 1166 |
+
|
| 1167 |
+
def f(x):
|
| 1168 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1169 |
+
fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] +
|
| 1170 |
+
(x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 +
|
| 1171 |
+
5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 +
|
| 1172 |
+
(x[10] - 7)**2 + 45
|
| 1173 |
+
)
|
| 1174 |
+
return fun # maximize
|
| 1175 |
+
|
| 1176 |
+
A = np.zeros((4, 11))
|
| 1177 |
+
A[1, [1, 2, 7, 8]] = -4, -5, 3, -9
|
| 1178 |
+
A[2, [1, 2, 7, 8]] = -10, 8, 17, -2
|
| 1179 |
+
A[3, [1, 2, 9, 10]] = 8, -2, -5, 2
|
| 1180 |
+
A = A[1:, 1:]
|
| 1181 |
+
b = np.array([-105, 0, -12])
|
| 1182 |
+
|
| 1183 |
+
def c1(x):
|
| 1184 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1185 |
+
return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10],
|
| 1186 |
+
-3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120,
|
| 1187 |
+
-x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6],
|
| 1188 |
+
-5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40,
|
| 1189 |
+
-0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30]
|
| 1190 |
+
|
| 1191 |
+
L = LinearConstraint(A, b, np.inf)
|
| 1192 |
+
N = NonlinearConstraint(c1, 0, np.inf)
|
| 1193 |
+
bounds = [(-10, 10)]*10
|
| 1194 |
+
constraints = (L, N)
|
| 1195 |
+
|
| 1196 |
+
with suppress_warnings() as sup:
|
| 1197 |
+
sup.filter(UserWarning)
|
| 1198 |
+
res = differential_evolution(f, bounds, rng=1234,
|
| 1199 |
+
constraints=constraints, popsize=3)
|
| 1200 |
+
|
| 1201 |
+
x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,
|
| 1202 |
+
1.430574, 1.321644, 9.828726, 8.280092, 8.375927)
|
| 1203 |
+
f_opt = 24.3062091
|
| 1204 |
+
|
| 1205 |
+
assert_allclose(f(x_opt), f_opt, atol=1e-5)
|
| 1206 |
+
assert_allclose(res.x, x_opt, atol=1e-6)
|
| 1207 |
+
assert_allclose(res.fun, f_opt, atol=1e-5)
|
| 1208 |
+
assert res.success
|
| 1209 |
+
assert_(np.all(A @ res.x >= b))
|
| 1210 |
+
assert_(np.all(np.array(c1(res.x)) >= 0))
|
| 1211 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1212 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1213 |
+
|
| 1214 |
+
@pytest.mark.fail_slow(10)
|
| 1215 |
+
def test_L4(self):
|
| 1216 |
+
# Lampinen ([5]) test problem 4
|
| 1217 |
+
def f(x):
|
| 1218 |
+
return np.sum(x[:3])
|
| 1219 |
+
|
| 1220 |
+
A = np.zeros((4, 9))
|
| 1221 |
+
A[1, [4, 6]] = 0.0025, 0.0025
|
| 1222 |
+
A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025
|
| 1223 |
+
A[3, [8, 5]] = 0.01, -0.01
|
| 1224 |
+
A = A[1:, 1:]
|
| 1225 |
+
b = np.array([1, 1, 1])
|
| 1226 |
+
|
| 1227 |
+
def c1(x):
|
| 1228 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1229 |
+
return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333,
|
| 1230 |
+
x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4],
|
| 1231 |
+
x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]]
|
| 1232 |
+
|
| 1233 |
+
L = LinearConstraint(A, -np.inf, 1)
|
| 1234 |
+
N = NonlinearConstraint(c1, 0, np.inf)
|
| 1235 |
+
|
| 1236 |
+
bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5
|
| 1237 |
+
constraints = (L, N)
|
| 1238 |
+
|
| 1239 |
+
with suppress_warnings() as sup:
|
| 1240 |
+
sup.filter(UserWarning)
|
| 1241 |
+
res = differential_evolution(
|
| 1242 |
+
f, bounds, strategy='best1bin', rng=1234,
|
| 1243 |
+
constraints=constraints, popsize=3, tol=0.05
|
| 1244 |
+
)
|
| 1245 |
+
|
| 1246 |
+
f_opt = 7049.248
|
| 1247 |
+
|
| 1248 |
+
x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,
|
| 1249 |
+
217.9823, 286.416528, 395.601172]
|
| 1250 |
+
|
| 1251 |
+
assert_allclose(f(x_opt), f_opt, atol=0.001)
|
| 1252 |
+
assert_allclose(res.fun, f_opt, atol=0.001)
|
| 1253 |
+
|
| 1254 |
+
# use higher tol here for 32-bit Windows, see gh-11693
|
| 1255 |
+
if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8):
|
| 1256 |
+
assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035)
|
| 1257 |
+
else:
|
| 1258 |
+
# tolerance determined from macOS + MKL failure, see gh-12701
|
| 1259 |
+
assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024)
|
| 1260 |
+
|
| 1261 |
+
assert res.success
|
| 1262 |
+
assert_(np.all(A @ res.x <= b))
|
| 1263 |
+
assert_(np.all(np.array(c1(res.x)) >= 0))
|
| 1264 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1265 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1266 |
+
|
| 1267 |
+
@pytest.mark.fail_slow(10)
|
| 1268 |
+
def test_L5(self):
|
| 1269 |
+
# Lampinen ([5]) test problem 5
|
| 1270 |
+
|
| 1271 |
+
def f(x):
|
| 1272 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1273 |
+
fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /
|
| 1274 |
+
(x[1]**3*(x[1]+x[2])))
|
| 1275 |
+
return -fun # maximize
|
| 1276 |
+
|
| 1277 |
+
def c1(x):
|
| 1278 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1279 |
+
return [x[1]**2 - x[2] + 1,
|
| 1280 |
+
1 - x[1] + (x[2]-4)**2]
|
| 1281 |
+
|
| 1282 |
+
N = NonlinearConstraint(c1, -np.inf, 0)
|
| 1283 |
+
bounds = [(0, 10)]*2
|
| 1284 |
+
constraints = (N)
|
| 1285 |
+
|
| 1286 |
+
res = differential_evolution(f, bounds, strategy='rand1bin', rng=1234,
|
| 1287 |
+
constraints=constraints)
|
| 1288 |
+
|
| 1289 |
+
x_opt = (1.22797135, 4.24537337)
|
| 1290 |
+
f_opt = -0.095825
|
| 1291 |
+
assert_allclose(f(x_opt), f_opt, atol=2e-5)
|
| 1292 |
+
assert_allclose(res.fun, f_opt, atol=1e-4)
|
| 1293 |
+
assert res.success
|
| 1294 |
+
assert_(np.all(np.array(c1(res.x)) <= 0))
|
| 1295 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1296 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1297 |
+
|
| 1298 |
+
@pytest.mark.fail_slow(10)
|
| 1299 |
+
def test_L6(self):
|
| 1300 |
+
# Lampinen ([5]) test problem 6
|
| 1301 |
+
def f(x):
|
| 1302 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1303 |
+
fun = (x[1]-10)**3 + (x[2] - 20)**3
|
| 1304 |
+
return fun
|
| 1305 |
+
|
| 1306 |
+
def c1(x):
|
| 1307 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1308 |
+
return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,
|
| 1309 |
+
-(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]
|
| 1310 |
+
|
| 1311 |
+
N = NonlinearConstraint(c1, 0, np.inf)
|
| 1312 |
+
bounds = [(13, 100), (0, 100)]
|
| 1313 |
+
constraints = (N)
|
| 1314 |
+
res = differential_evolution(f, bounds, strategy='rand1bin', rng=1234,
|
| 1315 |
+
constraints=constraints, tol=1e-7)
|
| 1316 |
+
x_opt = (14.095, 0.84296)
|
| 1317 |
+
f_opt = -6961.814744
|
| 1318 |
+
|
| 1319 |
+
assert_allclose(f(x_opt), f_opt, atol=1e-6)
|
| 1320 |
+
assert_allclose(res.fun, f_opt, atol=0.001)
|
| 1321 |
+
assert_allclose(res.x, x_opt, atol=1e-4)
|
| 1322 |
+
assert res.success
|
| 1323 |
+
assert_(np.all(np.array(c1(res.x)) >= 0))
|
| 1324 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1325 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1326 |
+
|
| 1327 |
+
def test_L7(self):
|
| 1328 |
+
# Lampinen ([5]) test problem 7
|
| 1329 |
+
def f(x):
|
| 1330 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1331 |
+
fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] +
|
| 1332 |
+
37.293239*x[1] - 40792.141)
|
| 1333 |
+
return fun
|
| 1334 |
+
|
| 1335 |
+
def c1(x):
|
| 1336 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1337 |
+
return [
|
| 1338 |
+
85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] -
|
| 1339 |
+
0.0022053*x[3]*x[5],
|
| 1340 |
+
|
| 1341 |
+
80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] +
|
| 1342 |
+
0.0021813*x[3]**2,
|
| 1343 |
+
|
| 1344 |
+
9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] +
|
| 1345 |
+
0.0019085*x[3]*x[4]
|
| 1346 |
+
]
|
| 1347 |
+
|
| 1348 |
+
N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25])
|
| 1349 |
+
|
| 1350 |
+
bounds = [(78, 102), (33, 45)] + [(27, 45)]*3
|
| 1351 |
+
constraints = (N)
|
| 1352 |
+
|
| 1353 |
+
res = differential_evolution(f, bounds, strategy='rand1bin', rng=1234,
|
| 1354 |
+
constraints=constraints)
|
| 1355 |
+
|
| 1356 |
+
# using our best solution, rather than Lampinen/Koziel. Koziel solution
|
| 1357 |
+
# doesn't satisfy constraints, Lampinen f_opt just plain wrong.
|
| 1358 |
+
x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971,
|
| 1359 |
+
36.77579979]
|
| 1360 |
+
|
| 1361 |
+
f_opt = -30665.537578
|
| 1362 |
+
|
| 1363 |
+
assert_allclose(f(x_opt), f_opt)
|
| 1364 |
+
assert_allclose(res.x, x_opt, atol=1e-3)
|
| 1365 |
+
assert_allclose(res.fun, f_opt, atol=1e-3)
|
| 1366 |
+
|
| 1367 |
+
assert res.success
|
| 1368 |
+
assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20])))
|
| 1369 |
+
assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25])))
|
| 1370 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1371 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1372 |
+
|
| 1373 |
+
@pytest.mark.xslow
|
| 1374 |
+
@pytest.mark.xfail(platform.machine() == 'ppc64le',
|
| 1375 |
+
reason="fails on ppc64le")
|
| 1376 |
+
def test_L8(self):
|
| 1377 |
+
def f(x):
|
| 1378 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1379 |
+
fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3
|
| 1380 |
+
return fun
|
| 1381 |
+
|
| 1382 |
+
A = np.zeros((3, 5))
|
| 1383 |
+
A[1, [4, 3]] = 1, -1
|
| 1384 |
+
A[2, [3, 4]] = 1, -1
|
| 1385 |
+
A = A[1:, 1:]
|
| 1386 |
+
b = np.array([-.55, -.55])
|
| 1387 |
+
|
| 1388 |
+
def c1(x):
|
| 1389 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1390 |
+
return [
|
| 1391 |
+
1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) +
|
| 1392 |
+
894.8 - x[1],
|
| 1393 |
+
1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) +
|
| 1394 |
+
894.8 - x[2],
|
| 1395 |
+
1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) +
|
| 1396 |
+
1294.8
|
| 1397 |
+
]
|
| 1398 |
+
L = LinearConstraint(A, b, np.inf)
|
| 1399 |
+
N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))
|
| 1400 |
+
|
| 1401 |
+
bounds = [(0, 1200)]*2+[(-.55, .55)]*2
|
| 1402 |
+
constraints = (L, N)
|
| 1403 |
+
|
| 1404 |
+
with suppress_warnings() as sup:
|
| 1405 |
+
sup.filter(UserWarning)
|
| 1406 |
+
# original Lampinen test was with rand1bin, but that takes a
|
| 1407 |
+
# huge amount of CPU time. Changing strategy to best1bin speeds
|
| 1408 |
+
# things up a lot
|
| 1409 |
+
res = differential_evolution(f, bounds, strategy='best1bin',
|
| 1410 |
+
rng=1234, constraints=constraints,
|
| 1411 |
+
maxiter=5000)
|
| 1412 |
+
|
| 1413 |
+
x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)
|
| 1414 |
+
f_opt = 5126.4981
|
| 1415 |
+
|
| 1416 |
+
assert_allclose(f(x_opt), f_opt, atol=1e-3)
|
| 1417 |
+
assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)
|
| 1418 |
+
assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)
|
| 1419 |
+
assert_allclose(res.fun, f_opt, atol=2e-2)
|
| 1420 |
+
assert res.success
|
| 1421 |
+
assert_(np.all(A@res.x >= b))
|
| 1422 |
+
assert_(np.all(np.array(c1(res.x)) >= -0.001))
|
| 1423 |
+
assert_(np.all(np.array(c1(res.x)) <= 0.001))
|
| 1424 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1425 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1426 |
+
|
| 1427 |
+
@pytest.mark.fail_slow(5)
|
| 1428 |
+
def test_L9(self):
|
| 1429 |
+
# Lampinen ([5]) test problem 9
|
| 1430 |
+
|
| 1431 |
+
def f(x):
|
| 1432 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1433 |
+
return x[1]**2 + (x[2]-1)**2
|
| 1434 |
+
|
| 1435 |
+
def c1(x):
|
| 1436 |
+
x = np.hstack(([0], x)) # 1-indexed to match reference
|
| 1437 |
+
return [x[2] - x[1]**2]
|
| 1438 |
+
|
| 1439 |
+
N = NonlinearConstraint(c1, [-.001], [0.001])
|
| 1440 |
+
|
| 1441 |
+
bounds = [(-1, 1)]*2
|
| 1442 |
+
constraints = (N)
|
| 1443 |
+
res = differential_evolution(f, bounds, strategy='rand1bin', rng=1234,
|
| 1444 |
+
constraints=constraints)
|
| 1445 |
+
|
| 1446 |
+
x_opt = [np.sqrt(2)/2, 0.5]
|
| 1447 |
+
f_opt = 0.75
|
| 1448 |
+
|
| 1449 |
+
assert_allclose(f(x_opt), f_opt)
|
| 1450 |
+
assert_allclose(np.abs(res.x), x_opt, atol=1e-3)
|
| 1451 |
+
assert_allclose(res.fun, f_opt, atol=1e-3)
|
| 1452 |
+
assert res.success
|
| 1453 |
+
assert_(np.all(np.array(c1(res.x)) >= -0.001))
|
| 1454 |
+
assert_(np.all(np.array(c1(res.x)) <= 0.001))
|
| 1455 |
+
assert_(np.all(res.x >= np.array(bounds)[:, 0]))
|
| 1456 |
+
assert_(np.all(res.x <= np.array(bounds)[:, 1]))
|
| 1457 |
+
|
| 1458 |
+
@pytest.mark.fail_slow(10)
|
| 1459 |
+
def test_integrality(self):
|
| 1460 |
+
# test fitting discrete distribution to data
|
| 1461 |
+
rng = np.random.default_rng(6519843218105)
|
| 1462 |
+
dist = stats.nbinom
|
| 1463 |
+
shapes = (5, 0.5)
|
| 1464 |
+
x = dist.rvs(*shapes, size=10000, random_state=rng)
|
| 1465 |
+
|
| 1466 |
+
def func(p, *args):
|
| 1467 |
+
dist, x = args
|
| 1468 |
+
# negative log-likelihood function
|
| 1469 |
+
ll = -np.log(dist.pmf(x, *p)).sum(axis=-1)
|
| 1470 |
+
if np.isnan(ll): # occurs when x is outside of support
|
| 1471 |
+
ll = np.inf # we don't want that
|
| 1472 |
+
return ll
|
| 1473 |
+
|
| 1474 |
+
integrality = [True, False]
|
| 1475 |
+
bounds = [(1, 18), (0, 0.95)]
|
| 1476 |
+
|
| 1477 |
+
res = differential_evolution(func, bounds, args=(dist, x),
|
| 1478 |
+
integrality=integrality, polish=False,
|
| 1479 |
+
rng=rng)
|
| 1480 |
+
# tolerance has to be fairly relaxed for the second parameter
|
| 1481 |
+
# because we're fitting a distribution to random variates.
|
| 1482 |
+
assert res.x[0] == 5
|
| 1483 |
+
assert_allclose(res.x, shapes, rtol=0.025)
|
| 1484 |
+
|
| 1485 |
+
# check that we can still use integrality constraints with polishing
|
| 1486 |
+
res2 = differential_evolution(func, bounds, args=(dist, x),
|
| 1487 |
+
integrality=integrality, polish=True,
|
| 1488 |
+
rng=rng)
|
| 1489 |
+
|
| 1490 |
+
def func2(p, *args):
|
| 1491 |
+
n, dist, x = args
|
| 1492 |
+
return func(np.array([n, p[0]]), dist, x)
|
| 1493 |
+
|
| 1494 |
+
# compare the DE derived solution to an LBFGSB solution (that doesn't
|
| 1495 |
+
# have to find the integral values). Note we're setting x0 to be the
|
| 1496 |
+
# output from the first DE result, thereby making the polishing step
|
| 1497 |
+
# and this minimisation pretty much equivalent.
|
| 1498 |
+
LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x),
|
| 1499 |
+
bounds=[(0, 0.95)])
|
| 1500 |
+
assert_allclose(res2.x[1], LBFGSB.x)
|
| 1501 |
+
assert res2.fun <= res.fun
|
| 1502 |
+
|
| 1503 |
+
def test_integrality_limits(self):
|
| 1504 |
+
def f(x):
|
| 1505 |
+
return x
|
| 1506 |
+
|
| 1507 |
+
integrality = [True, False, True]
|
| 1508 |
+
bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)]
|
| 1509 |
+
|
| 1510 |
+
# no integrality constraints
|
| 1511 |
+
solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
| 1512 |
+
integrality=False)
|
| 1513 |
+
assert_allclose(solver.limits[0], [0.2, 0.9, 3.3])
|
| 1514 |
+
assert_allclose(solver.limits[1], [1.1, 2.2, 4.9])
|
| 1515 |
+
|
| 1516 |
+
# with integrality constraints
|
| 1517 |
+
solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
| 1518 |
+
integrality=integrality)
|
| 1519 |
+
assert_allclose(solver.limits[0], [0.5, 0.9, 3.5])
|
| 1520 |
+
assert_allclose(solver.limits[1], [1.5, 2.2, 4.5])
|
| 1521 |
+
assert_equal(solver.integrality, [True, False, True])
|
| 1522 |
+
assert solver.polish is False
|
| 1523 |
+
|
| 1524 |
+
bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)]
|
| 1525 |
+
solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
| 1526 |
+
integrality=integrality)
|
| 1527 |
+
assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5])
|
| 1528 |
+
assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5])
|
| 1529 |
+
|
| 1530 |
+
# A lower bound of -1.2 is converted to
|
| 1531 |
+
# np.nextafter(np.ceil(-1.2) - 0.5, np.inf)
|
| 1532 |
+
# with a similar process to the upper bound. Check that the
|
| 1533 |
+
# conversions work
|
| 1534 |
+
assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0])
|
| 1535 |
+
assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0])
|
| 1536 |
+
|
| 1537 |
+
bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)]
|
| 1538 |
+
solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
| 1539 |
+
integrality=integrality)
|
| 1540 |
+
assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5])
|
| 1541 |
+
assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5])
|
| 1542 |
+
|
| 1543 |
+
bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)]
|
| 1544 |
+
with pytest.raises(ValueError, match='One of the integrality'):
|
| 1545 |
+
DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
|
| 1546 |
+
integrality=integrality)
|
| 1547 |
+
|
| 1548 |
+
@pytest.mark.thread_unsafe
|
| 1549 |
+
@pytest.mark.fail_slow(10)
|
| 1550 |
+
def test_vectorized(self):
|
| 1551 |
+
def quadratic(x):
|
| 1552 |
+
return np.sum(x**2)
|
| 1553 |
+
|
| 1554 |
+
def quadratic_vec(x):
|
| 1555 |
+
return np.sum(x**2, axis=0)
|
| 1556 |
+
|
| 1557 |
+
# A vectorized function needs to accept (len(x), S) and return (S,)
|
| 1558 |
+
with pytest.raises(RuntimeError, match='The vectorized function'):
|
| 1559 |
+
differential_evolution(quadratic, self.bounds,
|
| 1560 |
+
vectorized=True, updating='deferred')
|
| 1561 |
+
|
| 1562 |
+
# vectorized overrides the updating keyword, check for warning
|
| 1563 |
+
with warns(UserWarning, match="differential_evolution: the 'vector"):
|
| 1564 |
+
differential_evolution(quadratic_vec, self.bounds,
|
| 1565 |
+
vectorized=True)
|
| 1566 |
+
|
| 1567 |
+
# vectorized defers to the workers keyword, check for warning
|
| 1568 |
+
with warns(UserWarning, match="differential_evolution: the 'workers"):
|
| 1569 |
+
differential_evolution(quadratic_vec, self.bounds,
|
| 1570 |
+
vectorized=True, workers=map,
|
| 1571 |
+
updating='deferred')
|
| 1572 |
+
|
| 1573 |
+
ncalls = [0]
|
| 1574 |
+
|
| 1575 |
+
def rosen_vec(x):
|
| 1576 |
+
ncalls[0] += 1
|
| 1577 |
+
return rosen(x)
|
| 1578 |
+
|
| 1579 |
+
bounds = [(0, 10), (0, 10)]
|
| 1580 |
+
res1 = differential_evolution(rosen, bounds, updating='deferred',
|
| 1581 |
+
rng=1)
|
| 1582 |
+
res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
|
| 1583 |
+
updating='deferred', rng=1)
|
| 1584 |
+
|
| 1585 |
+
# the two minimisation runs should be functionally equivalent
|
| 1586 |
+
assert_allclose(res1.x, res2.x)
|
| 1587 |
+
assert ncalls[0] == res2.nfev
|
| 1588 |
+
assert res1.nit == res2.nit
|
| 1589 |
+
|
| 1590 |
+
def test_vectorized_constraints(self):
|
| 1591 |
+
def constr_f(x):
|
| 1592 |
+
return np.array([x[0] + x[1]])
|
| 1593 |
+
|
| 1594 |
+
def constr_f2(x):
|
| 1595 |
+
return np.array([x[0]**2 + x[1], x[0] - x[1]])
|
| 1596 |
+
|
| 1597 |
+
nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9)
|
| 1598 |
+
nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0))
|
| 1599 |
+
|
| 1600 |
+
def rosen_vec(x):
|
| 1601 |
+
# accept an (len(x0), S) array, returning a (S,) array
|
| 1602 |
+
v = 100 * (x[1:] - x[:-1]**2.0)**2.0
|
| 1603 |
+
v += (1 - x[:-1])**2.0
|
| 1604 |
+
return np.squeeze(v)
|
| 1605 |
+
|
| 1606 |
+
bounds = [(0, 10), (0, 10)]
|
| 1607 |
+
|
| 1608 |
+
res1 = differential_evolution(rosen, bounds, updating='deferred',
|
| 1609 |
+
rng=1, constraints=[nlc1, nlc2],
|
| 1610 |
+
polish=False)
|
| 1611 |
+
res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
|
| 1612 |
+
updating='deferred', rng=1,
|
| 1613 |
+
constraints=[nlc1, nlc2],
|
| 1614 |
+
polish=False)
|
| 1615 |
+
# the two minimisation runs should be functionally equivalent
|
| 1616 |
+
assert_allclose(res1.x, res2.x)
|
| 1617 |
+
|
| 1618 |
+
def test_constraint_violation_error_message(self):
|
| 1619 |
+
|
| 1620 |
+
def func(x):
|
| 1621 |
+
return np.cos(x[0]) + np.sin(x[1])
|
| 1622 |
+
|
| 1623 |
+
# Intentionally infeasible constraints.
|
| 1624 |
+
c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf)
|
| 1625 |
+
c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0)
|
| 1626 |
+
|
| 1627 |
+
result = differential_evolution(func,
|
| 1628 |
+
bounds=[(-1, 2), (-1, 1)],
|
| 1629 |
+
constraints=[c0, c1],
|
| 1630 |
+
maxiter=10,
|
| 1631 |
+
polish=False,
|
| 1632 |
+
rng=864197532)
|
| 1633 |
+
assert result.success is False
|
| 1634 |
+
# The numerical value in the error message might be sensitive to
|
| 1635 |
+
# changes in the implementation. It can be updated if the code is
|
| 1636 |
+
# changed. The essential part of the test is that there is a number
|
| 1637 |
+
# after the '=', so if necessary, the text could be reduced to, say,
|
| 1638 |
+
# "MAXCV = 0.".
|
| 1639 |
+
assert "MAXCV = 0." in result.message
|
| 1640 |
+
|
| 1641 |
+
@pytest.mark.fail_slow(20) # fail-slow exception by request - see gh-20806
|
| 1642 |
+
def test_strategy_fn(self):
|
| 1643 |
+
# examines ability to customize strategy by mimicking one of the
|
| 1644 |
+
# in-built strategies
|
| 1645 |
+
parameter_count = 4
|
| 1646 |
+
popsize = 10
|
| 1647 |
+
bounds = [(0, 10.)] * parameter_count
|
| 1648 |
+
total_popsize = parameter_count * popsize
|
| 1649 |
+
mutation = 0.8
|
| 1650 |
+
recombination = 0.7
|
| 1651 |
+
|
| 1652 |
+
calls = [0]
|
| 1653 |
+
def custom_strategy_fn(candidate, population, rng=None):
|
| 1654 |
+
calls[0] += 1
|
| 1655 |
+
trial = np.copy(population[candidate])
|
| 1656 |
+
fill_point = rng.choice(parameter_count)
|
| 1657 |
+
|
| 1658 |
+
pool = np.arange(total_popsize)
|
| 1659 |
+
rng.shuffle(pool)
|
| 1660 |
+
idxs = pool[:2 + 1]
|
| 1661 |
+
idxs = idxs[idxs != candidate][:2]
|
| 1662 |
+
|
| 1663 |
+
r0, r1 = idxs[:2]
|
| 1664 |
+
|
| 1665 |
+
bprime = (population[0] + mutation *
|
| 1666 |
+
(population[r0] - population[r1]))
|
| 1667 |
+
|
| 1668 |
+
crossovers = rng.uniform(size=parameter_count)
|
| 1669 |
+
crossovers = crossovers < recombination
|
| 1670 |
+
crossovers[fill_point] = True
|
| 1671 |
+
trial = np.where(crossovers, bprime, trial)
|
| 1672 |
+
return trial
|
| 1673 |
+
|
| 1674 |
+
solver = DifferentialEvolutionSolver(
|
| 1675 |
+
rosen,
|
| 1676 |
+
bounds,
|
| 1677 |
+
popsize=popsize,
|
| 1678 |
+
recombination=recombination,
|
| 1679 |
+
mutation=mutation,
|
| 1680 |
+
maxiter=2,
|
| 1681 |
+
strategy=custom_strategy_fn,
|
| 1682 |
+
rng=10,
|
| 1683 |
+
polish=False
|
| 1684 |
+
)
|
| 1685 |
+
assert solver.strategy is custom_strategy_fn
|
| 1686 |
+
solver.solve()
|
| 1687 |
+
assert calls[0] > 0
|
| 1688 |
+
|
| 1689 |
+
# check custom strategy works with updating='deferred'
|
| 1690 |
+
res = differential_evolution(
|
| 1691 |
+
rosen, bounds, strategy=custom_strategy_fn, updating='deferred'
|
| 1692 |
+
)
|
| 1693 |
+
assert res.success
|
| 1694 |
+
|
| 1695 |
+
def custom_strategy_fn(candidate, population, rng=None):
|
| 1696 |
+
return np.array([1.0, 2.0])
|
| 1697 |
+
|
| 1698 |
+
with pytest.raises(RuntimeError, match="strategy*"):
|
| 1699 |
+
differential_evolution(
|
| 1700 |
+
rosen,
|
| 1701 |
+
bounds,
|
| 1702 |
+
strategy=custom_strategy_fn
|
| 1703 |
+
)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py
ADDED
|
@@ -0,0 +1,841 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from itertools import product
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.testing import assert_allclose, assert_equal, assert_
|
| 6 |
+
from pytest import raises as assert_raises
|
| 7 |
+
|
| 8 |
+
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
|
| 9 |
+
|
| 10 |
+
from scipy.optimize._numdiff import (
|
| 11 |
+
_adjust_scheme_to_bounds, approx_derivative, check_derivative,
|
| 12 |
+
group_columns, _eps_for_method, _compute_absolute_step)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def test_group_columns():
|
| 16 |
+
structure = [
|
| 17 |
+
[1, 1, 0, 0, 0, 0],
|
| 18 |
+
[1, 1, 1, 0, 0, 0],
|
| 19 |
+
[0, 1, 1, 1, 0, 0],
|
| 20 |
+
[0, 0, 1, 1, 1, 0],
|
| 21 |
+
[0, 0, 0, 1, 1, 1],
|
| 22 |
+
[0, 0, 0, 0, 1, 1],
|
| 23 |
+
[0, 0, 0, 0, 0, 0]
|
| 24 |
+
]
|
| 25 |
+
for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]:
|
| 26 |
+
A = transform(structure)
|
| 27 |
+
order = np.arange(6)
|
| 28 |
+
groups_true = np.array([0, 1, 2, 0, 1, 2])
|
| 29 |
+
groups = group_columns(A, order)
|
| 30 |
+
assert_equal(groups, groups_true)
|
| 31 |
+
|
| 32 |
+
order = [1, 2, 4, 3, 5, 0]
|
| 33 |
+
groups_true = np.array([2, 0, 1, 2, 0, 1])
|
| 34 |
+
groups = group_columns(A, order)
|
| 35 |
+
assert_equal(groups, groups_true)
|
| 36 |
+
|
| 37 |
+
# Test repeatability.
|
| 38 |
+
groups_1 = group_columns(A)
|
| 39 |
+
groups_2 = group_columns(A)
|
| 40 |
+
assert_equal(groups_1, groups_2)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def test_correct_fp_eps():
|
| 44 |
+
# check that relative step size is correct for FP size
|
| 45 |
+
EPS = np.finfo(np.float64).eps
|
| 46 |
+
relative_step = {"2-point": EPS**0.5,
|
| 47 |
+
"3-point": EPS**(1/3),
|
| 48 |
+
"cs": EPS**0.5}
|
| 49 |
+
for method in ['2-point', '3-point', 'cs']:
|
| 50 |
+
assert_allclose(
|
| 51 |
+
_eps_for_method(np.float64, np.float64, method),
|
| 52 |
+
relative_step[method])
|
| 53 |
+
assert_allclose(
|
| 54 |
+
_eps_for_method(np.complex128, np.complex128, method),
|
| 55 |
+
relative_step[method]
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
# check another FP size
|
| 59 |
+
EPS = np.finfo(np.float32).eps
|
| 60 |
+
relative_step = {"2-point": EPS**0.5,
|
| 61 |
+
"3-point": EPS**(1/3),
|
| 62 |
+
"cs": EPS**0.5}
|
| 63 |
+
|
| 64 |
+
for method in ['2-point', '3-point', 'cs']:
|
| 65 |
+
assert_allclose(
|
| 66 |
+
_eps_for_method(np.float64, np.float32, method),
|
| 67 |
+
relative_step[method]
|
| 68 |
+
)
|
| 69 |
+
assert_allclose(
|
| 70 |
+
_eps_for_method(np.float32, np.float64, method),
|
| 71 |
+
relative_step[method]
|
| 72 |
+
)
|
| 73 |
+
assert_allclose(
|
| 74 |
+
_eps_for_method(np.float32, np.float32, method),
|
| 75 |
+
relative_step[method]
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class TestAdjustSchemeToBounds:
|
| 80 |
+
def test_no_bounds(self):
|
| 81 |
+
x0 = np.zeros(3)
|
| 82 |
+
h = np.full(3, 1e-2)
|
| 83 |
+
inf_lower = np.empty_like(x0)
|
| 84 |
+
inf_upper = np.empty_like(x0)
|
| 85 |
+
inf_lower.fill(-np.inf)
|
| 86 |
+
inf_upper.fill(np.inf)
|
| 87 |
+
|
| 88 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
| 89 |
+
x0, h, 1, '1-sided', inf_lower, inf_upper)
|
| 90 |
+
assert_allclose(h_adjusted, h)
|
| 91 |
+
assert_(np.all(one_sided))
|
| 92 |
+
|
| 93 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
| 94 |
+
x0, h, 2, '1-sided', inf_lower, inf_upper)
|
| 95 |
+
assert_allclose(h_adjusted, h)
|
| 96 |
+
assert_(np.all(one_sided))
|
| 97 |
+
|
| 98 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
| 99 |
+
x0, h, 1, '2-sided', inf_lower, inf_upper)
|
| 100 |
+
assert_allclose(h_adjusted, h)
|
| 101 |
+
assert_(np.all(~one_sided))
|
| 102 |
+
|
| 103 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
| 104 |
+
x0, h, 2, '2-sided', inf_lower, inf_upper)
|
| 105 |
+
assert_allclose(h_adjusted, h)
|
| 106 |
+
assert_(np.all(~one_sided))
|
| 107 |
+
|
| 108 |
+
def test_with_bound(self):
|
| 109 |
+
x0 = np.array([0.0, 0.85, -0.85])
|
| 110 |
+
lb = -np.ones(3)
|
| 111 |
+
ub = np.ones(3)
|
| 112 |
+
h = np.array([1, 1, -1]) * 1e-1
|
| 113 |
+
|
| 114 |
+
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
|
| 115 |
+
assert_allclose(h_adjusted, h)
|
| 116 |
+
|
| 117 |
+
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
|
| 118 |
+
assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
|
| 119 |
+
|
| 120 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
| 121 |
+
x0, h, 1, '2-sided', lb, ub)
|
| 122 |
+
assert_allclose(h_adjusted, np.abs(h))
|
| 123 |
+
assert_(np.all(~one_sided))
|
| 124 |
+
|
| 125 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
| 126 |
+
x0, h, 2, '2-sided', lb, ub)
|
| 127 |
+
assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
|
| 128 |
+
assert_equal(one_sided, np.array([False, True, True]))
|
| 129 |
+
|
| 130 |
+
def test_tight_bounds(self):
|
| 131 |
+
lb = np.array([-0.03, -0.03])
|
| 132 |
+
ub = np.array([0.05, 0.05])
|
| 133 |
+
x0 = np.array([0.0, 0.03])
|
| 134 |
+
h = np.array([-0.1, -0.1])
|
| 135 |
+
|
| 136 |
+
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
|
| 137 |
+
assert_allclose(h_adjusted, np.array([0.05, -0.06]))
|
| 138 |
+
|
| 139 |
+
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
|
| 140 |
+
assert_allclose(h_adjusted, np.array([0.025, -0.03]))
|
| 141 |
+
|
| 142 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
| 143 |
+
x0, h, 1, '2-sided', lb, ub)
|
| 144 |
+
assert_allclose(h_adjusted, np.array([0.03, -0.03]))
|
| 145 |
+
assert_equal(one_sided, np.array([False, True]))
|
| 146 |
+
|
| 147 |
+
h_adjusted, one_sided = _adjust_scheme_to_bounds(
|
| 148 |
+
x0, h, 2, '2-sided', lb, ub)
|
| 149 |
+
assert_allclose(h_adjusted, np.array([0.015, -0.015]))
|
| 150 |
+
assert_equal(one_sided, np.array([False, True]))
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class TestApproxDerivativesDense:
    """Tests of approx_derivative returning dense Jacobians.

    The fixture methods come in fun_*/jac_* pairs: ``fun_<in>_<out>`` maps a
    scalar or vector input to a scalar or vector output, and the matching
    ``jac_<in>_<out>`` is its analytical Jacobian used as ground truth.
    """

    def fun_scalar_scalar(self, x):
        return np.sinh(x)

    def jac_scalar_scalar(self, x):
        return np.cosh(x)

    def fun_scalar_vector(self, x):
        return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])

    def jac_scalar_vector(self, x):
        # Column vector: 3 outputs differentiated w.r.t. one input.
        return np.array(
            [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)

    def fun_vector_scalar(self, x):
        return np.sin(x[0] * x[1]) * np.log(x[0])

    def wrong_dimensions_fun(self, x):
        # Deliberately returns a 2-D array for a 1-D input; used to check
        # that approx_derivative rejects mis-shaped function output.
        return np.array([x**2, np.tan(x), np.exp(x)])

    def jac_vector_scalar(self, x):
        return np.array([
            x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
            np.sin(x[0] * x[1]) / x[0],
            x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
        ])

    def fun_vector_vector(self, x):
        return np.array([
            x[0] * np.sin(x[1]),
            x[1] * np.cos(x[0]),
            x[0] ** 3 * x[1] ** -0.5
        ])

    def fun_vector_vector_with_arg(self, x, arg):
        """Used to test passing custom arguments with check_derivative()"""
        assert arg == 42
        return np.array([
            x[0] * np.sin(x[1]),
            x[1] * np.cos(x[0]),
            x[0] ** 3 * x[1] ** -0.5
        ])

    def jac_vector_vector(self, x):
        return np.array([
            [np.sin(x[1]), x[0] * np.cos(x[1])],
            [-x[1] * np.sin(x[0]), np.cos(x[0])],
            [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
        ])

    def jac_vector_vector_with_arg(self, x, arg):
        """Used to test passing custom arguments with check_derivative()"""
        assert arg == 42
        return np.array([
            [np.sin(x[1]), x[0] * np.cos(x[1])],
            [-x[1] * np.sin(x[0]), np.cos(x[0])],
            [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
        ])

    def fun_parametrized(self, x, c0, c1=1.0):
        return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])])

    def jac_parametrized(self, x, c0, c1=0.1):
        # NOTE: default c1 intentionally differs from fun_parametrized's;
        # the tests always pass c1 explicitly.
        return np.array([
            [c0 * np.exp(c0 * x[0]), 0],
            [0, c1 * np.exp(c1 * x[1])]
        ])

    def fun_with_nan(self, x):
        # Valid only in a tiny neighbourhood of 0; NaN outside. Used to
        # verify that bounded differencing never steps into the NaN region.
        return x if np.abs(x) <= 1e-8 else np.nan

    def jac_with_nan(self, x):
        return 1.0 if np.abs(x) <= 1e-8 else np.nan

    def fun_zero_jacobian(self, x):
        # At x = [0, 0] the Jacobian of this function is exactly zero.
        return np.array([x[0] * x[1], np.cos(x[0] * x[1])])

    def jac_zero_jacobian(self, x):
        return np.array([
            [x[1], x[0]],
            [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])]
        ])

    def jac_non_numpy(self, x):
        # x can be a scalar or an array [val].
        # Cast to true scalar before handing over to math.exp
        xp = np.asarray(x).item()
        return math.exp(xp)

    def test_scalar_scalar(self):
        x0 = 1.0
        jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
                                       method='2-point')
        jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0)
        jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
                                       method='cs')
        jac_true = self.jac_scalar_scalar(x0)
        # Tolerances loosen with the accuracy order of each scheme.
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)

    def test_scalar_scalar_abs_step(self):
        # can approx_derivative use abs_step?
        x0 = 1.0
        jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
                                       method='2-point', abs_step=1.49e-8)
        jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
                                       abs_step=1.49e-8)
        jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
                                       method='cs', abs_step=1.49e-8)
        jac_true = self.jac_scalar_scalar(x0)
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)

    def test_scalar_vector(self):
        x0 = 0.5
        jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
                                       method='2-point')
        jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0)
        jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
                                       method='cs')
        jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)

    def test_vector_scalar(self):
        x0 = np.array([100.0, -0.5])
        jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
                                       method='2-point')
        jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0)
        jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
                                       method='cs')
        jac_true = self.jac_vector_scalar(x0)
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-7)
        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)

    def test_vector_scalar_abs_step(self):
        # can approx_derivative use abs_step?
        x0 = np.array([100.0, -0.5])
        jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
                                       method='2-point', abs_step=1.49e-8)
        # rel_step=np.inf forces the absolute step to take precedence.
        jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
                                       abs_step=1.49e-8, rel_step=np.inf)
        jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
                                       method='cs', abs_step=1.49e-8)
        jac_true = self.jac_vector_scalar(x0)
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=3e-9)
        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)

    def test_vector_vector(self):
        x0 = np.array([-100.0, 0.2])
        jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
                                       method='2-point')
        jac_diff_3 = approx_derivative(self.fun_vector_vector, x0)
        jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
                                       method='cs')
        jac_true = self.jac_vector_vector(x0)
        assert_allclose(jac_diff_2, jac_true, rtol=1e-5)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)

    def test_wrong_dimensions(self):
        x0 = 1.0
        # Without f0 the shape mismatch is detected during evaluation.
        assert_raises(RuntimeError, approx_derivative,
                      self.wrong_dimensions_fun, x0)
        f0 = self.wrong_dimensions_fun(np.atleast_1d(x0))
        # With a precomputed mis-shaped f0 the check fires up front.
        assert_raises(ValueError, approx_derivative,
                      self.wrong_dimensions_fun, x0, f0=f0)

    def test_custom_rel_step(self):
        x0 = np.array([-0.1, 0.1])
        jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
                                       method='2-point', rel_step=1e-4)
        jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
                                       rel_step=1e-4)
        jac_true = self.jac_vector_vector(x0)
        assert_allclose(jac_diff_2, jac_true, rtol=1e-2)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-4)

    def test_options(self):
        # Exercise all optional parameters together: f0, args, kwargs,
        # per-component (and signed) rel_step, and bounds.
        x0 = np.array([1.0, 1.0])
        c0 = -1.0
        c1 = 1.0
        lb = 0.0
        ub = 2.0
        f0 = self.fun_parametrized(x0, c0, c1=c1)
        rel_step = np.array([-1e-6, 1e-7])
        jac_true = self.jac_parametrized(x0, c0, c1)
        jac_diff_2 = approx_derivative(
            self.fun_parametrized, x0, method='2-point', rel_step=rel_step,
            f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
        jac_diff_3 = approx_derivative(
            self.fun_parametrized, x0, rel_step=rel_step,
            f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)

    def test_with_bounds_2_point(self):
        lb = -np.ones(2)
        ub = np.ones(2)

        # x0 outside the bounds must be rejected.
        x0 = np.array([-2.0, 0.2])
        assert_raises(ValueError, approx_derivative,
                      self.fun_vector_vector, x0, bounds=(lb, ub))

        x0 = np.array([-1.0, 1.0])
        jac_diff = approx_derivative(self.fun_vector_vector, x0,
                                     method='2-point', bounds=(lb, ub))
        jac_true = self.jac_vector_vector(x0)
        assert_allclose(jac_diff, jac_true, rtol=1e-6)

    def test_with_bounds_3_point(self):
        lb = np.array([1.0, 1.0])
        ub = np.array([2.0, 2.0])

        x0 = np.array([1.0, 2.0])
        jac_true = self.jac_vector_vector(x0)

        jac_diff = approx_derivative(self.fun_vector_vector, x0)
        assert_allclose(jac_diff, jac_true, rtol=1e-9)

        # One-sided bounds and full bounds should all give the same result.
        jac_diff = approx_derivative(self.fun_vector_vector, x0,
                                     bounds=(lb, np.inf))
        assert_allclose(jac_diff, jac_true, rtol=1e-9)

        jac_diff = approx_derivative(self.fun_vector_vector, x0,
                                     bounds=(-np.inf, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-9)

        jac_diff = approx_derivative(self.fun_vector_vector, x0,
                                     bounds=(lb, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-9)

    def test_tight_bounds(self):
        # Bounds only a few ulps wide around x0: steps must be shrunk to fit.
        x0 = np.array([10.0, 10.0])
        lb = x0 - 3e-9
        ub = x0 + 2e-9
        jac_true = self.jac_vector_vector(x0)
        jac_diff = approx_derivative(
            self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-6)
        jac_diff = approx_derivative(
            self.fun_vector_vector, x0, method='2-point',
            rel_step=1e-6, bounds=(lb, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-6)

        jac_diff = approx_derivative(
            self.fun_vector_vector, x0, bounds=(lb, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-6)
        jac_diff = approx_derivative(
            self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub))
        assert_allclose(jac_true, jac_diff, rtol=1e-6)

    def test_bound_switches(self):
        # fun_with_nan is NaN outside [-1e-8, 1e-8]; with matching bounds
        # the scheme must switch sides rather than evaluate in NaN land.
        lb = -1e-8
        ub = 1e-8
        x0 = 0.0
        jac_true = self.jac_with_nan(x0)
        jac_diff_2 = approx_derivative(
            self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
            bounds=(lb, ub))
        jac_diff_3 = approx_derivative(
            self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)

        # Same check with x0 sitting exactly on the upper bound.
        x0 = 1e-8
        jac_true = self.jac_with_nan(x0)
        jac_diff_2 = approx_derivative(
            self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
            bounds=(lb, ub))
        jac_diff_3 = approx_derivative(
            self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)

    def test_non_numpy(self):
        # Functions returning plain Python floats (math.exp) must work too.
        x0 = 1.0
        jac_true = self.jac_non_numpy(x0)
        jac_diff_2 = approx_derivative(self.jac_non_numpy, x0,
                                       method='2-point')
        jac_diff_3 = approx_derivative(self.jac_non_numpy, x0)
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-8)

        # math.exp cannot handle complex arguments, hence this raises
        assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0,
                      **dict(method='cs'))

    def test_fp(self):
        # checks that approx_derivative works for FP size other than 64.
        # Example is derived from the minimal working example in gh12991.
        np.random.seed(1)

        def func(p, x):
            return p[0] + p[1] * x

        def err(p, x, y):
            return func(p, x) - y

        x = np.linspace(0, 1, 100, dtype=np.float64)
        y = np.random.random(100).astype(np.float64)
        p0 = np.array([-1.0, -1.0])

        jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y))

        # parameter vector is float32, func output is float64
        jac_fp = approx_derivative(err, p0.astype(np.float32),
                                   method='2-point', args=(x, y))
        assert err(p0, x, y).dtype == np.float64
        assert_allclose(jac_fp, jac_fp64, atol=1e-3)

        # parameter vector is float64, func output is float32
        def err_fp32(p):
            assert p.dtype == np.float32
            return err(p, x, y).astype(np.float32)

        jac_fp = approx_derivative(err_fp32, p0.astype(np.float32),
                                   method='2-point')
        assert_allclose(jac_fp, jac_fp64, atol=1e-3)

        # check upper bound of error on the derivative for 2-point
        def f(x):
            return np.sin(x)
        def g(x):
            return np.cos(x)
        def hess(x):
            return -np.sin(x)

        def calc_atol(h, x0, f, hess, EPS):
            # truncation error
            t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h)))
            # roundoff error. There may be a divisor (>1) missing from
            # the following line, so this contribution is possibly
            # overestimated
            t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h)))
            return t0 + t1

        for dtype in [np.float16, np.float32, np.float64]:
            EPS = np.finfo(dtype).eps
            x0 = np.array(1.0).astype(dtype)
            h = _compute_absolute_step(None, x0, f(x0), '2-point')
            atol = calc_atol(h, x0, f, hess, EPS)
            err = approx_derivative(f, x0, method='2-point',
                                    abs_step=h) - g(x0)
            assert abs(err) < atol

    def test_check_derivative(self):
        x0 = np.array([-10.0, 10])
        accuracy = check_derivative(self.fun_vector_vector,
                                    self.jac_vector_vector, x0)
        assert_(accuracy < 1e-9)
        accuracy = check_derivative(self.fun_vector_vector,
                                    self.jac_vector_vector, x0)
        assert_(accuracy < 1e-6)

        # Zero Jacobian: relative accuracy measure should be exactly 0.
        x0 = np.array([0.0, 0.0])
        accuracy = check_derivative(self.fun_zero_jacobian,
                                    self.jac_zero_jacobian, x0)
        assert_(accuracy == 0)
        accuracy = check_derivative(self.fun_zero_jacobian,
                                    self.jac_zero_jacobian, x0)
        assert_(accuracy == 0)

    def test_check_derivative_with_kwargs(self):
        x0 = np.array([-10.0, 10])
        # kwargs must be forwarded to both fun and jac (asserted inside).
        accuracy = check_derivative(self.fun_vector_vector_with_arg,
                                    self.jac_vector_vector_with_arg,
                                    x0,
                                    kwargs={'arg': 42})
        assert_(accuracy < 1e-9)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
class TestApproxDerivativeSparse:
    # Example from Numerical Optimization 2nd edition, p. 198.
    # The function has a tridiagonal Jacobian, so sparse differencing with
    # column grouping needs only a handful of function evaluations.
    def setup_method(self):
        np.random.seed(0)
        self.n = 50
        self.lb = -0.1 * (1 + np.arange(self.n))
        self.ub = 0.1 * (1 + np.arange(self.n))
        self.x0 = np.empty(self.n)
        # Place x0 just inside the bounds, alternating lower/upper, to
        # exercise bound-aware step selection.
        self.x0[::2] = (1 - 1e-7) * self.lb[::2]
        self.x0[1::2] = (1 - 1e-7) * self.ub[1::2]

        self.J_true = self.jac(self.x0)

    def fun(self, x):
        e = x[1:]**3 - x[:-1]**2
        return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0))

    def jac(self, x):
        # Analytical (tridiagonal) Jacobian of fun, used as ground truth.
        n = x.size
        J = np.zeros((n, n))
        J[0, 0] = -4 * x[0]
        J[0, 1] = 6 * x[1]**2
        for i in range(1, n - 1):
            J[i, i - 1] = -6 * x[i-1]
            J[i, i] = 9 * x[i]**2 - 4 * x[i]
            J[i, i + 1] = 6 * x[i+1]**2
        J[-1, -1] = 9 * x[-1]**2
        J[-1, -2] = -6 * x[-2]

        return J

    def structure(self, n):
        # 0/1 sparsity pattern matching jac's tridiagonal structure.
        A = np.zeros((n, n), dtype=int)
        A[0, 0] = 1
        A[0, 1] = 1
        for i in range(1, n - 1):
            A[i, i - 1: i + 2] = 1
        A[-1, -1] = 1
        A[-1, -2] = 1

        return A

    def test_all(self):
        A = self.structure(self.n)
        order = np.arange(self.n)
        groups_1 = group_columns(A, order)
        # Grouping should be valid regardless of column ordering.
        np.random.shuffle(order)
        groups_2 = group_columns(A, order)

        for method, groups, l, u in product(
                ['2-point', '3-point', 'cs'], [groups_1, groups_2],
                [-np.inf, self.lb], [np.inf, self.ub]):
            J = approx_derivative(self.fun, self.x0, method=method,
                                  bounds=(l, u), sparsity=(A, groups))
            assert_(isinstance(J, csr_matrix))
            assert_allclose(J.toarray(), self.J_true, rtol=1e-6)

            # Signed, per-component relative steps.
            rel_step = np.full_like(self.x0, 1e-8)
            rel_step[::2] *= -1
            J = approx_derivative(self.fun, self.x0, method=method,
                                  rel_step=rel_step, sparsity=(A, groups))
            assert_allclose(J.toarray(), self.J_true, rtol=1e-5)

    def test_no_precomputed_groups(self):
        # Passing only the structure: groups are computed internally.
        A = self.structure(self.n)
        J = approx_derivative(self.fun, self.x0, sparsity=A)
        assert_allclose(J.toarray(), self.J_true, rtol=1e-6)

    def test_equivalence(self):
        # A fully dense "sparsity" pattern must reproduce the dense result.
        structure = np.ones((self.n, self.n), dtype=int)
        groups = np.arange(self.n)
        for method in ['2-point', '3-point', 'cs']:
            J_dense = approx_derivative(self.fun, self.x0, method=method)
            J_sparse = approx_derivative(
                self.fun, self.x0, sparsity=(structure, groups), method=method)
            assert_allclose(J_dense, J_sparse.toarray(),
                            rtol=5e-16, atol=7e-15)

    def test_check_derivative(self):
        # check_derivative must accept a jac returning a sparse matrix.
        def jac(x):
            return csr_matrix(self.jac(x))

        accuracy = check_derivative(self.fun, jac, self.x0,
                                    bounds=(self.lb, self.ub))
        assert_(accuracy < 1e-9)

        accuracy = check_derivative(self.fun, jac, self.x0,
                                    bounds=(self.lb, self.ub))
        assert_(accuracy < 1e-9)
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
class TestApproxDerivativeLinearOperator:
    """Tests of approx_derivative with as_linear_operator=True.

    The returned object is only probed through matrix-vector products
    (``.dot(p)``), compared against products with the analytical Jacobian.
    Fixtures mirror the fun_*/jac_* pairs of TestApproxDerivativesDense.
    """

    def fun_scalar_scalar(self, x):
        return np.sinh(x)

    def jac_scalar_scalar(self, x):
        return np.cosh(x)

    def fun_scalar_vector(self, x):
        return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])

    def jac_scalar_vector(self, x):
        return np.array(
            [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)

    def fun_vector_scalar(self, x):
        return np.sin(x[0] * x[1]) * np.log(x[0])

    def jac_vector_scalar(self, x):
        return np.array([
            x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
            np.sin(x[0] * x[1]) / x[0],
            x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
        ])

    def fun_vector_vector(self, x):
        return np.array([
            x[0] * np.sin(x[1]),
            x[1] * np.cos(x[0]),
            x[0] ** 3 * x[1] ** -0.5
        ])

    def jac_vector_vector(self, x):
        return np.array([
            [np.sin(x[1]), x[0] * np.cos(x[1])],
            [-x[1] * np.sin(x[0]), np.cos(x[0])],
            [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
        ])

    def test_scalar_scalar(self):
        x0 = 1.0
        jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
                                       method='2-point',
                                       as_linear_operator=True)
        jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
                                       as_linear_operator=True)
        jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
                                       method='cs',
                                       as_linear_operator=True)
        jac_true = self.jac_scalar_scalar(x0)
        np.random.seed(1)
        # Probe the operator with random directions p.
        for i in range(10):
            p = np.random.uniform(-10, 10, size=(1,))
            assert_allclose(jac_diff_2.dot(p), jac_true*p,
                            rtol=1e-5)
            assert_allclose(jac_diff_3.dot(p), jac_true*p,
                            rtol=5e-6)
            assert_allclose(jac_diff_4.dot(p), jac_true*p,
                            rtol=5e-6)

    def test_scalar_vector(self):
        x0 = 0.5
        jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
                                       method='2-point',
                                       as_linear_operator=True)
        jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
                                       as_linear_operator=True)
        jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
                                       method='cs',
                                       as_linear_operator=True)
        jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
        np.random.seed(1)
        for i in range(10):
            p = np.random.uniform(-10, 10, size=(1,))
            assert_allclose(jac_diff_2.dot(p), jac_true.dot(p),
                            rtol=1e-5)
            assert_allclose(jac_diff_3.dot(p), jac_true.dot(p),
                            rtol=5e-6)
            assert_allclose(jac_diff_4.dot(p), jac_true.dot(p),
                            rtol=5e-6)

    def test_vector_scalar(self):
        x0 = np.array([100.0, -0.5])
        jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
                                       method='2-point',
                                       as_linear_operator=True)
        jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
                                       as_linear_operator=True)
        jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
                                       method='cs',
                                       as_linear_operator=True)
        jac_true = self.jac_vector_scalar(x0)
        np.random.seed(1)
        for i in range(10):
            p = np.random.uniform(-10, 10, size=x0.shape)
            # Operator output is 1-D even for a scalar-valued function.
            assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)),
                            rtol=1e-5)
            assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)),
                            rtol=5e-6)
            assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)),
                            rtol=1e-7)

    def test_vector_vector(self):
        x0 = np.array([-100.0, 0.2])
        jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
                                       method='2-point',
                                       as_linear_operator=True)
        jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
                                       as_linear_operator=True)
        jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
                                       method='cs',
                                       as_linear_operator=True)
        jac_true = self.jac_vector_vector(x0)
        np.random.seed(1)
        for i in range(10):
            p = np.random.uniform(-10, 10, size=x0.shape)
            assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5)
            assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6)
            assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7)

    def test_exception(self):
        # Bounds are not supported in linear-operator mode... but here
        # x0 violates the bounds, which must raise regardless.
        x0 = np.array([-100.0, 0.2])
        assert_raises(ValueError, approx_derivative,
                      self.fun_vector_vector, x0,
                      method='2-point', bounds=(1, np.inf))
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
def test_absolute_step_sign():
    """Regression test for gh12487: the sign of an explicit abs_step must
    select the differencing side for the 2-point method — positive steps
    take forward differences, negative steps take backward differences.
    """
    # Piecewise-linear function with a kink in both components at
    # x = [-1, -1]: first component is \/, second is /\.  The one-sided
    # derivative there depends entirely on which side gets probed.
    def f(x):
        return -np.abs(x[0] + 1) + np.abs(x[1] + 1)

    kink = [-1, -1]

    # Positive scalar step -> forward differences.
    assert_allclose(
        approx_derivative(f, kink, method='2-point', abs_step=1e-8),
        [-1.0, 1.0])

    # Negative scalar step -> backward differences.
    assert_allclose(
        approx_derivative(f, kink, method='2-point', abs_step=-1e-8),
        [1.0, -1.0])

    # Per-parameter steps, both forward.
    assert_allclose(
        approx_derivative(f, kink, method='2-point',
                          abs_step=[1e-8, 1e-8]),
        [-1.0, 1.0])

    # Forward and backward steps can be mixed per parameter.
    assert_allclose(
        approx_derivative(f, kink, method='2-point',
                          abs_step=[1e-8, -1e-8]),
        [-1.0, -1.0])
    assert_allclose(
        approx_derivative(f, kink, method='2-point',
                          abs_step=[-1e-8, 1e-8]),
        [1.0, 1.0])

    # A step that would cross a bound is reversed.  (Partially covered in
    # TestAdjustSchemeToBounds, but only for the lower-level helper.)
    assert_allclose(
        approx_derivative(f, kink, method='2-point', abs_step=1e-8,
                          bounds=(-np.inf, -1)),
        [1.0, -1.0])
    assert_allclose(
        approx_derivative(f, kink, method='2-point', abs_step=-1e-8,
                          bounds=(-1, np.inf)),
        [-1.0, 1.0])
|
| 799 |
+
|
| 800 |
+
|
| 801 |
+
def test__compute_absolute_step():
    """Verify how _compute_absolute_step derives absolute steps from
    (possibly absent) relative steps, including sign handling."""
    x0 = np.array([1e-5, 0, 1, 1e5])
    f0 = np.array(1.0)

    # Default relative step per method: EPS**(1/2) for first-order schemes,
    # EPS**(1/3) for the second-order 3-point scheme.
    EPS = np.finfo(np.float64).eps
    relative_step = {
        "2-point": EPS**0.5,
        "3-point": EPS**(1/3),
        "cs": EPS**0.5,
    }

    for method, rel_step in relative_step.items():
        # Components at/below magnitude 1 get the bare rel_step; larger
        # components are scaled by |x0|.
        expected = np.array([rel_step,
                             rel_step * 1.,
                             rel_step * 1.,
                             rel_step * np.abs(x0[3])])

        assert_allclose(_compute_absolute_step(None, x0, f0, method),
                        expected)

        # The step inherits the sign convention of -x0 (0 counts as +).
        signs = np.where(-x0 >= 0, 1.0, -1.0)
        assert_allclose(_compute_absolute_step(None, -x0, f0, method),
                        signs * expected)

    # An explicit per-component rel_step overrides the default, except where
    # it would produce a degenerate step (the zero component falls back).
    rel_step = np.array([0.1, 1, 10, 100])
    expected = np.array([rel_step[0] * x0[0],
                         relative_step['2-point'],
                         rel_step[2] * 1.,
                         rel_step[3] * np.abs(x0[3])])

    assert_allclose(_compute_absolute_step(rel_step, x0, f0, '2-point'),
                    expected)

    signs = np.where(-x0 >= 0, 1.0, -1.0)
    assert_allclose(_compute_absolute_step(rel_step, -x0, f0, '2-point'),
                    signs * expected)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py
ADDED
|
@@ -0,0 +1,1156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import time
|
| 6 |
+
from multiprocessing import Pool
|
| 7 |
+
from numpy.testing import assert_allclose, IS_PYPY
|
| 8 |
+
import pytest
|
| 9 |
+
from pytest import raises as assert_raises, warns
|
| 10 |
+
from scipy.optimize import (shgo, Bounds, minimize_scalar, minimize, rosen,
|
| 11 |
+
rosen_der, rosen_hess, NonlinearConstraint)
|
| 12 |
+
from scipy.optimize._constraints import new_constraint_to_old
|
| 13 |
+
from scipy.optimize._shgo import SHGO
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class StructTestFunction:
    """Container pairing a test objective with its known reference solution.

    Subclasses define the objective ``f`` (and optionally constraint
    functions); each instance stores the search bounds plus the expected
    results that ``run_test`` asserts against.
    """

    def __init__(self, bounds, expected_x, expected_fun=None,
                 expected_xl=None, expected_funl=None):
        # Search-space bounds, one (lower, upper) pair per dimension.
        self.bounds = bounds
        # Known global minimiser (None when only the value is checked).
        self.expected_x = expected_x
        # Known global minimum value (optional check).
        self.expected_fun = expected_fun
        # Expected local minimisers, best-first (optional check).
        self.expected_xl = expected_xl
        # Expected local minimum values matching expected_xl (optional).
        self.expected_funl = expected_funl
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def wrap_constraints(g):
    """Convert inequality constraint function(s) to old-style dict format.

    Parameters
    ----------
    g : callable, sequence of callables, or None
        Constraint function(s), each feasible where ``g(x) >= 0``.

    Returns
    -------
    tuple of dict or None
        A tuple of ``{'type': 'ineq', 'fun': g}`` dicts, or ``None`` when
        no constraints were supplied.
    """
    if g is None:
        return None
    # Accept a bare callable as well as a tuple/list of callables.
    if not isinstance(g, (tuple, list)):
        g = (g,)
    # Build the dicts without shadowing the parameter (the original used
    # `for g in g:`, which rebound the argument mid-loop).
    return tuple({'type': 'ineq', 'fun': con} for con in g)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class StructTest1(StructTestFunction):
    """Convex sphere function with one linear inequality constraint."""

    def f(self, x):
        # Sphere function: global minimum at the origin.
        return x[0] ** 2 + x[1] ** 2

    def g(x):
        # Feasible when x[0] + x[1] <= 6 (axis=0 allows vectorised input).
        return -(np.sum(x, axis=0) - 6.0)

    cons = wrap_constraints(g)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# StructTest1 over different bound specifications; all share the expected
# minimiser at the origin.
test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)],
                      expected_x=[0, 0])
test1_2 = StructTest1(bounds=[(0, 1), (0, 1)],
                      expected_x=[0, 0])
# (None, None) bounds exercise shgo's handling of unbounded variables.
test1_3 = StructTest1(bounds=[(None, None), (None, None)],
                      expected_x=[0, 0])
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class StructTest2(StructTestFunction):
    """
    Scalar function with several minima to test all minimiser retrievals
    """

    def f(self, x):
        return (x - 30) * np.sin(x)

    def g(x):
        # Feasible when sum(x) <= 58.
        return 58 - np.sum(x, axis=0)

    cons = wrap_constraints(g)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Univariate multimodal problem: all local minimisers on [0, 60] must be
# found, and xl/funl must be sorted best-first.
test2_1 = StructTest2(bounds=[(0, 60)],
                      expected_x=[1.53567906],
                      expected_fun=-28.44677132,
                      # Important: test that funl return is in the correct
                      # order
                      expected_xl=np.array([[1.53567906],
                                            [55.01782167],
                                            [7.80894889],
                                            [48.74797493],
                                            [14.07445705],
                                            [42.4913859],
                                            [20.31743841],
                                            [36.28607535],
                                            [26.43039605],
                                            [30.76371366]]),

                      expected_funl=np.array([-28.44677132, -24.99785984,
                                              -22.16855376, -18.72136195,
                                              -15.89423937, -12.45154942,
                                              -9.63133158, -6.20801301,
                                              -3.43727232, -0.46353338])
                      )

# Narrower bounds [0, 4.5] leave only the single global minimiser.
test2_2 = StructTest2(bounds=[(0, 4.5)],
                      expected_x=[1.53567906],
                      expected_fun=[-28.44677132],
                      expected_xl=np.array([[1.53567906]]),
                      expected_funl=np.array([-28.44677132])
                      )
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
class StructTest3(StructTestFunction):
    """
    Hock and Schittkowski 18 problem (HS18). Hoch and Schittkowski (1981)
    http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
    Minimize: f = 0.01 * (x_1)**2 + (x_2)**2

    Subject to: x_1 * x_2 - 25.0 >= 0,
                (x_1)**2 + (x_2)**2 - 25.0 >= 0,
                2 <= x_1 <= 50,
                0 <= x_2 <= 50.

    Approx. Answer:
        f([(250)**0.5 , (2.5)**0.5]) = 5.0


    """

    # amended to test vectorisation of constraints
    def f(self, x):
        return 0.01 * (x[0]) ** 2 + (x[1]) ** 2

    def g1(x):
        return x[0] * x[1] - 25.0

    def g2(x):
        return x[0] ** 2 + x[1] ** 2 - 25.0

    # g = (g1, g2)
    # cons = wrap_constraints(g)

    def g(x):
        # Both inequalities returned as a tuple so one vectorised
        # NonlinearConstraint covers them.
        return x[0] * x[1] - 25.0, x[0] ** 2 + x[1] ** 2 - 25.0

    # this checks that shgo can be sent new-style constraints
    __nlc = NonlinearConstraint(g, 0, np.inf)
    cons = (__nlc,)
|
| 141 |
+
|
| 142 |
+
# HS18: constrained NLP with known optimum value 5.0.
test3_1 = StructTest3(bounds=[(2, 50), (0, 50)],
                      expected_x=[250 ** 0.5, 2.5 ** 0.5],
                      expected_fun=5.0
                      )
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class StructTest4(StructTestFunction):
    """
    Hock and Schittkowski 11 problem (HS11). Hoch and Schittkowski (1981)

    NOTE: Did not find in original reference to HS collection, refer to
          Henderson (2015) problem 7 instead. 02.03.2016
    """

    def f(self, x):
        return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4
                + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2
                + x[6] ** 4
                - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6]
                )

    def g1(x):
        return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2
                 + 5 * x[4] - 127)

    def g2(x):
        return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0)

    def g3(x):
        return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196)

    def g4(x):
        return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2
                 + 5 * x[5] - 11 * x[6])

    # All four inequality constraints wrapped as old-style dicts.
    g = (g1, g2, g3, g4)

    cons = wrap_constraints(g)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
# HS11 (7-dimensional): higher-dimensional constrained NLP.
test4_1 = StructTest4(bounds=[(-10, 10), ] * 7,
                      expected_x=[2.330499, 1.951372, -0.4775414,
                                  4.365726, -0.6244870, 1.038131, 1.594227],
                      expected_fun=680.6300573
                      )
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
class StructTest5(StructTestFunction):
    """Eggholder function: highly multimodal, unconstrained 2-D problem."""

    def f(self, x):
        return (
            -(x[1] + 47.0)*np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
            - x[0]*np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
        )

    # No constraints for this problem.
    g = None
    cons = wrap_constraints(g)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# Eggholder on the standard [-512, 512]^2 domain with its known optimum.
test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)],
                      expected_fun=[-959.64066272085051],
                      expected_x=[512., 404.23180542])
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
class StructTestLJ(StructTestFunction):
    """
    LennardJones objective function. Used to test symmetry constraints
    settings.
    """

    def f(self, x, *args):
        print(f'x = {x}')
        # args[0] is the total coordinate count; three coordinates per atom.
        self.N = args[0]
        k = int(self.N / 3)
        s = 0.0

        # Pairwise 6-12 Lennard-Jones potential over all atom pairs.
        for i in range(k - 1):
            for j in range(i + 1, k):
                a = 3 * i
                b = 3 * j
                xd = x[a] - x[b]
                yd = x[a + 1] - x[b + 1]
                zd = x[a + 2] - x[b + 2]
                ed = xd * xd + yd * yd + zd * zd
                ud = ed * ed * ed
                # Skip coincident atoms (zero separation would divide by 0).
                if ed > 0.0:
                    s += (1.0 / ud - 2.0) / ud

        return s

    g = None
    cons = wrap_constraints(g)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
# Six coordinates = two atoms in 3-D.
N = 6
boundsLJ = list(zip([-4.0] * 6, [4.0] * 6))

# expected_x is None: only the function value (-1.0) is asserted; the
# commented coordinates below are one reference minimiser.
testLJ = StructTestLJ(bounds=boundsLJ,
                      expected_fun=[-1.0],
                      expected_x=None,
                      # expected_x=[-2.71247337e-08,
                      #             -2.71247337e-08,
                      #             -2.50000222e+00,
                      #             -2.71247337e-08,
                      #             -2.71247337e-08,
                      #             -1.50000222e+00]
                      )
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
class StructTestS(StructTestFunction):
    """Perfectly symmetric 4-D quadratic centred at (0.5, 0.5, 0.5, 0.5)."""

    def f(self, x):
        # Sum of squared deviations from the common centre 0.5.
        return sum((x[dim] - 0.5) ** 2 for dim in range(4))

    # Unconstrained problem.
    g = None
    cons = wrap_constraints(g)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
# Symmetric quadratic: optimum value 0 at x = (0.5, 0.5, 0.5, 0.5).
test_s = StructTestS(bounds=[(0, 2.0), ] * 4,
                     expected_fun=0.0,
                     expected_x=np.ones(4) - 0.5
                     )
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
class StructTestTable(StructTestFunction):
    """Discontinuous 'tabletop' function: flat except at one point."""

    def f(self, x):
        # Single dip to 50 at (3, 3); flat plateau of 100 elsewhere.
        return 50 if x[0] == 3.0 and x[1] == 3.0 else 100

    # Unconstrained problem.
    g = None
    cons = wrap_constraints(g)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
# Tabletop function: the only improvement over the plateau is at (3, 3).
test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)],
                             expected_fun=[50],
                             expected_x=[3.0, 3.0])
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class StructTestInfeasible(StructTestFunction):
    """
    Test function with no feasible domain.
    """

    def f(self, x, *args):
        return x[0] ** 2 + x[1] ** 2

    # Each opposing pair (g1/g2 and g3/g4) forces an equality; jointly they
    # pin x to (0, 1), which violates the bounds used below (x[0] >= 2),
    # so no feasible point exists.
    def g1(x):
        return x[0] + x[1] - 1

    def g2(x):
        return -(x[0] + x[1] - 1)

    def g3(x):
        return -x[0] + x[1] - 1

    def g4(x):
        return -(-x[0] + x[1] - 1)

    g = (g1, g2, g3, g4)
    cons = wrap_constraints(g)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
# No feasible point exists within these bounds; no expected results.
test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)],
                                       expected_fun=None,
                                       expected_x=None
                                       )
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
@pytest.mark.skip("Not a test")
def run_test(test, args=(), test_atol=1e-5, n=100, iters=None,
             callback=None, minimizer_kwargs=None, options=None,
             sampling_method='sobol', workers=1):
    """Run ``shgo`` on a StructTestFunction instance and assert the result
    matches its stored reference solution.

    Each of ``expected_x``/``expected_fun``/``expected_xl``/``expected_funl``
    is only checked when it is not None on ``test``, so problems with
    degenerate or unknown minimisers can skip individual assertions.
    All remaining keyword arguments are forwarded to ``shgo`` unchanged.
    """
    res = shgo(test.f, test.bounds, args=args, constraints=test.cons,
               n=n, iters=iters, callback=callback,
               minimizer_kwargs=minimizer_kwargs, options=options,
               sampling_method=sampling_method, workers=workers)

    print(f'res = {res}')
    logging.info(f'res = {res}')
    if test.expected_x is not None:
        np.testing.assert_allclose(res.x, test.expected_x,
                                   rtol=test_atol,
                                   atol=test_atol)

    # (Optional tests)
    if test.expected_fun is not None:
        np.testing.assert_allclose(res.fun,
                                   test.expected_fun,
                                   atol=test_atol)

    if test.expected_xl is not None:
        np.testing.assert_allclose(res.xl,
                                   test.expected_xl,
                                   atol=test_atol)

    if test.expected_funl is not None:
        np.testing.assert_allclose(res.funl,
                                   test.expected_funl,
                                   atol=test_atol)
    return
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
# Base test functions:
|
| 346 |
+
class TestShgoSobolTestFunctions:
    """
    Global optimisation tests with Sobol sampling:
    """

    # Sobol algorithm
    def test_f1_1_sobol(self):
        """Multivariate test function 1:
        x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
        run_test(test1_1)

    def test_f1_2_sobol(self):
        """Multivariate test function 1:
        x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
        run_test(test1_2)

    def test_f1_3_sobol(self):
        """Multivariate test function 1:
        x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]"""
        options = {'disp': True}
        run_test(test1_3, options=options)

    def test_f2_1_sobol(self):
        """Univariate test function on
        f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
        run_test(test2_1)

    def test_f2_2_sobol(self):
        """Univariate test function on
        f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
        run_test(test2_2)

    def test_f3_sobol(self):
        """NLP: Hock and Schittkowski problem 18"""
        run_test(test3_1)

    @pytest.mark.slow
    def test_f4_sobol(self):
        """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
        options = {'infty_constraints': False}
        # n doubled from the earlier value (see commented call below).
        # run_test(test4_1, n=990, options=options)
        run_test(test4_1, n=990 * 2, options=options)

    def test_f5_1_sobol(self):
        """NLP: Eggholder, multimodal"""
        # run_test(test5_1, n=30)
        run_test(test5_1, n=60)

    def test_f5_2_sobol(self):
        """NLP: Eggholder, multimodal"""
        # run_test(test5_1, n=60, iters=5)
        run_test(test5_1, n=60, iters=5)

    # def test_t911(self):
    #     """1D tabletop function"""
    #     run_test(test11_1)
|
| 404 |
+
class TestShgoSimplicialTestFunctions:
    """
    Global optimisation tests with Simplicial sampling:
    """

    def test_f1_1_simplicial(self):
        """Multivariate test function 1:
        x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
        run_test(test1_1, n=1, sampling_method='simplicial')

    def test_f1_2_simplicial(self):
        """Multivariate test function 1:
        x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
        run_test(test1_2, n=1, sampling_method='simplicial')

    def test_f1_3_simplicial(self):
        """Multivariate test function 1: x[0]**2 + x[1]**2
        with bounds=[(None, None),(None, None)]"""
        run_test(test1_3, n=5, sampling_method='simplicial')

    def test_f2_1_simplicial(self):
        """Univariate test function on
        f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
        options = {'minimize_every_iter': False}
        run_test(test2_1, n=200, iters=7, options=options,
                 sampling_method='simplicial')

    def test_f2_2_simplicial(self):
        """Univariate test function on
        f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
        run_test(test2_2, n=1, sampling_method='simplicial')

    def test_f3_simplicial(self):
        """NLP: Hock and Schittkowski problem 18"""
        run_test(test3_1, n=1, sampling_method='simplicial')

    @pytest.mark.slow
    def test_f4_simplicial(self):
        """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
        run_test(test4_1, n=1, sampling_method='simplicial')

    def test_lj_symmetry_old(self):
        """LJ: Symmetry-constrained test function"""
        # 'symmetry': True is the legacy boolean form of the option.
        options = {'symmetry': True,
                   'disp': True}
        args = (6,)  # Number of atoms
        run_test(testLJ, args=args, n=300,
                 options=options, iters=1,
                 sampling_method='simplicial')

    def test_f5_1_lj_symmetry(self):
        """LJ: Symmetry constrained test function"""
        # List form: every variable assigned to symmetry group 0.
        options = {'symmetry': [0, ] * 6,
                   'disp': True}
        args = (6,)  # No. of atoms

        run_test(testLJ, args=args, n=300,
                 options=options, iters=1,
                 sampling_method='simplicial')

    def test_f5_2_cons_symmetry(self):
        """Symmetry constrained test function"""
        options = {'symmetry': [0, 0],
                   'disp': True}

        run_test(test1_1, n=200,
                 options=options, iters=1,
                 sampling_method='simplicial')

    @pytest.mark.fail_slow(10)
    def test_f5_3_cons_symmetry(self):
        """Asymmetrically constrained test function"""
        # Variable 3 is placed in its own symmetry group.
        options = {'symmetry': [0, 0, 0, 3],
                   'disp': True}

        run_test(test_s, n=10000,
                 options=options,
                 iters=1,
                 sampling_method='simplicial')

    @pytest.mark.skip("Not a test")
    def test_f0_min_variance(self):
        """Return a minimum on a perfectly symmetric problem, based on
        gh10429"""
        avg = 0.5  # Given average value of x
        cons = {'type': 'eq', 'fun': lambda x: np.mean(x) - avg}

        # Minimize the variance of x under the given constraint
        res = shgo(np.var, bounds=6 * [(0, 1)], constraints=cons)
        assert res.success
        assert_allclose(res.fun, 0, atol=1e-15)
        assert_allclose(res.x, 0.5)

    @pytest.mark.skip("Not a test")
    def test_f0_min_variance_1D(self):
        """Return a minimum on a perfectly symmetric 1D problem, based on
        gh10538"""

        def fun(x):
            return x * (x - 1.0) * (x - 0.5)

        bounds = [(0, 1)]
        res = shgo(fun, bounds=bounds)
        # Compare against a scalar minimiser as the reference.
        ref = minimize_scalar(fun, bounds=bounds[0])
        assert res.success
        assert_allclose(res.fun, ref.fun)
        assert_allclose(res.x, ref.x, rtol=1e-6)
|
| 511 |
+
|
| 512 |
+
# Argument test functions
|
| 513 |
+
class TestShgoArguments:
|
| 514 |
+
def test_1_1_simpl_iter(self):
|
| 515 |
+
"""Iterative simplicial sampling on TestFunction 1 (multivariate)"""
|
| 516 |
+
run_test(test1_2, n=None, iters=2, sampling_method='simplicial')
|
| 517 |
+
|
| 518 |
+
def test_1_2_simpl_iter(self):
|
| 519 |
+
"""Iterative simplicial on TestFunction 2 (univariate)"""
|
| 520 |
+
options = {'minimize_every_iter': False}
|
| 521 |
+
run_test(test2_1, n=None, iters=9, options=options,
|
| 522 |
+
sampling_method='simplicial')
|
| 523 |
+
|
| 524 |
+
def test_2_1_sobol_iter(self):
|
| 525 |
+
"""Iterative Sobol sampling on TestFunction 1 (multivariate)"""
|
| 526 |
+
run_test(test1_2, n=None, iters=1, sampling_method='sobol')
|
| 527 |
+
|
| 528 |
+
def test_2_2_sobol_iter(self):
|
| 529 |
+
"""Iterative Sobol sampling on TestFunction 2 (univariate)"""
|
| 530 |
+
res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
|
| 531 |
+
n=None, iters=1, sampling_method='sobol')
|
| 532 |
+
|
| 533 |
+
np.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, atol=1e-5)
|
| 534 |
+
np.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5)
|
| 535 |
+
|
| 536 |
+
def test_3_1_disp_simplicial(self):
|
| 537 |
+
"""Iterative sampling on TestFunction 1 and 2 (multi and univariate)
|
| 538 |
+
"""
|
| 539 |
+
|
| 540 |
+
def callback_func(x):
|
| 541 |
+
print("Local minimization callback test")
|
| 542 |
+
|
| 543 |
+
for test in [test1_1, test2_1]:
|
| 544 |
+
shgo(test.f, test.bounds, iters=1,
|
| 545 |
+
sampling_method='simplicial',
|
| 546 |
+
callback=callback_func, options={'disp': True})
|
| 547 |
+
shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
|
| 548 |
+
callback=callback_func, options={'disp': True})
|
| 549 |
+
|
| 550 |
+
def test_3_2_disp_sobol(self):
|
| 551 |
+
"""Iterative sampling on TestFunction 1 and 2 (multi and univariate)"""
|
| 552 |
+
|
| 553 |
+
def callback_func(x):
|
| 554 |
+
print("Local minimization callback test")
|
| 555 |
+
|
| 556 |
+
for test in [test1_1, test2_1]:
|
| 557 |
+
shgo(test.f, test.bounds, iters=1, sampling_method='sobol',
|
| 558 |
+
callback=callback_func, options={'disp': True})
|
| 559 |
+
|
| 560 |
+
shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
|
| 561 |
+
callback=callback_func, options={'disp': True})
|
| 562 |
+
|
| 563 |
+
def test_args_gh14589(self):
|
| 564 |
+
"""Using `args` used to cause `shgo` to fail; see #14589, #15986,
|
| 565 |
+
#16506"""
|
| 566 |
+
res = shgo(func=lambda x, y, z: x * z + y, bounds=[(0, 3)], args=(1, 2)
|
| 567 |
+
)
|
| 568 |
+
ref = shgo(func=lambda x: 2 * x + 1, bounds=[(0, 3)])
|
| 569 |
+
assert_allclose(res.fun, ref.fun)
|
| 570 |
+
assert_allclose(res.x, ref.x)
|
| 571 |
+
|
| 572 |
+
@pytest.mark.slow
|
| 573 |
+
def test_4_1_known_f_min(self):
|
| 574 |
+
"""Test known function minima stopping criteria"""
|
| 575 |
+
# Specify known function value
|
| 576 |
+
options = {'f_min': test4_1.expected_fun,
|
| 577 |
+
'f_tol': 1e-6,
|
| 578 |
+
'minimize_every_iter': True}
|
| 579 |
+
# TODO: Make default n higher for faster tests
|
| 580 |
+
run_test(test4_1, n=None, test_atol=1e-5, options=options,
|
| 581 |
+
sampling_method='simplicial')
|
| 582 |
+
|
| 583 |
+
@pytest.mark.slow
|
| 584 |
+
def test_4_2_known_f_min(self):
|
| 585 |
+
"""Test Global mode limiting local evaluations"""
|
| 586 |
+
options = { # Specify known function value
|
| 587 |
+
'f_min': test4_1.expected_fun,
|
| 588 |
+
'f_tol': 1e-6,
|
| 589 |
+
# Specify number of local iterations to perform
|
| 590 |
+
'minimize_every_iter': True,
|
| 591 |
+
'local_iter': 1}
|
| 592 |
+
|
| 593 |
+
run_test(test4_1, n=None, test_atol=1e-5, options=options,
|
| 594 |
+
sampling_method='simplicial')
|
| 595 |
+
|
| 596 |
+
def test_4_4_known_f_min(self):
|
| 597 |
+
"""Test Global mode limiting local evaluations for 1D funcs"""
|
| 598 |
+
options = { # Specify known function value
|
| 599 |
+
'f_min': test2_1.expected_fun,
|
| 600 |
+
'f_tol': 1e-6,
|
| 601 |
+
# Specify number of local iterations to perform+
|
| 602 |
+
'minimize_every_iter': True,
|
| 603 |
+
'local_iter': 1,
|
| 604 |
+
'infty_constraints': False}
|
| 605 |
+
|
| 606 |
+
res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
|
| 607 |
+
n=None, iters=None, options=options,
|
| 608 |
+
sampling_method='sobol')
|
| 609 |
+
np.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, atol=1e-5)
|
| 610 |
+
|
| 611 |
+
def test_5_1_simplicial_argless(self):
|
| 612 |
+
"""Test Default simplicial sampling settings on TestFunction 1"""
|
| 613 |
+
res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons)
|
| 614 |
+
np.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, atol=1e-5)
|
| 615 |
+
|
| 616 |
+
def test_5_2_sobol_argless(self):
|
| 617 |
+
"""Test Default sobol sampling settings on TestFunction 1"""
|
| 618 |
+
res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons,
|
| 619 |
+
sampling_method='sobol')
|
| 620 |
+
np.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, atol=1e-5)
|
| 621 |
+
|
| 622 |
+
def test_6_1_simplicial_max_iter(self):
|
| 623 |
+
"""Test that maximum iteration option works on TestFunction 3"""
|
| 624 |
+
options = {'max_iter': 2}
|
| 625 |
+
res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
|
| 626 |
+
options=options, sampling_method='simplicial')
|
| 627 |
+
np.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, atol=1e-5)
|
| 628 |
+
np.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
|
| 629 |
+
|
| 630 |
+
def test_6_2_simplicial_min_iter(self):
|
| 631 |
+
"""Test that maximum iteration option works on TestFunction 3"""
|
| 632 |
+
options = {'min_iter': 2}
|
| 633 |
+
res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
|
| 634 |
+
options=options, sampling_method='simplicial')
|
| 635 |
+
np.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, atol=1e-5)
|
| 636 |
+
np.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
|
| 637 |
+
|
| 638 |
+
def test_7_1_minkwargs(self):
|
| 639 |
+
"""Test the minimizer_kwargs arguments for solvers with constraints"""
|
| 640 |
+
# Test solvers
|
| 641 |
+
for solver in ['COBYLA', 'COBYQA', 'SLSQP']:
|
| 642 |
+
# Note that passing global constraints to SLSQP is tested in other
|
| 643 |
+
# unittests which run test4_1 normally
|
| 644 |
+
minimizer_kwargs = {'method': solver,
|
| 645 |
+
'constraints': test3_1.cons}
|
| 646 |
+
run_test(test3_1, n=100, test_atol=1e-3,
|
| 647 |
+
minimizer_kwargs=minimizer_kwargs,
|
| 648 |
+
sampling_method='sobol')
|
| 649 |
+
|
| 650 |
+
def test_7_2_minkwargs(self):
|
| 651 |
+
"""Test the minimizer_kwargs default inits"""
|
| 652 |
+
minimizer_kwargs = {'ftol': 1e-5}
|
| 653 |
+
options = {'disp': True} # For coverage purposes
|
| 654 |
+
SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0],
|
| 655 |
+
minimizer_kwargs=minimizer_kwargs, options=options)
|
| 656 |
+
|
| 657 |
+
def test_7_3_minkwargs(self):
|
| 658 |
+
"""Test minimizer_kwargs arguments for solvers without constraints"""
|
| 659 |
+
for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
|
| 660 |
+
'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact',
|
| 661 |
+
'trust-krylov']:
|
| 662 |
+
def jac(x):
|
| 663 |
+
return np.array([2 * x[0], 2 * x[1]]).T
|
| 664 |
+
|
| 665 |
+
def hess(x):
|
| 666 |
+
return np.array([[2, 0], [0, 2]])
|
| 667 |
+
|
| 668 |
+
minimizer_kwargs = {'method': solver,
|
| 669 |
+
'jac': jac,
|
| 670 |
+
'hess': hess}
|
| 671 |
+
logging.info(f"Solver = {solver}")
|
| 672 |
+
logging.info("=" * 100)
|
| 673 |
+
run_test(test1_1, n=100, test_atol=1e-3,
|
| 674 |
+
minimizer_kwargs=minimizer_kwargs,
|
| 675 |
+
sampling_method='sobol')
|
| 676 |
+
|
| 677 |
+
def test_8_homology_group_diff(self):
|
| 678 |
+
options = {'minhgrd': 1,
|
| 679 |
+
'minimize_every_iter': True}
|
| 680 |
+
|
| 681 |
+
run_test(test1_1, n=None, iters=None, options=options,
|
| 682 |
+
sampling_method='simplicial')
|
| 683 |
+
|
| 684 |
+
def test_9_cons_g(self):
|
| 685 |
+
"""Test single function constraint passing"""
|
| 686 |
+
SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0])
|
| 687 |
+
|
| 688 |
+
@pytest.mark.xfail(IS_PYPY and sys.platform == 'win32',
|
| 689 |
+
reason="Failing and fix in PyPy not planned (see gh-18632)")
|
| 690 |
+
def test_10_finite_time(self):
|
| 691 |
+
"""Test single function constraint passing"""
|
| 692 |
+
options = {'maxtime': 1e-15}
|
| 693 |
+
|
| 694 |
+
def f(x):
|
| 695 |
+
time.sleep(1e-14)
|
| 696 |
+
return 0.0
|
| 697 |
+
|
| 698 |
+
res = shgo(f, test1_1.bounds, iters=5, options=options)
|
| 699 |
+
# Assert that only 1 rather than 5 requested iterations ran:
|
| 700 |
+
assert res.nit == 1
|
| 701 |
+
|
| 702 |
+
def test_11_f_min_0(self):
|
| 703 |
+
"""Test to cover the case where f_lowest == 0"""
|
| 704 |
+
options = {'f_min': 0.0,
|
| 705 |
+
'disp': True}
|
| 706 |
+
res = shgo(test1_2.f, test1_2.bounds, n=10, iters=None,
|
| 707 |
+
options=options, sampling_method='sobol')
|
| 708 |
+
np.testing.assert_equal(0, res.x[0])
|
| 709 |
+
np.testing.assert_equal(0, res.x[1])
|
| 710 |
+
|
| 711 |
+
# @nottest
|
| 712 |
+
@pytest.mark.skip(reason="no way of currently testing this")
|
| 713 |
+
def test_12_sobol_inf_cons(self):
|
| 714 |
+
"""Test to cover the case where f_lowest == 0"""
|
| 715 |
+
# TODO: This test doesn't cover anything new, it is unknown what the
|
| 716 |
+
# original test was intended for as it was never complete. Delete or
|
| 717 |
+
# replace in the future.
|
| 718 |
+
options = {'maxtime': 1e-15,
|
| 719 |
+
'f_min': 0.0}
|
| 720 |
+
res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None,
|
| 721 |
+
options=options, sampling_method='sobol')
|
| 722 |
+
np.testing.assert_equal(0.0, res.fun)
|
| 723 |
+
|
| 724 |
+
def test_13_high_sobol(self):
|
| 725 |
+
"""Test init of high-dimensional sobol sequences"""
|
| 726 |
+
|
| 727 |
+
def f(x):
|
| 728 |
+
return 0
|
| 729 |
+
|
| 730 |
+
bounds = [(None, None), ] * 41
|
| 731 |
+
SHGOc = SHGO(f, bounds, sampling_method='sobol')
|
| 732 |
+
# SHGOc.sobol_points(2, 50)
|
| 733 |
+
SHGOc.sampling_function(2, 50)
|
| 734 |
+
|
| 735 |
+
def test_14_local_iter(self):
|
| 736 |
+
"""Test limited local iterations for a pseudo-global mode"""
|
| 737 |
+
options = {'local_iter': 4}
|
| 738 |
+
run_test(test5_1, n=60, options=options)
|
| 739 |
+
|
| 740 |
+
def test_15_min_every_iter(self):
|
| 741 |
+
"""Test minimize every iter options and cover function cache"""
|
| 742 |
+
options = {'minimize_every_iter': True}
|
| 743 |
+
run_test(test1_1, n=1, iters=7, options=options,
|
| 744 |
+
sampling_method='sobol')
|
| 745 |
+
|
| 746 |
+
def test_16_disp_bounds_minimizer(self, capsys):
|
| 747 |
+
"""Test disp=True with minimizers that do not support bounds """
|
| 748 |
+
options = {'disp': True}
|
| 749 |
+
minimizer_kwargs = {'method': 'nelder-mead'}
|
| 750 |
+
run_test(test1_2, sampling_method='simplicial',
|
| 751 |
+
options=options, minimizer_kwargs=minimizer_kwargs)
|
| 752 |
+
|
| 753 |
+
    def test_17_custom_sampling(self):
        """Test the functionality to add custom sampling methods to shgo"""

        def sample(n, d):
            # A callable (n, d) -> (n, d) array is the custom-sampler contract.
            return np.random.uniform(size=(n, d))

        run_test(test1_1, n=30, sampling_method=sample)
|
| 760 |
+
|
| 761 |
+
    def test_18_bounds_class(self):
        # test that new and old bounds yield same result
        def f(x):
            return np.square(x).sum()

        lb = [-6., 1., -5.]
        ub = [-1., 3., 5.]
        # Old style: sequence of (lb, ub) pairs; new style: `Bounds` instance.
        bounds_old = list(zip(lb, ub))
        bounds_new = Bounds(lb, ub)

        res_old_bounds = shgo(f, bounds_old)
        res_new_bounds = shgo(f, bounds_new)

        # Identical bounds representations must produce identical runs.
        assert res_new_bounds.nfev == res_old_bounds.nfev
        assert res_new_bounds.message == res_old_bounds.message
        assert res_new_bounds.success == res_old_bounds.success
        # Minimum of sum(x**2) clipped to the box is at the nearest corner/edge.
        x_opt = np.array([-1., 1., 0.])
        np.testing.assert_allclose(res_new_bounds.x, x_opt)
        np.testing.assert_allclose(res_new_bounds.x, res_old_bounds.x)
|
| 780 |
+
|
| 781 |
+
    @pytest.mark.fail_slow(10)
    def test_19_parallelization(self):
        """Test the functionality to add custom sampling methods to shgo"""
        # NOTE(review): docstring looks copy-pasted from test_17 — this test
        # actually exercises the `workers` (parallel map) interface.

        with Pool(2) as p:
            run_test(test1_1, n=30, workers=p.map)  # Constrained
        run_test(test1_1, n=30, workers=map)  # Constrained
        with Pool(2) as p:
            run_test(test_s, n=30, workers=p.map)  # Unconstrained
        run_test(test_s, n=30, workers=map)  # Unconstrained
|
| 791 |
+
|
| 792 |
+
    def test_20_constrained_args(self):
        """Test that constraints can be passed to arguments"""

        def eggholder(x):
            # NOTE(review): defined but not used in this test — presumably a
            # leftover from an earlier version; confirm before deleting.
            return (
                -(x[1] + 47.0)*np.sin(np.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0))))
                - x[0]*np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
            )

        def f(x):  # (cattle-feed)
            return 24.55 * x[0] + 26.75 * x[1] + 39 * x[2] + 40.50 * x[3]

        bounds = [(0, 1.0), ] * 4

        def g1_modified(x, i):
            # Inequality constraint with an extra argument `i`; with i=0 the
            # first two terms vanish.
            return i * 2.3 * x[0] + i * 5.6 * x[1] + 11.1 * x[2] + 1.3 * x[
                3] - 5  # >=0

        def g2(x):
            return (
                12*x[0] + 11.9*x[1] + 41.8*x[2] + 52.1*x[3] - 21
                - 1.645*np.sqrt(
                    0.28*x[0]**2 + 0.19*x[1]**2 + 20.5*x[2]**2 + 0.62*x[3]**2
                )
            )  # >=0

        def h1(x):
            return x[0] + x[1] + x[2] + x[3] - 1  # == 0

        # 'args' on the first constraint is the feature under test.
        cons = ({'type': 'ineq', 'fun': g1_modified, "args": (0,)},
                {'type': 'ineq', 'fun': g2},
                {'type': 'eq', 'fun': h1})

        shgo(f, bounds, n=300, iters=1, constraints=cons)
        # using constrain with arguments AND sampling method sobol
        shgo(f, bounds, n=300, iters=1, constraints=cons,
             sampling_method='sobol')
|
| 829 |
+
|
| 830 |
+
    def test_21_1_jac_true(self):
        """Test that shgo can handle objective functions that return the
        gradient alongside the objective value. Fixes gh-13547"""
        # previous
        def func(x):
            # (value, gradient) pair, consumed via jac=True below.
            return np.sum(np.power(x, 2)), 2 * x

        shgo(
            func,
            bounds=[[-1, 1], [1, 2]],
            n=100, iters=5,
            sampling_method="sobol",
            minimizer_kwargs={'method': 'SLSQP', 'jac': True}
        )

        # new
        def func(x):
            return np.sum(x ** 2), 2 * x

        bounds = [[-1, 1], [1, 2], [-1, 1], [1, 2], [0, 3]]

        res = shgo(func, bounds=bounds, sampling_method="sobol",
                   minimizer_kwargs={'method': 'SLSQP', 'jac': True})
        # Reference: plain local minimisation with the same (value, grad) func.
        ref = minimize(func, x0=[1, 1, 1, 1, 1], bounds=bounds,
                       jac=True)
        assert res.success
        assert_allclose(res.fun, ref.fun)
        assert_allclose(res.x, ref.x, atol=1e-15)
|
| 858 |
+
|
| 859 |
+
    @pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp'])
    def test_21_2_derivative_options(self, derivative):
        """shgo used to raise an error when passing `options` with 'jac'
        # see gh-12963. check that this is resolved
        """

        def objective(x):
            # Simple 1-D quadratic with known analytic derivatives.
            return 3 * x[0] * x[0] + 2 * x[0] + 5

        def gradient(x):
            return 6 * x[0] + 2

        def hess(x):
            return 6

        def hessp(x, p):
            return 6 * p

        derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp}
        # Derivative is deliberately passed via `options`, not
        # `minimizer_kwargs` — that was the broken path in gh-12963.
        options = {derivative: derivative_funcs[derivative]}
        minimizer_kwargs = {'method': 'trust-constr'}

        bounds = [(-100, 100)]
        res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs,
                   options=options)
        ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs,
                       **options)

        assert res.success
        np.testing.assert_allclose(res.fun, ref.fun)
        np.testing.assert_allclose(res.x, ref.x)
|
| 890 |
+
|
| 891 |
+
    def test_21_3_hess_options_rosen(self):
        """Ensure the Hessian gets passed correctly to the local minimizer
        routine. Previous report gh-14533.
        """
        bounds = [(0, 1.6), (0, 1.6), (0, 1.4), (0, 1.4), (0, 1.4)]
        # Newton-CG requires a gradient and uses the Hessian when provided.
        options = {'jac': rosen_der, 'hess': rosen_hess}
        minimizer_kwargs = {'method': 'Newton-CG'}
        res = shgo(rosen, bounds, minimizer_kwargs=minimizer_kwargs,
                   options=options)
        ref = minimize(rosen, np.zeros(5), method='Newton-CG',
                       **options)
        assert res.success
        assert_allclose(res.fun, ref.fun)
        assert_allclose(res.x, ref.x, atol=1e-15)
|
| 905 |
+
|
| 906 |
+
def test_21_arg_tuple_sobol(self):
|
| 907 |
+
"""shgo used to raise an error when passing `args` with Sobol sampling
|
| 908 |
+
# see gh-12114. check that this is resolved"""
|
| 909 |
+
|
| 910 |
+
def fun(x, k):
|
| 911 |
+
return x[0] ** k
|
| 912 |
+
|
| 913 |
+
constraints = ({'type': 'ineq', 'fun': lambda x: x[0] - 1})
|
| 914 |
+
|
| 915 |
+
bounds = [(0, 10)]
|
| 916 |
+
res = shgo(fun, bounds, args=(1,), constraints=constraints,
|
| 917 |
+
sampling_method='sobol')
|
| 918 |
+
ref = minimize(fun, np.zeros(1), bounds=bounds, args=(1,),
|
| 919 |
+
constraints=constraints)
|
| 920 |
+
assert res.success
|
| 921 |
+
assert_allclose(res.fun, ref.fun)
|
| 922 |
+
assert_allclose(res.x, ref.x)
|
| 923 |
+
|
| 924 |
+
|
| 925 |
+
# Failure test functions
|
| 926 |
+
class TestShgoFailures:
    """Cases where shgo is expected to fail or reject its inputs."""

    def test_1_maxiter(self):
        """Test failure on insufficient iterations"""
        options = {'maxiter': 2}
        res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None,
                   options=options, sampling_method='sobol')

        np.testing.assert_equal(False, res.success)
        # np.testing.assert_equal(4, res.nfev)
        np.testing.assert_equal(4, res.tnev)

    def test_2_sampling(self):
        """Rejection of unknown sampling method"""
        assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds,
                      sampling_method='not_Sobol')

    def test_3_1_no_min_pool_sobol(self):
        """Check that the routine stops when no minimiser is found
        after maximum specified function evaluations"""
        options = {'maxfev': 10,
                   # 'maxev': 10,
                   'disp': True}
        res = shgo(test_table.f, test_table.bounds, n=3, options=options,
                   sampling_method='sobol')
        np.testing.assert_equal(False, res.success)
        # np.testing.assert_equal(9, res.nfev)
        np.testing.assert_equal(12, res.nfev)

    def test_3_2_no_min_pool_simplicial(self):
        """Check that the routine stops when no minimiser is found
        after maximum specified sampling evaluations"""
        options = {'maxev': 10,
                   'disp': True}
        res = shgo(test_table.f, test_table.bounds, n=3, options=options,
                   sampling_method='simplicial')
        np.testing.assert_equal(False, res.success)

    def test_4_1_bound_err(self):
        """Bounds specified with lb > ub must be rejected"""
        # First pair has lower bound 6 > upper bound 3.
        bounds = [(6, 3), (3, 5)]
        assert_raises(ValueError, shgo, test1_1.f, bounds)

    def test_4_2_bound_err(self):
        """Specified bounds are of the form (lb, ub)"""
        # First entry has three elements rather than an (lb, ub) pair.
        bounds = [(3, 5, 5), (3, 5)]
        assert_raises(ValueError, shgo, test1_1.f, bounds)

    def test_5_1_1_infeasible_sobol(self):
        """Ensures the algorithm terminates on infeasible problems
        after maxev is exceeded. Use infty constraints option"""
        options = {'maxev': 100,
                   'disp': True}

        res = shgo(test_infeasible.f, test_infeasible.bounds,
                   constraints=test_infeasible.cons, n=100, options=options,
                   sampling_method='sobol')

        np.testing.assert_equal(False, res.success)

    def test_5_1_2_infeasible_sobol(self):
        """Ensures the algorithm terminates on infeasible problems
        after maxev is exceeded. Do not use infty constraints option"""
        options = {'maxev': 100,
                   'disp': True,
                   'infty_constraints': False}

        res = shgo(test_infeasible.f, test_infeasible.bounds,
                   constraints=test_infeasible.cons, n=100, options=options,
                   sampling_method='sobol')

        np.testing.assert_equal(False, res.success)

    def test_5_2_infeasible_simplicial(self):
        """Ensures the algorithm terminates on infeasible problems
        after maxev is exceeded."""
        options = {'maxev': 1000,
                   'disp': False}

        res = shgo(test_infeasible.f, test_infeasible.bounds,
                   constraints=test_infeasible.cons, n=100, options=options,
                   sampling_method='simplicial')

        np.testing.assert_equal(False, res.success)

    @pytest.mark.thread_unsafe
    def test_6_1_lower_known_f_min(self):
        """Test Global mode limiting local evaluations with f* too high"""
        options = {  # Specify known function value
            'f_min': test2_1.expected_fun + 2.0,
            'f_tol': 1e-6,
            # Specify number of local iterations to perform+
            'minimize_every_iter': True,
            'local_iter': 1,
            'infty_constraints': False}
        args = (test2_1.f, test2_1.bounds)
        kwargs = {'constraints': test2_1.cons,
                  'n': None,
                  'iters': None,
                  'options': options,
                  'sampling_method': 'sobol'
                  }
        # f_min set above the true minimum should trigger a UserWarning.
        warns(UserWarning, shgo, *args, **kwargs)

    def test(self):
        # NOTE(review): stray debug method — its name does not match pytest's
        # default `test_*` collection pattern, and it only prints; presumably
        # leftover scratch code. Confirm before deleting.
        from scipy.optimize import rosen, shgo
        bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]

        def fun(x):
            fun.nfev += 1
            return rosen(x)

        fun.nfev = 0

        result = shgo(fun, bounds)
        print(result.x, result.fun, fun.nfev)  # 50
|
| 1041 |
+
|
| 1042 |
+
|
| 1043 |
+
# Returns
|
| 1044 |
+
class TestShgoReturns:
    """Check that the reported `nfev` matches actual objective-call counts."""

    def test_1_nfev_simplicial(self):
        box = [(0, 2)] * 5
        calls = [0]

        def counted_rosen(x):
            # Track every objective evaluation made by shgo.
            calls[0] += 1
            return rosen(x)

        outcome = shgo(counted_rosen, box)
        np.testing.assert_equal(calls[0], outcome.nfev)

    def test_1_nfev_sobol(self):
        box = [(0, 2)] * 5
        calls = [0]

        def counted_rosen(x):
            # Track every objective evaluation made by shgo.
            calls[0] += 1
            return rosen(x)

        outcome = shgo(counted_rosen, box, sampling_method='sobol')
        np.testing.assert_equal(calls[0], outcome.nfev)
|
| 1068 |
+
|
| 1069 |
+
|
| 1070 |
+
def test_vector_constraint():
    # gh15514
    def quad(x):
        # Returns a length-1 sequence: a *vector-valued* constraint function.
        x = np.asarray(x)
        return [np.sum(x ** 2)]

    nlc = NonlinearConstraint(quad, [2.2], [3])
    # Convert the new-style constraint to old-style dicts, which is the
    # representation that used to break for vector outputs.
    oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0]))

    res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol')
    # Solution must land inside the annulus 2.2 <= ||x||^2 <= 3.
    assert np.all(np.sum((res.x)**2) >= 2.2)
    assert np.all(np.sum((res.x) ** 2) <= 3.0)
    assert res.success
|
| 1083 |
+
|
| 1084 |
+
|
| 1085 |
+
@pytest.mark.filterwarnings("ignore:delta_grad")
def test_trust_constr():
    def quad(x):
        x = np.asarray(x)
        return [np.sum(x ** 2)]

    nlc = NonlinearConstraint(quad, [2.6], [3])
    minimizer_kwargs = {'method': 'trust-constr'}
    # note that we don't supply the constraints in minimizer_kwargs,
    # so if the final result obeys the constraints we know that shgo
    # passed them on to 'trust-constr'
    res = shgo(
        rosen,
        [(0, 10), (0, 10)],
        constraints=nlc,
        sampling_method='sobol',
        minimizer_kwargs=minimizer_kwargs
    )
    assert np.all(np.sum((res.x)**2) >= 2.6)
    assert np.all(np.sum((res.x) ** 2) <= 3.0)
    assert res.success
|
| 1106 |
+
|
| 1107 |
+
|
| 1108 |
+
def test_equality_constraints():
    # gh16260
    bounds = [(0.9, 4.0)] * 2  # Constrain probabilities to 0 and 1.

    # NOTE: `faulty` is deliberately redefined three times below; each
    # definition shadows the previous one for the next sub-case.
    def faulty(x):
        return x[0] + x[1]

    # New-style equality constraint: lb == ub == 3.9.
    nlc = NonlinearConstraint(faulty, 3.9, 3.9)
    res = shgo(rosen, bounds=bounds, constraints=nlc)
    assert_allclose(np.sum(res.x), 3.9)

    def faulty(x):
        return x[0] + x[1] - 3.9

    # Same constraint expressed as an old-style 'eq' dict.
    constraints = {'type': 'eq', 'fun': faulty}
    res = shgo(rosen, bounds=bounds, constraints=constraints)
    assert_allclose(np.sum(res.x), 3.9)

    bounds = [(0, 1.0)] * 4
    # sum of variable should equal 1.
    def faulty(x):
        return x[0] + x[1] + x[2] + x[3] - 1

    # options = {'minimize_every_iter': True, 'local_iter':10}
    constraints = {'type': 'eq', 'fun': faulty}
    res = shgo(
        lambda x: - np.prod(x),
        bounds=bounds,
        constraints=constraints,
        sampling_method='sobol'
    )
    assert_allclose(np.sum(res.x), 1.0)
|
| 1140 |
+
|
| 1141 |
+
def test_gh16971():
    def cons(x):
        return np.sum(x**2) - 0

    c = {'fun': cons, 'type': 'ineq'}
    # COBYLA-specific option 'catol' must survive the kwargs plumbing.
    minimizer_kwargs = {
        'method': 'COBYLA',
        'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05}
    }

    s = SHGO(
        rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs
    )

    # Construction alone is enough; verify the kwargs were stored unaltered.
    assert s.minimizer_kwargs['method'].lower() == 'cobyla'
    assert s.minimizer_kwargs['options']['catol'] == 0.05
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py
ADDED
|
@@ -0,0 +1,871 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from scipy.optimize._bracket import _ELIMITS
|
| 6 |
+
from scipy.optimize.elementwise import bracket_root, bracket_minimum
|
| 7 |
+
import scipy._lib._elementwise_iterative_method as eim
|
| 8 |
+
from scipy import stats
|
| 9 |
+
from scipy._lib._array_api_no_0d import (xp_assert_close, xp_assert_equal,
|
| 10 |
+
xp_assert_less, array_namespace)
|
| 11 |
+
from scipy._lib._array_api import xp_ravel
|
| 12 |
+
from scipy.conftest import array_api_compatible
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# These tests were originally written for the private `optimize._bracket`
|
| 16 |
+
# interfaces, but now we want the tests to check the behavior of the public
|
| 17 |
+
# `optimize.elementwise` interfaces. Therefore, rather than importing
|
| 18 |
+
# `_bracket_root`/`_bracket_minimum` from `_bracket.py`, we import
|
| 19 |
+
# `bracket_root`/`bracket_minimum` from `optimize.elementwise` and wrap those
|
| 20 |
+
# functions to conform to the private interface. This may look a little strange,
|
| 21 |
+
# since it effectively just inverts the interface transformation done within the
|
| 22 |
+
# `bracket_root`/`bracket_minimum` functions, but it allows us to run the original,
|
| 23 |
+
# unmodified tests on the public interfaces, simplifying the PR that adds
|
| 24 |
+
# the public interfaces. We'll refactor this when we want to @parametrize the
|
| 25 |
+
# tests over multiple `method`s.
|
| 26 |
+
def _bracket_root(*args, **kwargs):
    """Adapt the public `bracket_root` result to the private interface.

    Unpacks the (bracket, f_bracket) tuples into the legacy `xl`/`xr` and
    `fl`/`fr` attributes so the original private-interface tests run
    unmodified against the public function.
    """
    res = bracket_root(*args, **kwargs)
    res.xl, res.xr = res.bracket
    res.fl, res.fr = res.f_bracket
    # Remove the public-interface attributes to match the old result shape.
    del res.bracket
    del res.f_bracket
    return res
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _bracket_minimum(*args, **kwargs):
    """Adapt the public `bracket_minimum` result to the private interface.

    A minimum bracket has three points, so this unpacks `xl`/`xm`/`xr`
    and `fl`/`fm`/`fr` (cf. `_bracket_root`, which has two).
    """
    res = bracket_minimum(*args, **kwargs)
    res.xl, res.xm, res.xr = res.bracket
    res.fl, res.fm, res.fr = res.f_bracket
    # Remove the public-interface attributes to match the old result shape.
    del res.bracket
    del res.f_bracket
    return res
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
array_api_strict_skip_reason = 'Array API does not support fancy indexing assignment.'
|
| 45 |
+
jax_skip_reason = 'JAX arrays do not support item assignment.'
|
| 46 |
+
|
| 47 |
+
@pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason)
|
| 48 |
+
@pytest.mark.skip_xp_backends('jax.numpy', reason=jax_skip_reason)
|
| 49 |
+
@array_api_compatible
|
| 50 |
+
@pytest.mark.usefixtures("skip_xp_backends")
|
| 51 |
+
class TestBracketRoot:
|
| 52 |
+
    @pytest.mark.parametrize("seed", (615655101, 3141866013, 238075752))
    @pytest.mark.parametrize("use_xmin", (False, True))
    @pytest.mark.parametrize("other_side", (False, True))
    @pytest.mark.parametrize("fix_one_side", (False, True))
    def test_nfev_expected(self, seed, use_xmin, other_side, fix_one_side, xp):
        # Property-based test to confirm that _bracket_root is behaving as
        # expected. The basic case is when root < a < b.
        # The number of times bracket expands (per side) can be found by
        # setting the expression for the left endpoint of the bracket to the
        # root of f (x=0), solving for i, and rounding up. The corresponding
        # lower and upper ends of the bracket are found by plugging this back
        # into the expression for the ends of the bracket.
        # `other_side=True` is the case that a < b < root
        # Special cases like a < root < b are tested separately
        rng = np.random.default_rng(seed)
        xl0, d, factor = xp.asarray(rng.random(size=3) * [1e5, 10, 5])
        factor = 1 + factor  # factor must be greater than 1
        xr0 = xl0 + d  # xr0 must be greater than a in basic case

        def f(x):
            f.count += 1
            return x  # root is 0

        if use_xmin:
            xmin = xp.asarray(-rng.random())
            # Closed-form number of expansions needed to cross the root.
            n = xp.ceil(xp.log(-(xl0 - xmin) / xmin) / xp.log(factor))
            l, u = xmin + (xl0 - xmin)*factor**-n, xmin + (xl0 - xmin)*factor**-(n - 1)
            kwargs = dict(xl0=xl0, xr0=xr0, factor=factor, xmin=xmin)
        else:
            n = xp.ceil(xp.log(xr0/d) / xp.log(factor))
            l, u = xr0 - d*factor**n, xr0 - d*factor**(n-1)
            kwargs = dict(xl0=xl0, xr0=xr0, factor=factor)

        if other_side:
            # Mirror the problem so the root lies to the right of the bracket.
            kwargs['xl0'], kwargs['xr0'] = -kwargs['xr0'], -kwargs['xl0']
            l, u = -u, -l
            if 'xmin' in kwargs:
                kwargs['xmax'] = -kwargs.pop('xmin')

        if fix_one_side:
            if other_side:
                kwargs['xmin'] = -xr0
            else:
                kwargs['xmax'] = xr0

        f.count = 0
        res = _bracket_root(f, **kwargs)

        # Compare reported number of function evaluations `nfev` against
        # reported `nit`, actual function call count `f.count`, and theoretical
        # number of expansions `n`.
        # When both sides are free, these get multiplied by 2 because function
        # is evaluated on the left and the right each iteration.
        # When one side is fixed, however, we add one: on the right side, the
        # function gets evaluated once at b.
        # Add 1 to `n` and `res.nit` because function evaluations occur at
        # iterations *0*, 1, ..., `n`. Subtract 1 from `f.count` because
        # function is called separately for left and right in iteration 0.
        if not fix_one_side:
            assert res.nfev == 2*(res.nit+1) == 2*(f.count-1) == 2*(n + 1)
        else:
            assert res.nfev == (res.nit+1)+1 == (f.count-1)+1 == (n+1)+1

        # Compare reported bracket to theoretical bracket and reported function
        # values to function evaluated at bracket.
        bracket = xp.asarray([res.xl, res.xr])
        xp_assert_close(bracket, xp.asarray([l, u]))
        f_bracket = xp.asarray([res.fl, res.fr])
        xp_assert_close(f_bracket, f(bracket))

        # Check that bracket is valid and that status and success are correct
        assert res.xr > res.xl
        signs = xp.sign(f_bracket)
        assert signs[0] == -signs[1]
        assert res.status == 0
        assert res.success
|
| 128 |
+
|
| 129 |
+
    def f(self, q, p):
        # Shared test objective: standard-normal CDF shifted by probability
        # `p`, whose root is the normal PPF at `p`.
        return stats._stats_py._SimpleNormal().cdf(q) - p
|
| 131 |
+
|
| 132 |
+
    @pytest.mark.parametrize('p', [0.6, np.linspace(0.05, 0.95, 10)])
    @pytest.mark.parametrize('xmin', [-5, None])
    @pytest.mark.parametrize('xmax', [5, None])
    @pytest.mark.parametrize('factor', [1.2, 2])
    def test_basic(self, p, xmin, xmax, factor, xp):
        # Test basic functionality to bracket root (distribution PPF)
        res = _bracket_root(self.f, xp.asarray(-0.01), 0.01, xmin=xmin, xmax=xmax,
                            factor=factor, args=(xp.asarray(p),))
        # A valid root bracket has function values of opposite sign.
        xp_assert_equal(-xp.sign(res.fl), xp.sign(res.fr))
|
| 141 |
+
|
| 142 |
+
    @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
    def test_vectorization(self, shape, xp):
        # Test for correct functionality, output shapes, and dtypes for various
        # input shapes.
        p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else np.float64(0.6)
        args = (p,)
        maxiter = 10

        @np.vectorize
        def bracket_root_single(xl0, xr0, xmin, xmax, factor, p):
            # Scalar reference: one independent bracket per element.
            return _bracket_root(self.f, xl0, xr0, xmin=xmin, xmax=xmax,
                                 factor=factor, args=(p,),
                                 maxiter=maxiter)

        def f(*args, **kwargs):
            f.f_evals += 1
            return self.f(*args, **kwargs)
        f.f_evals = 0

        rng = np.random.default_rng(2348234)
        xl0 = -rng.random(size=shape)
        xr0 = rng.random(size=shape)
        xmin, xmax = 1e3*xl0, 1e3*xr0
        if shape:  # make some elements unbounded
            i = rng.random(size=shape) > 0.5
            xmin[i], xmax[i] = -np.inf, np.inf
        factor = rng.random(size=shape) + 1.5
        refs = bracket_root_single(xl0, xr0, xmin, xmax, factor, p).ravel()
        xl0, xr0, xmin, xmax, factor = (xp.asarray(xl0), xp.asarray(xr0),
                                        xp.asarray(xmin), xp.asarray(xmax),
                                        xp.asarray(factor))
        args = tuple(map(xp.asarray, args))
        res = _bracket_root(f, xl0, xr0, xmin=xmin, xmax=xmax, factor=factor,
                            args=args, maxiter=maxiter)

        # Vectorized result must agree element-wise with scalar references.
        attrs = ['xl', 'xr', 'fl', 'fr', 'success', 'nfev', 'nit']
        for attr in attrs:
            ref_attr = [xp.asarray(getattr(ref, attr)) for ref in refs]
            res_attr = getattr(res, attr)
            xp_assert_close(xp_ravel(res_attr, xp=xp), xp.stack(ref_attr))
            xp_assert_equal(res_attr.shape, shape)

        xp_test = array_namespace(xp.asarray(1.))
        assert res.success.dtype == xp_test.bool
        if shape:
            # End elements of p are outside (0, 1) and may legitimately fail.
            assert xp.all(res.success[1:-1])
        assert res.status.dtype == xp.int32
        assert res.nfev.dtype == xp.int32
        assert res.nit.dtype == xp.int32
        assert xp.max(res.nit) == f.f_evals - 2
        xp_assert_less(res.xl, res.xr)
        xp_assert_close(res.fl, xp.asarray(self.f(res.xl, *args)))
        xp_assert_close(res.fr, xp.asarray(self.f(res.xr, *args)))
|
| 195 |
+
|
| 196 |
+
    def test_flags(self, xp):
        # Test cases that should produce different status flags; show that all
        # can be produced simultaneously.
        def f(xs, js):
            # Each element dispatches to a different scalar function so the
            # five elements hit five different termination states.
            funcs = [lambda x: x - 1.5,
                     lambda x: x - 1000,
                     lambda x: x - 1000,
                     lambda x: x * xp.nan,
                     lambda x: x]

            return [funcs[int(j)](x) for x, j in zip(xs, js)]

        args = (xp.arange(5, dtype=xp.int64),)
        res = _bracket_root(f,
                            xl0=xp.asarray([-1., -1., -1., -1., 4.]),
                            xr0=xp.asarray([1, 1, 1, 1, -4]),
                            xmin=xp.asarray([-xp.inf, -1, -xp.inf, -xp.inf, 6]),
                            xmax=xp.asarray([xp.inf, 1, xp.inf, xp.inf, 2]),
                            args=args, maxiter=3)

        # Converged / hit limits / iteration limit / NaN value / bad input.
        ref_flags = xp.asarray([eim._ECONVERGED,
                                _ELIMITS,
                                eim._ECONVERR,
                                eim._EVALUEERR,
                                eim._EINPUTERR],
                               dtype=xp.int32)

        xp_assert_equal(res.status, ref_flags)
|
| 224 |
+
|
| 225 |
+
    @pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
    @pytest.mark.parametrize('xmin', [-5, None])
    @pytest.mark.parametrize('xmax', [5, None])
    @pytest.mark.parametrize("dtype", ("float16", "float32", "float64"))
    def test_dtype(self, root, xmin, xmax, dtype, xp):
        # Test that dtypes are preserved
        dtype = getattr(xp, dtype)
        xp_test = array_namespace(xp.asarray(1.))

        xmin = xmin if xmin is None else xp.asarray(xmin, dtype=dtype)
        xmax = xmax if xmax is None else xp.asarray(xmax, dtype=dtype)
        root = xp.asarray(root, dtype=dtype)
        def f(x, root):
            # Cast explicitly so intermediate math cannot upcast the result.
            return xp_test.astype((x - root) ** 3, dtype)

        bracket = xp.asarray([-0.01, 0.01], dtype=dtype)
        res = _bracket_root(f, *bracket, xmin=xmin, xmax=xmax, args=(root,))
        assert xp.all(res.success)
        # All outputs must keep the input dtype.
        assert res.xl.dtype == res.xr.dtype == dtype
        assert res.fl.dtype == res.fr.dtype == dtype
|
| 245 |
+
|
| 246 |
+
def test_input_validation(self, xp):
|
| 247 |
+
# Test input validation for appropriate error messages
|
| 248 |
+
|
| 249 |
+
message = '`func` must be callable.'
|
| 250 |
+
with pytest.raises(ValueError, match=message):
|
| 251 |
+
_bracket_root(None, -4, 4)
|
| 252 |
+
|
| 253 |
+
message = '...must be numeric and real.'
|
| 254 |
+
with pytest.raises(ValueError, match=message):
|
| 255 |
+
_bracket_root(lambda x: x, -4+1j, 4)
|
| 256 |
+
with pytest.raises(ValueError, match=message):
|
| 257 |
+
_bracket_root(lambda x: x, -4, 'hello')
|
| 258 |
+
with pytest.raises(ValueError, match=message):
|
| 259 |
+
_bracket_root(lambda x: x, -4, 4, xmin=np)
|
| 260 |
+
with pytest.raises(ValueError, match=message):
|
| 261 |
+
_bracket_root(lambda x: x, -4, 4, xmax=object())
|
| 262 |
+
with pytest.raises(ValueError, match=message):
|
| 263 |
+
_bracket_root(lambda x: x, -4, 4, factor=sum)
|
| 264 |
+
|
| 265 |
+
message = "All elements of `factor` must be greater than 1."
|
| 266 |
+
with pytest.raises(ValueError, match=message):
|
| 267 |
+
_bracket_root(lambda x: x, -4, 4, factor=0.5)
|
| 268 |
+
|
| 269 |
+
message = "broadcast"
|
| 270 |
+
# raised by `xp.broadcast, but the traceback is readable IMO
|
| 271 |
+
with pytest.raises(Exception, match=message):
|
| 272 |
+
_bracket_root(lambda x: x, xp.asarray([-2, -3]), xp.asarray([3, 4, 5]))
|
| 273 |
+
# Consider making this give a more readable error message
|
| 274 |
+
# with pytest.raises(ValueError, match=message):
|
| 275 |
+
# _bracket_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5])
|
| 276 |
+
|
| 277 |
+
message = '`maxiter` must be a non-negative integer.'
|
| 278 |
+
with pytest.raises(ValueError, match=message):
|
| 279 |
+
_bracket_root(lambda x: x, -4, 4, maxiter=1.5)
|
| 280 |
+
with pytest.raises(ValueError, match=message):
|
| 281 |
+
_bracket_root(lambda x: x, -4, 4, maxiter=-1)
|
| 282 |
+
with pytest.raises(ValueError, match=message):
|
| 283 |
+
_bracket_root(lambda x: x, -4, 4, maxiter="shrubbery")
|
| 284 |
+
|
| 285 |
+
def test_special_cases(self, xp):
|
| 286 |
+
# Test edge cases and other special cases
|
| 287 |
+
xp_test = array_namespace(xp.asarray(1.))
|
| 288 |
+
|
| 289 |
+
# Test that integers are not passed to `f`
|
| 290 |
+
# (otherwise this would overflow)
|
| 291 |
+
def f(x):
|
| 292 |
+
assert xp_test.isdtype(x.dtype, "real floating")
|
| 293 |
+
return x ** 99 - 1
|
| 294 |
+
|
| 295 |
+
res = _bracket_root(f, xp.asarray(-7.), xp.asarray(5.))
|
| 296 |
+
assert res.success
|
| 297 |
+
|
| 298 |
+
# Test maxiter = 0. Should do nothing to bracket.
|
| 299 |
+
def f(x):
|
| 300 |
+
return x - 10
|
| 301 |
+
|
| 302 |
+
bracket = (xp.asarray(-3.), xp.asarray(5.))
|
| 303 |
+
res = _bracket_root(f, *bracket, maxiter=0)
|
| 304 |
+
assert res.xl, res.xr == bracket
|
| 305 |
+
assert res.nit == 0
|
| 306 |
+
assert res.nfev == 2
|
| 307 |
+
assert res.status == -2
|
| 308 |
+
|
| 309 |
+
# Test scalar `args` (not in tuple)
|
| 310 |
+
def f(x, c):
|
| 311 |
+
return c*x - 1
|
| 312 |
+
|
| 313 |
+
res = _bracket_root(f, xp.asarray(-1.), xp.asarray(1.),
|
| 314 |
+
args=xp.asarray(3.))
|
| 315 |
+
assert res.success
|
| 316 |
+
xp_assert_close(res.fl, f(res.xl, 3))
|
| 317 |
+
|
| 318 |
+
# Test other edge cases
|
| 319 |
+
|
| 320 |
+
def f(x):
|
| 321 |
+
f.count += 1
|
| 322 |
+
return x
|
| 323 |
+
|
| 324 |
+
# 1. root lies within guess of bracket
|
| 325 |
+
f.count = 0
|
| 326 |
+
_bracket_root(f, xp.asarray(-10), xp.asarray(20))
|
| 327 |
+
assert f.count == 2
|
| 328 |
+
|
| 329 |
+
# 2. bracket endpoint hits root exactly
|
| 330 |
+
f.count = 0
|
| 331 |
+
res = _bracket_root(f, xp.asarray(5.), xp.asarray(10.),
|
| 332 |
+
factor=2)
|
| 333 |
+
|
| 334 |
+
assert res.nfev == 4
|
| 335 |
+
xp_assert_close(res.xl, xp.asarray(0.), atol=1e-15)
|
| 336 |
+
xp_assert_close(res.xr, xp.asarray(5.), atol=1e-15)
|
| 337 |
+
|
| 338 |
+
# 3. bracket limit hits root exactly
|
| 339 |
+
with np.errstate(over='ignore'):
|
| 340 |
+
res = _bracket_root(f, xp.asarray(5.), xp.asarray(10.),
|
| 341 |
+
xmin=0)
|
| 342 |
+
xp_assert_close(res.xl, xp.asarray(0.), atol=1e-15)
|
| 343 |
+
|
| 344 |
+
with np.errstate(over='ignore'):
|
| 345 |
+
res = _bracket_root(f, xp.asarray(-10.), xp.asarray(-5.),
|
| 346 |
+
xmax=0)
|
| 347 |
+
xp_assert_close(res.xr, xp.asarray(0.), atol=1e-15)
|
| 348 |
+
|
| 349 |
+
# 4. bracket not within min, max
|
| 350 |
+
with np.errstate(over='ignore'):
|
| 351 |
+
res = _bracket_root(f, xp.asarray(5.), xp.asarray(10.),
|
| 352 |
+
xmin=1)
|
| 353 |
+
assert not res.success
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
@pytest.mark.skip_xp_backends('array_api_strict', reason=array_api_strict_skip_reason)
|
| 357 |
+
@pytest.mark.skip_xp_backends('jax.numpy', reason=jax_skip_reason)
|
| 358 |
+
@array_api_compatible
|
| 359 |
+
@pytest.mark.usefixtures("skip_xp_backends")
|
| 360 |
+
class TestBracketMinimum:
|
| 361 |
+
def init_f(self):
|
| 362 |
+
def f(x, a, b):
|
| 363 |
+
f.count += 1
|
| 364 |
+
return (x - a)**2 + b
|
| 365 |
+
f.count = 0
|
| 366 |
+
return f
|
| 367 |
+
|
| 368 |
+
def assert_valid_bracket(self, result, xp):
|
| 369 |
+
assert xp.all(
|
| 370 |
+
(result.xl < result.xm) & (result.xm < result.xr)
|
| 371 |
+
)
|
| 372 |
+
assert xp.all(
|
| 373 |
+
(result.fl >= result.fm) & (result.fr > result.fm)
|
| 374 |
+
| (result.fl > result.fm) & (result.fr > result.fm)
|
| 375 |
+
)
|
| 376 |
+
|
| 377 |
+
def get_kwargs(
|
| 378 |
+
self, *, xl0=None, xr0=None, factor=None, xmin=None, xmax=None, args=None
|
| 379 |
+
):
|
| 380 |
+
names = ("xl0", "xr0", "xmin", "xmax", "factor", "args")
|
| 381 |
+
return {
|
| 382 |
+
name: val for name, val in zip(names, (xl0, xr0, xmin, xmax, factor, args))
|
| 383 |
+
if val is not None
|
| 384 |
+
}
|
| 385 |
+
|
| 386 |
+
@pytest.mark.parametrize(
|
| 387 |
+
"seed",
|
| 388 |
+
(
|
| 389 |
+
307448016549685229886351382450158984917,
|
| 390 |
+
11650702770735516532954347931959000479,
|
| 391 |
+
113767103358505514764278732330028568336,
|
| 392 |
+
)
|
| 393 |
+
)
|
| 394 |
+
@pytest.mark.parametrize("use_xmin", (False, True))
|
| 395 |
+
@pytest.mark.parametrize("other_side", (False, True))
|
| 396 |
+
def test_nfev_expected(self, seed, use_xmin, other_side, xp):
|
| 397 |
+
rng = np.random.default_rng(seed)
|
| 398 |
+
args = (xp.asarray(0.), xp.asarray(0.)) # f(x) = x^2 with minimum at 0
|
| 399 |
+
# xl0, xm0, xr0 are chosen such that the initial bracket is to
|
| 400 |
+
# the right of the minimum, and the bracket will expand
|
| 401 |
+
# downhill towards zero.
|
| 402 |
+
xl0, d1, d2, factor = xp.asarray(rng.random(size=4) * [1e5, 10, 10, 5])
|
| 403 |
+
xm0 = xl0 + d1
|
| 404 |
+
xr0 = xm0 + d2
|
| 405 |
+
# Factor should be greater than one.
|
| 406 |
+
factor += 1
|
| 407 |
+
|
| 408 |
+
if use_xmin:
|
| 409 |
+
xmin = xp.asarray(-rng.random() * 5, dtype=xp.float64)
|
| 410 |
+
n = int(xp.ceil(xp.log(-(xl0 - xmin) / xmin) / xp.log(factor)))
|
| 411 |
+
lower = xmin + (xl0 - xmin)*factor**-n
|
| 412 |
+
middle = xmin + (xl0 - xmin)*factor**-(n-1)
|
| 413 |
+
upper = xmin + (xl0 - xmin)*factor**-(n-2) if n > 1 else xm0
|
| 414 |
+
# It may be the case the lower is below the minimum, but we still
|
| 415 |
+
# don't have a valid bracket.
|
| 416 |
+
if middle**2 > lower**2:
|
| 417 |
+
n += 1
|
| 418 |
+
lower, middle, upper = (
|
| 419 |
+
xmin + (xl0 - xmin)*factor**-n, lower, middle
|
| 420 |
+
)
|
| 421 |
+
else:
|
| 422 |
+
xmin = None
|
| 423 |
+
n = int(xp.ceil(xp.log(xl0 / d1) / xp.log(factor)))
|
| 424 |
+
lower = xl0 - d1*factor**n
|
| 425 |
+
middle = xl0 - d1*factor**(n-1) if n > 1 else xl0
|
| 426 |
+
upper = xl0 - d1*factor**(n-2) if n > 1 else xm0
|
| 427 |
+
# It may be the case the lower is below the minimum, but we still
|
| 428 |
+
# don't have a valid bracket.
|
| 429 |
+
if middle**2 > lower**2:
|
| 430 |
+
n += 1
|
| 431 |
+
lower, middle, upper = (
|
| 432 |
+
xl0 - d1*factor**n, lower, middle
|
| 433 |
+
)
|
| 434 |
+
f = self.init_f()
|
| 435 |
+
|
| 436 |
+
xmax = None
|
| 437 |
+
if other_side:
|
| 438 |
+
xl0, xm0, xr0 = -xr0, -xm0, -xl0
|
| 439 |
+
xmin, xmax = None, -xmin if xmin is not None else None
|
| 440 |
+
lower, middle, upper = -upper, -middle, -lower
|
| 441 |
+
|
| 442 |
+
kwargs = self.get_kwargs(
|
| 443 |
+
xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, args=args
|
| 444 |
+
)
|
| 445 |
+
result = _bracket_minimum(f, xp.asarray(xm0), **kwargs)
|
| 446 |
+
|
| 447 |
+
# Check that `nfev` and `nit` have the correct relationship
|
| 448 |
+
assert result.nfev == result.nit + 3
|
| 449 |
+
# Check that `nfev` reports the correct number of function evaluations.
|
| 450 |
+
assert result.nfev == f.count
|
| 451 |
+
# Check that the number of iterations matches the theoretical value.
|
| 452 |
+
assert result.nit == n
|
| 453 |
+
|
| 454 |
+
# Compare reported bracket to theoretical bracket and reported function
|
| 455 |
+
# values to function evaluated at bracket.
|
| 456 |
+
xp_assert_close(result.xl, lower)
|
| 457 |
+
xp_assert_close(result.xm, middle)
|
| 458 |
+
xp_assert_close(result.xr, upper)
|
| 459 |
+
xp_assert_close(result.fl, f(lower, *args))
|
| 460 |
+
xp_assert_close(result.fm, f(middle, *args))
|
| 461 |
+
xp_assert_close(result.fr, f(upper, *args))
|
| 462 |
+
|
| 463 |
+
self.assert_valid_bracket(result, xp)
|
| 464 |
+
assert result.status == 0
|
| 465 |
+
assert result.success
|
| 466 |
+
|
| 467 |
+
def test_flags(self, xp):
|
| 468 |
+
# Test cases that should produce different status flags; show that all
|
| 469 |
+
# can be produced simultaneously
|
| 470 |
+
def f(xs, js):
|
| 471 |
+
funcs = [lambda x: (x - 1.5)**2,
|
| 472 |
+
lambda x: x,
|
| 473 |
+
lambda x: x,
|
| 474 |
+
lambda x: xp.nan,
|
| 475 |
+
lambda x: x**2]
|
| 476 |
+
|
| 477 |
+
return [funcs[j](x) for x, j in zip(xs, js)]
|
| 478 |
+
|
| 479 |
+
args = (xp.arange(5, dtype=xp.int64),)
|
| 480 |
+
xl0 = xp.asarray([-1.0, -1.0, -1.0, -1.0, 6.0])
|
| 481 |
+
xm0 = xp.asarray([0.0, 0.0, 0.0, 0.0, 4.0])
|
| 482 |
+
xr0 = xp.asarray([1.0, 1.0, 1.0, 1.0, 2.0])
|
| 483 |
+
xmin = xp.asarray([-xp.inf, -1.0, -xp.inf, -xp.inf, 8.0])
|
| 484 |
+
|
| 485 |
+
result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin,
|
| 486 |
+
args=args, maxiter=3)
|
| 487 |
+
|
| 488 |
+
reference_flags = xp.asarray([eim._ECONVERGED, _ELIMITS,
|
| 489 |
+
eim._ECONVERR, eim._EVALUEERR,
|
| 490 |
+
eim._EINPUTERR], dtype=xp.int32)
|
| 491 |
+
xp_assert_equal(result.status, reference_flags)
|
| 492 |
+
|
| 493 |
+
@pytest.mark.parametrize("minimum", (0.622, [0.622, 0.623]))
|
| 494 |
+
@pytest.mark.parametrize("dtype", ("float16", "float32", "float64"))
|
| 495 |
+
@pytest.mark.parametrize("xmin", [-5, None])
|
| 496 |
+
@pytest.mark.parametrize("xmax", [5, None])
|
| 497 |
+
def test_dtypes(self, minimum, xmin, xmax, dtype, xp):
|
| 498 |
+
dtype = getattr(xp, dtype)
|
| 499 |
+
xp_test = array_namespace(xp.asarray(1.))
|
| 500 |
+
xmin = xmin if xmin is None else xp.asarray(xmin, dtype=dtype)
|
| 501 |
+
xmax = xmax if xmax is None else xp.asarray(xmax, dtype=dtype)
|
| 502 |
+
minimum = xp.asarray(minimum, dtype=dtype)
|
| 503 |
+
|
| 504 |
+
def f(x, minimum):
|
| 505 |
+
return xp_test.astype((x - minimum)**2, dtype)
|
| 506 |
+
|
| 507 |
+
xl0, xm0, xr0 = [-0.01, 0.0, 0.01]
|
| 508 |
+
result = _bracket_minimum(
|
| 509 |
+
f, xp.asarray(xm0, dtype=dtype), xl0=xp.asarray(xl0, dtype=dtype),
|
| 510 |
+
xr0=xp.asarray(xr0, dtype=dtype), xmin=xmin, xmax=xmax, args=(minimum, )
|
| 511 |
+
)
|
| 512 |
+
assert xp.all(result.success)
|
| 513 |
+
assert result.xl.dtype == result.xm.dtype == result.xr.dtype == dtype
|
| 514 |
+
assert result.fl.dtype == result.fm.dtype == result.fr.dtype == dtype
|
| 515 |
+
|
| 516 |
+
@pytest.mark.skip_xp_backends(np_only=True, reason="str/object arrays")
|
| 517 |
+
def test_input_validation(self, xp):
|
| 518 |
+
# Test input validation for appropriate error messages
|
| 519 |
+
|
| 520 |
+
message = '`func` must be callable.'
|
| 521 |
+
with pytest.raises(ValueError, match=message):
|
| 522 |
+
_bracket_minimum(None, -4, xl0=4)
|
| 523 |
+
|
| 524 |
+
message = '...must be numeric and real.'
|
| 525 |
+
with pytest.raises(ValueError, match=message):
|
| 526 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(4+1j))
|
| 527 |
+
with pytest.raises(ValueError, match=message):
|
| 528 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(-4), xl0='hello')
|
| 529 |
+
with pytest.raises(ValueError, match=message):
|
| 530 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(-4),
|
| 531 |
+
xr0='farcical aquatic ceremony')
|
| 532 |
+
with pytest.raises(ValueError, match=message):
|
| 533 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(-4), xmin=np)
|
| 534 |
+
with pytest.raises(ValueError, match=message):
|
| 535 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(-4), xmax=object())
|
| 536 |
+
with pytest.raises(ValueError, match=message):
|
| 537 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(-4), factor=sum)
|
| 538 |
+
|
| 539 |
+
message = "All elements of `factor` must be greater than 1."
|
| 540 |
+
with pytest.raises(ValueError, match=message):
|
| 541 |
+
_bracket_minimum(lambda x: x, xp.asarray(-4), factor=0.5)
|
| 542 |
+
|
| 543 |
+
message = "shape mismatch: objects cannot be broadcast"
|
| 544 |
+
# raised by `xp.broadcast, but the traceback is readable IMO
|
| 545 |
+
with pytest.raises(ValueError, match=message):
|
| 546 |
+
_bracket_minimum(lambda x: x**2, xp.asarray([-2, -3]), xl0=[-3, -4, -5])
|
| 547 |
+
|
| 548 |
+
message = '`maxiter` must be a non-negative integer.'
|
| 549 |
+
with pytest.raises(ValueError, match=message):
|
| 550 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(-4), xr0=4, maxiter=1.5)
|
| 551 |
+
with pytest.raises(ValueError, match=message):
|
| 552 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(-4), xr0=4, maxiter=-1)
|
| 553 |
+
with pytest.raises(ValueError, match=message):
|
| 554 |
+
_bracket_minimum(lambda x: x**2, xp.asarray(-4), xr0=4, maxiter="ekki")
|
| 555 |
+
|
| 556 |
+
@pytest.mark.parametrize("xl0", [0.0, None])
|
| 557 |
+
@pytest.mark.parametrize("xm0", (0.05, 0.1, 0.15))
|
| 558 |
+
@pytest.mark.parametrize("xr0", (0.2, 0.4, 0.6, None))
|
| 559 |
+
# Minimum is ``a`` for each tuple ``(a, b)`` below. Tests cases where minimum
|
| 560 |
+
# is within, or at varying distances to the left or right of the initial
|
| 561 |
+
# bracket.
|
| 562 |
+
@pytest.mark.parametrize(
|
| 563 |
+
"args",
|
| 564 |
+
(
|
| 565 |
+
(1.2, 0), (-0.5, 0), (0.1, 0), (0.2, 0), (3.6, 0), (21.4, 0),
|
| 566 |
+
(121.6, 0), (5764.1, 0), (-6.4, 0), (-12.9, 0), (-146.2, 0)
|
| 567 |
+
)
|
| 568 |
+
)
|
| 569 |
+
def test_scalar_no_limits(self, xl0, xm0, xr0, args, xp):
|
| 570 |
+
f = self.init_f()
|
| 571 |
+
kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, args=tuple(map(xp.asarray, args)))
|
| 572 |
+
result = _bracket_minimum(f, xp.asarray(xm0, dtype=xp.float64), **kwargs)
|
| 573 |
+
self.assert_valid_bracket(result, xp)
|
| 574 |
+
assert result.status == 0
|
| 575 |
+
assert result.success
|
| 576 |
+
assert result.nfev == f.count
|
| 577 |
+
|
| 578 |
+
@pytest.mark.parametrize(
|
| 579 |
+
# xmin is set at 0.0 in all cases.
|
| 580 |
+
"xl0,xm0,xr0,xmin",
|
| 581 |
+
(
|
| 582 |
+
# Initial bracket at varying distances from the xmin.
|
| 583 |
+
(0.5, 0.75, 1.0, 0.0),
|
| 584 |
+
(1.0, 2.5, 4.0, 0.0),
|
| 585 |
+
(2.0, 4.0, 6.0, 0.0),
|
| 586 |
+
(12.0, 16.0, 20.0, 0.0),
|
| 587 |
+
# Test default initial left endpoint selection. It should not
|
| 588 |
+
# be below xmin.
|
| 589 |
+
(None, 0.75, 1.0, 0.0),
|
| 590 |
+
(None, 2.5, 4.0, 0.0),
|
| 591 |
+
(None, 4.0, 6.0, 0.0),
|
| 592 |
+
(None, 16.0, 20.0, 0.0),
|
| 593 |
+
)
|
| 594 |
+
)
|
| 595 |
+
@pytest.mark.parametrize(
|
| 596 |
+
"args", (
|
| 597 |
+
(0.0, 0.0), # Minimum is directly at xmin.
|
| 598 |
+
(1e-300, 0.0), # Minimum is extremely close to xmin.
|
| 599 |
+
(1e-20, 0.0), # Minimum is very close to xmin.
|
| 600 |
+
# Minimum at varying distances from xmin.
|
| 601 |
+
(0.1, 0.0),
|
| 602 |
+
(0.2, 0.0),
|
| 603 |
+
(0.4, 0.0)
|
| 604 |
+
)
|
| 605 |
+
)
|
| 606 |
+
def test_scalar_with_limit_left(self, xl0, xm0, xr0, xmin, args, xp):
|
| 607 |
+
f = self.init_f()
|
| 608 |
+
kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmin=xmin,
|
| 609 |
+
args=tuple(map(xp.asarray, args)))
|
| 610 |
+
result = _bracket_minimum(f, xp.asarray(xm0), **kwargs)
|
| 611 |
+
self.assert_valid_bracket(result, xp)
|
| 612 |
+
assert result.status == 0
|
| 613 |
+
assert result.success
|
| 614 |
+
assert result.nfev == f.count
|
| 615 |
+
|
| 616 |
+
@pytest.mark.parametrize(
|
| 617 |
+
#xmax is set to 1.0 in all cases.
|
| 618 |
+
"xl0,xm0,xr0,xmax",
|
| 619 |
+
(
|
| 620 |
+
# Bracket at varying distances from xmax.
|
| 621 |
+
(0.2, 0.3, 0.4, 1.0),
|
| 622 |
+
(0.05, 0.075, 0.1, 1.0),
|
| 623 |
+
(-0.2, -0.1, 0.0, 1.0),
|
| 624 |
+
(-21.2, -17.7, -14.2, 1.0),
|
| 625 |
+
# Test default right endpoint selection. It should not exceed xmax.
|
| 626 |
+
(0.2, 0.3, None, 1.0),
|
| 627 |
+
(0.05, 0.075, None, 1.0),
|
| 628 |
+
(-0.2, -0.1, None, 1.0),
|
| 629 |
+
(-21.2, -17.7, None, 1.0),
|
| 630 |
+
)
|
| 631 |
+
)
|
| 632 |
+
@pytest.mark.parametrize(
|
| 633 |
+
"args", (
|
| 634 |
+
(0.9999999999999999, 0.0), # Minimum very close to xmax.
|
| 635 |
+
# Minimum at varying distances from xmax.
|
| 636 |
+
(0.9, 0.0),
|
| 637 |
+
(0.7, 0.0),
|
| 638 |
+
(0.5, 0.0)
|
| 639 |
+
)
|
| 640 |
+
)
|
| 641 |
+
def test_scalar_with_limit_right(self, xl0, xm0, xr0, xmax, args, xp):
|
| 642 |
+
f = self.init_f()
|
| 643 |
+
args = tuple(xp.asarray(arg, dtype=xp.float64) for arg in args)
|
| 644 |
+
kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmax=xmax, args=args)
|
| 645 |
+
result = _bracket_minimum(f, xp.asarray(xm0, dtype=xp.float64), **kwargs)
|
| 646 |
+
self.assert_valid_bracket(result, xp)
|
| 647 |
+
assert result.status == 0
|
| 648 |
+
assert result.success
|
| 649 |
+
assert result.nfev == f.count
|
| 650 |
+
|
| 651 |
+
@pytest.mark.parametrize(
|
| 652 |
+
"xl0,xm0,xr0,xmin,xmax,args",
|
| 653 |
+
(
|
| 654 |
+
( # Case 1:
|
| 655 |
+
# Initial bracket.
|
| 656 |
+
0.2,
|
| 657 |
+
0.3,
|
| 658 |
+
0.4,
|
| 659 |
+
# Function slopes down to the right from the bracket to a minimum
|
| 660 |
+
# at 1.0. xmax is also at 1.0
|
| 661 |
+
None,
|
| 662 |
+
1.0,
|
| 663 |
+
(1.0, 0.0)
|
| 664 |
+
),
|
| 665 |
+
( # Case 2:
|
| 666 |
+
# Initial bracket.
|
| 667 |
+
1.4,
|
| 668 |
+
1.95,
|
| 669 |
+
2.5,
|
| 670 |
+
# Function slopes down to the left from the bracket to a minimum at
|
| 671 |
+
# 0.3 with xmin set to 0.3.
|
| 672 |
+
0.3,
|
| 673 |
+
None,
|
| 674 |
+
(0.3, 0.0)
|
| 675 |
+
),
|
| 676 |
+
(
|
| 677 |
+
# Case 3:
|
| 678 |
+
# Initial bracket.
|
| 679 |
+
2.6,
|
| 680 |
+
3.25,
|
| 681 |
+
3.9,
|
| 682 |
+
# Function slopes down and to the right to a minimum at 99.4 with xmax
|
| 683 |
+
# at 99.4. Tests case where minimum is at xmax relatively further from
|
| 684 |
+
# the bracket.
|
| 685 |
+
None,
|
| 686 |
+
99.4,
|
| 687 |
+
(99.4, 0)
|
| 688 |
+
),
|
| 689 |
+
(
|
| 690 |
+
# Case 4:
|
| 691 |
+
# Initial bracket.
|
| 692 |
+
4,
|
| 693 |
+
4.5,
|
| 694 |
+
5,
|
| 695 |
+
# Function slopes down and to the left away from the bracket with a
|
| 696 |
+
# minimum at -26.3 with xmin set to -26.3. Tests case where minimum is
|
| 697 |
+
# at xmin relatively far from the bracket.
|
| 698 |
+
-26.3,
|
| 699 |
+
None,
|
| 700 |
+
(-26.3, 0)
|
| 701 |
+
),
|
| 702 |
+
(
|
| 703 |
+
# Case 5:
|
| 704 |
+
# Similar to Case 1 above, but tests default values of xl0 and xr0.
|
| 705 |
+
None,
|
| 706 |
+
0.3,
|
| 707 |
+
None,
|
| 708 |
+
None,
|
| 709 |
+
1.0,
|
| 710 |
+
(1.0, 0.0)
|
| 711 |
+
),
|
| 712 |
+
( # Case 6:
|
| 713 |
+
# Similar to Case 2 above, but tests default values of xl0 and xr0.
|
| 714 |
+
None,
|
| 715 |
+
1.95,
|
| 716 |
+
None,
|
| 717 |
+
0.3,
|
| 718 |
+
None,
|
| 719 |
+
(0.3, 0.0)
|
| 720 |
+
),
|
| 721 |
+
(
|
| 722 |
+
# Case 7:
|
| 723 |
+
# Similar to Case 3 above, but tests default values of xl0 and xr0.
|
| 724 |
+
None,
|
| 725 |
+
3.25,
|
| 726 |
+
None,
|
| 727 |
+
None,
|
| 728 |
+
99.4,
|
| 729 |
+
(99.4, 0)
|
| 730 |
+
),
|
| 731 |
+
(
|
| 732 |
+
# Case 8:
|
| 733 |
+
# Similar to Case 4 above, but tests default values of xl0 and xr0.
|
| 734 |
+
None,
|
| 735 |
+
4.5,
|
| 736 |
+
None,
|
| 737 |
+
-26.3,
|
| 738 |
+
None,
|
| 739 |
+
(-26.3, 0)
|
| 740 |
+
),
|
| 741 |
+
)
|
| 742 |
+
)
|
| 743 |
+
def test_minimum_at_boundary_point(self, xl0, xm0, xr0, xmin, xmax, args, xp):
|
| 744 |
+
f = self.init_f()
|
| 745 |
+
kwargs = self.get_kwargs(xr0=xr0, xmin=xmin, xmax=xmax,
|
| 746 |
+
args=tuple(map(xp.asarray, args)))
|
| 747 |
+
result = _bracket_minimum(f, xp.asarray(xm0), **kwargs)
|
| 748 |
+
assert result.status == -1
|
| 749 |
+
assert args[0] in (result.xl, result.xr)
|
| 750 |
+
assert result.nfev == f.count
|
| 751 |
+
|
| 752 |
+
@pytest.mark.parametrize('shape', [tuple(), (12, ), (3, 4), (3, 2, 2)])
|
| 753 |
+
def test_vectorization(self, shape, xp):
|
| 754 |
+
# Test for correct functionality, output shapes, and dtypes for
|
| 755 |
+
# various input shapes.
|
| 756 |
+
a = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
|
| 757 |
+
args = (a, 0.)
|
| 758 |
+
maxiter = 10
|
| 759 |
+
|
| 760 |
+
@np.vectorize
|
| 761 |
+
def bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a):
|
| 762 |
+
return _bracket_minimum(self.init_f(), xm0, xl0=xl0, xr0=xr0, xmin=xmin,
|
| 763 |
+
xmax=xmax, factor=factor, maxiter=maxiter,
|
| 764 |
+
args=(a, 0.0))
|
| 765 |
+
|
| 766 |
+
f = self.init_f()
|
| 767 |
+
|
| 768 |
+
rng = np.random.default_rng(2348234)
|
| 769 |
+
xl0 = -rng.random(size=shape)
|
| 770 |
+
xr0 = rng.random(size=shape)
|
| 771 |
+
xm0 = xl0 + rng.random(size=shape) * (xr0 - xl0)
|
| 772 |
+
xmin, xmax = 1e3*xl0, 1e3*xr0
|
| 773 |
+
if shape: # make some elements un
|
| 774 |
+
i = rng.random(size=shape) > 0.5
|
| 775 |
+
xmin[i], xmax[i] = -np.inf, np.inf
|
| 776 |
+
factor = rng.random(size=shape) + 1.5
|
| 777 |
+
refs = bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a).ravel()
|
| 778 |
+
args = tuple(xp.asarray(arg, dtype=xp.float64) for arg in args)
|
| 779 |
+
res = _bracket_minimum(f, xp.asarray(xm0), xl0=xl0, xr0=xr0, xmin=xmin,
|
| 780 |
+
xmax=xmax, factor=factor, args=args, maxiter=maxiter)
|
| 781 |
+
|
| 782 |
+
attrs = ['xl', 'xm', 'xr', 'fl', 'fm', 'fr', 'success', 'nfev', 'nit']
|
| 783 |
+
for attr in attrs:
|
| 784 |
+
ref_attr = [xp.asarray(getattr(ref, attr)) for ref in refs]
|
| 785 |
+
res_attr = getattr(res, attr)
|
| 786 |
+
xp_assert_close(xp_ravel(res_attr, xp=xp), xp.stack(ref_attr))
|
| 787 |
+
xp_assert_equal(res_attr.shape, shape)
|
| 788 |
+
|
| 789 |
+
xp_test = array_namespace(xp.asarray(1.))
|
| 790 |
+
assert res.success.dtype == xp_test.bool
|
| 791 |
+
if shape:
|
| 792 |
+
assert xp.all(res.success[1:-1])
|
| 793 |
+
assert res.status.dtype == xp.int32
|
| 794 |
+
assert res.nfev.dtype == xp.int32
|
| 795 |
+
assert res.nit.dtype == xp.int32
|
| 796 |
+
assert xp.max(res.nit) == f.count - 3
|
| 797 |
+
self.assert_valid_bracket(res, xp)
|
| 798 |
+
xp_assert_close(res.fl, f(res.xl, *args))
|
| 799 |
+
xp_assert_close(res.fm, f(res.xm, *args))
|
| 800 |
+
xp_assert_close(res.fr, f(res.xr, *args))
|
| 801 |
+
|
| 802 |
+
def test_special_cases(self, xp):
|
| 803 |
+
# Test edge cases and other special cases.
|
| 804 |
+
xp_test = array_namespace(xp.asarray(1.))
|
| 805 |
+
|
| 806 |
+
# Test that integers are not passed to `f`
|
| 807 |
+
# (otherwise this would overflow)
|
| 808 |
+
def f(x):
|
| 809 |
+
assert xp_test.isdtype(x.dtype, "numeric")
|
| 810 |
+
return x ** 98 - 1
|
| 811 |
+
|
| 812 |
+
result = _bracket_minimum(f, xp.asarray(-7., dtype=xp.float64), xr0=5)
|
| 813 |
+
assert result.success
|
| 814 |
+
|
| 815 |
+
# Test maxiter = 0. Should do nothing to bracket.
|
| 816 |
+
def f(x):
|
| 817 |
+
return x**2 - 10
|
| 818 |
+
|
| 819 |
+
xl0, xm0, xr0 = xp.asarray(-3.), xp.asarray(-1.), xp.asarray(2.)
|
| 820 |
+
result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, maxiter=0)
|
| 821 |
+
xp_assert_equal(result.xl, xl0)
|
| 822 |
+
xp_assert_equal(result.xm, xm0)
|
| 823 |
+
xp_assert_equal(result.xr, xr0)
|
| 824 |
+
|
| 825 |
+
# Test scalar `args` (not in tuple)
|
| 826 |
+
def f(x, c):
|
| 827 |
+
return c*x**2 - 1
|
| 828 |
+
|
| 829 |
+
result = _bracket_minimum(f, xp.asarray(-1.), args=xp.asarray(3.))
|
| 830 |
+
assert result.success
|
| 831 |
+
xp_assert_close(result.fl, f(result.xl, 3))
|
| 832 |
+
|
| 833 |
+
# Initial bracket is valid.
|
| 834 |
+
f = self.init_f()
|
| 835 |
+
xl0, xm0, xr0 = xp.asarray(-1.0), xp.asarray(-0.2), xp.asarray(1.0)
|
| 836 |
+
args = (xp.asarray(0.), xp.asarray(0.))
|
| 837 |
+
result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, args=args)
|
| 838 |
+
assert f.count == 3
|
| 839 |
+
|
| 840 |
+
xp_assert_equal(result.xl, xl0)
|
| 841 |
+
xp_assert_equal(result.xm , xm0)
|
| 842 |
+
xp_assert_equal(result.xr, xr0)
|
| 843 |
+
xp_assert_equal(result.fl, f(xl0, *args))
|
| 844 |
+
xp_assert_equal(result.fm, f(xm0, *args))
|
| 845 |
+
xp_assert_equal(result.fr, f(xr0, *args))
|
| 846 |
+
|
| 847 |
+
def test_gh_20562_left(self, xp):
|
| 848 |
+
# Regression test for https://github.com/scipy/scipy/issues/20562
|
| 849 |
+
# minimum of f in [xmin, xmax] is at xmin.
|
| 850 |
+
xmin, xmax = xp.asarray(0.21933608), xp.asarray(1.39713606)
|
| 851 |
+
|
| 852 |
+
def f(x):
|
| 853 |
+
log_a, log_b = xp.log(xmin), xp.log(xmax)
|
| 854 |
+
return -((log_b - log_a)*x)**-1
|
| 855 |
+
|
| 856 |
+
result = _bracket_minimum(f, xp.asarray(0.5535723499480897), xmin=xmin,
|
| 857 |
+
xmax=xmax)
|
| 858 |
+
assert xmin == result.xl
|
| 859 |
+
|
| 860 |
+
def test_gh_20562_right(self, xp):
|
| 861 |
+
# Regression test for https://github.com/scipy/scipy/issues/20562
|
| 862 |
+
# minimum of f in [xmin, xmax] is at xmax.
|
| 863 |
+
xmin, xmax = xp.asarray(-1.39713606), xp.asarray(-0.21933608)
|
| 864 |
+
|
| 865 |
+
def f(x):
|
| 866 |
+
log_a, log_b = xp.log(-xmax), xp.log(-xmin)
|
| 867 |
+
return ((log_b - log_a)*x)**-1
|
| 868 |
+
|
| 869 |
+
result = _bracket_minimum(f, xp.asarray(-0.5535723499480897),
|
| 870 |
+
xmin=xmin, xmax=xmax)
|
| 871 |
+
assert xmax == result.xr
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py
ADDED
|
@@ -0,0 +1,984 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import pytest
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from scipy import stats, special
|
| 6 |
+
import scipy._lib._elementwise_iterative_method as eim
|
| 7 |
+
from scipy.conftest import array_api_compatible
|
| 8 |
+
from scipy._lib._array_api import array_namespace, is_cupy, is_numpy, xp_ravel, xp_size
|
| 9 |
+
from scipy._lib._array_api_no_0d import (xp_assert_close, xp_assert_equal,
|
| 10 |
+
xp_assert_less)
|
| 11 |
+
|
| 12 |
+
from scipy.optimize.elementwise import find_minimum, find_root
|
| 13 |
+
from scipy.optimize._tstutils import _CHANDRUPATLA_TESTS
|
| 14 |
+
|
| 15 |
+
from itertools import permutations
|
| 16 |
+
from .test_zeros import TestScalarRootFinders
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _vectorize(xp):
|
| 20 |
+
# xp-compatible version of np.vectorize
|
| 21 |
+
# assumes arguments are all arrays of the same shape
|
| 22 |
+
def decorator(f):
|
| 23 |
+
def wrapped(*arg_arrays):
|
| 24 |
+
shape = arg_arrays[0].shape
|
| 25 |
+
arg_arrays = [xp_ravel(arg_array, xp=xp) for arg_array in arg_arrays]
|
| 26 |
+
res = []
|
| 27 |
+
for i in range(math.prod(shape)):
|
| 28 |
+
arg_scalars = [arg_array[i] for arg_array in arg_arrays]
|
| 29 |
+
res.append(f(*arg_scalars))
|
| 30 |
+
return res
|
| 31 |
+
|
| 32 |
+
return wrapped
|
| 33 |
+
|
| 34 |
+
return decorator
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# These tests were originally written for the private `optimize._chandrupatla`
|
| 38 |
+
# interfaces, but now we want the tests to check the behavior of the public
|
| 39 |
+
# `optimize.elementwise` interfaces. Therefore, rather than importing
|
| 40 |
+
# `_chandrupatla`/`_chandrupatla_minimize` from `_chandrupatla.py`, we import
|
| 41 |
+
# `find_root`/`find_minimum` from `optimize.elementwise` and wrap those
|
| 42 |
+
# functions to conform to the private interface. This may look a little strange,
|
| 43 |
+
# since it effectively just inverts the interface transformation done within the
|
| 44 |
+
# `find_root`/`find_minimum` functions, but it allows us to run the original,
|
| 45 |
+
# unmodified tests on the public interfaces, simplifying the PR that adds
|
| 46 |
+
# the public interfaces. We'll refactor this when we want to @parametrize the
|
| 47 |
+
# tests over multiple `method`s.
|
| 48 |
+
def _wrap_chandrupatla(func):
    """Adapt `find_root`/`find_minimum` to the legacy `_chandrupatla` interface.

    `func` is either `find_root` or `find_minimum`. The returned wrapper
    accepts the old private-API calling convention (bracket points as
    positional arguments, tolerances as individual keyword arguments) and
    translates the public result object back to the old attribute names
    (`xl`/`xm`/`xr`, `fl`/`fm`/`fr`, `fun`) so the original tests run
    unmodified against the public interface.
    """
    def _chandrupatla_wrapper(f, *bracket, **kwargs):
        # avoid passing arguments to `find_minimum` to this function
        tol_keys = {'xatol', 'xrtol', 'fatol', 'frtol'}
        # Collect the individually-passed tolerances into the `tolerances`
        # dict expected by the public interface.
        tolerances = {key: kwargs.pop(key) for key in tol_keys if key in kwargs}
        _callback = kwargs.pop('callback', None)
        if callable(_callback):
            # Wrap the user callback so it sees the legacy attribute names:
            # unpack the public `bracket`/`f_bracket` tuples, then remove the
            # public attributes before delegating.
            def callback(res):
                if func == find_root:
                    # root bracket has two points
                    res.xl, res.xr = res.bracket
                    res.fl, res.fr = res.f_bracket
                else:
                    # minimization bracket has three points
                    res.xl, res.xm, res.xr = res.bracket
                    res.fl, res.fm, res.fr = res.f_bracket
                res.fun = res.f_x
                del res.bracket
                del res.f_bracket
                del res.f_x
                return _callback(res)
        else:
            callback = _callback

        res = func(f, bracket, tolerances=tolerances, callback=callback, **kwargs)
        # Same public->legacy attribute translation for the final result.
        if func == find_root:
            res.xl, res.xr = res.bracket
            res.fl, res.fr = res.f_bracket
        else:
            res.xl, res.xm, res.xr = res.bracket
            res.fl, res.fm, res.fr = res.f_bracket
        res.fun = res.f_x
        del res.bracket
        del res.f_bracket
        del res.f_x
        return res
    return _chandrupatla_wrapper
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# Legacy-interface adapters used throughout the tests below.
_chandrupatla_root = _wrap_chandrupatla(find_root)
_chandrupatla_minimize = _wrap_chandrupatla(find_minimum)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def f1(x):
    """Test problem 1 (a stiff polynomial) from Chandrupatla's paper."""
    cubic_term = (1 - x**3.)**2
    quadratic_term = 1 - x**2.
    linear_term = (1 - x)**2.
    return 100*cubic_term + quadratic_term + 2*linear_term
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def f2(x):
    """Sextic test problem with minimum value 5 at x = 2."""
    shifted = x - 2.
    return 5 + shifted**6
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def f3(x):
|
| 98 |
+
xp = array_namespace(x)
|
| 99 |
+
return xp.exp(x) - 5*x
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def f4(x):
    """Quintic test problem from Chandrupatla's paper."""
    quintic = x**5.
    return quintic - 5*x**3. - 20.*x + 5.
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def f5(x):
    """Cubic test problem from Chandrupatla's paper."""
    cubic = 8*x**3
    return cubic - 2*x**2 - 7*x + 3
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _bracket_minimum(func, x1, x2):
|
| 111 |
+
phi = 1.61803398875
|
| 112 |
+
maxiter = 100
|
| 113 |
+
f1 = func(x1)
|
| 114 |
+
f2 = func(x2)
|
| 115 |
+
step = x2 - x1
|
| 116 |
+
x1, x2, f1, f2, step = ((x2, x1, f2, f1, -step) if f2 > f1
|
| 117 |
+
else (x1, x2, f1, f2, step))
|
| 118 |
+
|
| 119 |
+
for i in range(maxiter):
|
| 120 |
+
step *= phi
|
| 121 |
+
x3 = x2 + step
|
| 122 |
+
f3 = func(x3)
|
| 123 |
+
if f3 < f2:
|
| 124 |
+
x1, x2, f1, f2 = x2, x3, f2, f3
|
| 125 |
+
else:
|
| 126 |
+
break
|
| 127 |
+
return x1, x2, x3, f1, f2, f3
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
# Test cases `(function, starting abscissa, expected iteration count)` used by
# `TestChandrupatlaMinimize.test_nit_expected`; the iteration counts are the
# values reported for these problems in Chandrupatla's original paper.
cases = [
    (f1, -1, 11),
    (f1, -2, 13),
    (f1, -4, 13),
    (f1, -8, 15),
    (f1, -16, 16),
    (f1, -32, 19),
    (f1, -64, 20),
    (f1, -128, 21),
    (f1, -256, 21),
    (f1, -512, 19),
    (f1, -1024, 24),
    (f2, -1, 8),
    (f2, -2, 6),
    (f2, -4, 6),
    (f2, -8, 7),
    (f2, -16, 8),
    (f2, -32, 8),
    (f2, -64, 9),
    (f2, -128, 11),
    (f2, -256, 13),
    (f2, -512, 12),
    (f2, -1024, 13),
    (f3, -1, 11),
    (f3, -2, 11),
    (f3, -4, 11),
    (f3, -8, 10),
    (f3, -16, 14),
    (f3, -32, 12),
    (f3, -64, 15),
    (f3, -128, 18),
    (f3, -256, 18),
    (f3, -512, 19),
    (f3, -1024, 19),
    (f4, -0.05, 9),
    (f4, -0.10, 11),
    (f4, -0.15, 11),
    (f4, -0.20, 11),
    (f4, -0.25, 11),
    (f4, -0.30, 9),
    (f4, -0.35, 9),
    (f4, -0.40, 9),
    (f4, -0.45, 10),
    (f4, -0.50, 10),
    (f4, -0.55, 10),
    (f5, -0.05, 6),
    (f5, -0.10, 7),
    (f5, -0.15, 8),
    (f5, -0.20, 10),
    (f5, -0.25, 9),
    (f5, -0.30, 8),
    (f5, -0.35, 7),
    (f5, -0.40, 7),
    (f5, -0.45, 9),
    (f5, -0.50, 9),
    (f5, -0.55, 8)
]
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
@array_api_compatible
@pytest.mark.usefixtures("skip_xp_backends")
@pytest.mark.skip_xp_backends('jax.numpy',
                              reason='JAX arrays do not support item assignment.')
@pytest.mark.skip_xp_backends('array_api_strict',
                              reason='Currently uses fancy indexing assignment.')
class TestChandrupatlaMinimize:
    """Tests of `find_minimum` via the legacy `_chandrupatla_minimize` wrapper."""

    def f(self, x, loc):
        # Negative normal PDF with location `loc`: smooth, unimodal test
        # objective whose minimizer is exactly `loc`.
        xp = array_namespace(x, loc)
        res = -xp.exp(-1/2 * (x-loc)**2) / (2*xp.pi)**0.5
        return xp.asarray(res, dtype=x.dtype)[()]

    @pytest.mark.parametrize('dtype', ('float32', 'float64'))
    @pytest.mark.parametrize('loc', [0.6, np.linspace(-1.05, 1.05, 10)])
    def test_basic(self, loc, xp, dtype):
        # Find mode of normal distribution. Compare mode against location
        # parameter and value of pdf at mode against expected pdf.
        rtol = {'float32': 5e-3, 'float64': 5e-7}[dtype]
        dtype = getattr(xp, dtype)
        bracket = (xp.asarray(xi, dtype=dtype) for xi in (-5, 0, 5))
        loc = xp.asarray(loc, dtype=dtype)
        fun = xp.broadcast_to(xp.asarray(-stats.norm.pdf(0), dtype=dtype), loc.shape)

        res = _chandrupatla_minimize(self.f, *bracket, args=(loc,))
        xp_assert_close(res.x, loc, rtol=rtol)
        xp_assert_equal(res.fun, fun)

    @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
    def test_vectorization(self, shape, xp):
        # Test for correct functionality, output shapes, and dtypes for various
        # input shapes.
        loc = xp.linspace(-0.05, 1.05, 12).reshape(shape) if shape else xp.asarray(0.6)
        args = (loc,)
        bracket = xp.asarray(-5.), xp.asarray(0.), xp.asarray(5.)
        xp_test = array_namespace(loc)  # need xp.stack

        @_vectorize(xp)
        def chandrupatla_single(loc_single):
            return _chandrupatla_minimize(self.f, *bracket, args=(loc_single,))

        def f(*args, **kwargs):
            f.f_evals += 1
            return self.f(*args, **kwargs)
        f.f_evals = 0

        res = _chandrupatla_minimize(f, *bracket, args=args)
        refs = chandrupatla_single(loc)

        attrs = ['x', 'fun', 'success', 'status', 'nfev', 'nit',
                 'xl', 'xm', 'xr', 'fl', 'fm', 'fr']
        for attr in attrs:
            # vectorized result must match elementwise scalar results exactly
            ref_attr = xp_test.stack([getattr(ref, attr) for ref in refs])
            res_attr = xp_ravel(getattr(res, attr))
            xp_assert_equal(res_attr, ref_attr)
            assert getattr(res, attr).shape == shape

        xp_assert_equal(res.fun, self.f(res.x, *args))
        xp_assert_equal(res.fl, self.f(res.xl, *args))
        xp_assert_equal(res.fm, self.f(res.xm, *args))
        xp_assert_equal(res.fr, self.f(res.xr, *args))
        assert xp.max(res.nfev) == f.f_evals
        assert xp.max(res.nit) == f.f_evals - 3

        assert xp_test.isdtype(res.success.dtype, 'bool')
        assert xp_test.isdtype(res.status.dtype, 'integral')
        assert xp_test.isdtype(res.nfev.dtype, 'integral')
        assert xp_test.isdtype(res.nit.dtype, 'integral')

    def test_flags(self, xp):
        # Test cases that should produce different status flags; show that all
        # can be produced simultaneously.
        def f(xs, js):
            funcs = [lambda x: (x - 2.5) ** 2,
                     lambda x: x - 10,
                     lambda x: (x - 2.5) ** 4,
                     lambda x: xp.full_like(x, xp.asarray(xp.nan))]
            res = []
            for i in range(xp_size(js)):
                x = xs[i, ...]
                j = int(xp_ravel(js)[i])
                res.append(funcs[j](x))
            return xp.stack(res)

        args = (xp.arange(4, dtype=xp.int64),)
        bracket = (xp.asarray([0]*4, dtype=xp.float64),
                   xp.asarray([2]*4, dtype=xp.float64),
                   xp.asarray([np.pi]*4, dtype=xp.float64))
        res = _chandrupatla_minimize(f, *bracket, args=args, maxiter=10)

        ref_flags = xp.asarray([eim._ECONVERGED, eim._ESIGNERR, eim._ECONVERR,
                                eim._EVALUEERR], dtype=xp.int32)
        xp_assert_equal(res.status, ref_flags)

    def test_convergence(self, xp):
        # Test that the convergence tolerances behave as expected
        rng = np.random.default_rng(2585255913088665241)
        p = xp.asarray(rng.random(size=3))
        bracket = (xp.asarray(-5), xp.asarray(0), xp.asarray(5))
        args = (p,)
        kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)

        kwargs = kwargs0.copy()
        kwargs['xatol'] = 1e-3
        res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        j1 = xp.abs(res1.xr - res1.xl)
        tol = xp.asarray(4*kwargs['xatol'], dtype=p.dtype)
        xp_assert_less(j1, xp.full((3,), tol, dtype=p.dtype))
        kwargs['xatol'] = 1e-6
        res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        j2 = xp.abs(res2.xr - res2.xl)
        tol = xp.asarray(4*kwargs['xatol'], dtype=p.dtype)
        xp_assert_less(j2, xp.full((3,), tol, dtype=p.dtype))
        xp_assert_less(j2, j1)

        kwargs = kwargs0.copy()
        kwargs['xrtol'] = 1e-3
        res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        j1 = xp.abs(res1.xr - res1.xl)
        tol = xp.asarray(4*kwargs['xrtol']*xp.abs(res1.x), dtype=p.dtype)
        xp_assert_less(j1, tol)
        kwargs['xrtol'] = 1e-6
        res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        j2 = xp.abs(res2.xr - res2.xl)
        tol = xp.asarray(4*kwargs['xrtol']*xp.abs(res2.x), dtype=p.dtype)
        xp_assert_less(j2, tol)
        xp_assert_less(j2, j1)

        kwargs = kwargs0.copy()
        kwargs['fatol'] = 1e-3
        res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        h1 = xp.abs(res1.fl - 2 * res1.fm + res1.fr)
        tol = xp.asarray(2*kwargs['fatol'], dtype=p.dtype)
        xp_assert_less(h1, xp.full((3,), tol, dtype=p.dtype))
        kwargs['fatol'] = 1e-6
        res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        h2 = xp.abs(res2.fl - 2 * res2.fm + res2.fr)
        tol = xp.asarray(2*kwargs['fatol'], dtype=p.dtype)
        xp_assert_less(h2, xp.full((3,), tol, dtype=p.dtype))
        xp_assert_less(h2, h1)

        kwargs = kwargs0.copy()
        kwargs['frtol'] = 1e-3
        res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        h1 = xp.abs(res1.fl - 2 * res1.fm + res1.fr)
        tol = xp.asarray(2*kwargs['frtol']*xp.abs(res1.fun), dtype=p.dtype)
        xp_assert_less(h1, tol)
        kwargs['frtol'] = 1e-6
        res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        h2 = xp.abs(res2.fl - 2 * res2.fm + res2.fr)
        # use xp.abs (not builtin abs) for consistency with the res1 branch
        tol = xp.asarray(2*kwargs['frtol']*xp.abs(res2.fun), dtype=p.dtype)
        xp_assert_less(h2, tol)
        xp_assert_less(h2, h1)

    def test_maxiter_callback(self, xp):
        # Test behavior of `maxiter` parameter and `callback` interface
        loc = xp.asarray(0.612814)
        bracket = (xp.asarray(-5), xp.asarray(0), xp.asarray(5))
        maxiter = 5

        res = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
                                     maxiter=maxiter)
        assert not xp.any(res.success)
        assert xp.all(res.nfev == maxiter+3)
        assert xp.all(res.nit == maxiter)

        def callback(res):
            callback.iter += 1
            callback.res = res
            assert hasattr(res, 'x')
            if callback.iter == 0:
                # callback is called once with initial bracket
                assert (res.xl, res.xm, res.xr) == bracket
            else:
                # each iteration moves exactly one end of the bracket
                changed_xr = (res.xl == callback.xl) & (res.xr != callback.xr)
                changed_xl = (res.xl != callback.xl) & (res.xr == callback.xr)
                assert xp.all(changed_xr | changed_xl)

            callback.xl = res.xl
            callback.xr = res.xr
            assert res.status == eim._EINPROGRESS
            xp_assert_equal(self.f(res.xl, loc), res.fl)
            xp_assert_equal(self.f(res.xm, loc), res.fm)
            xp_assert_equal(self.f(res.xr, loc), res.fr)
            xp_assert_equal(self.f(res.x, loc), res.fun)
            if callback.iter == maxiter:
                raise StopIteration

        callback.xl = xp.nan
        callback.xr = xp.nan
        callback.iter = -1  # callback called once before first iteration
        callback.res = None

        res2 = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
                                      callback=callback)

        # terminating with callback is identical to terminating due to maxiter
        # (except for `status`)
        for key in res.keys():
            if key == 'status':
                assert res[key] == eim._ECONVERR
                # assert callback.res[key] == eim._EINPROGRESS
                assert res2[key] == eim._ECALLBACK
            else:
                assert res2[key] == callback.res[key] == res[key]

    @pytest.mark.parametrize('case', cases)
    def test_nit_expected(self, case, xp):
        # Test that `_chandrupatla` implements Chandrupatla's algorithm:
        # in all 55 test cases, the number of iterations performed
        # matches the number reported in the original paper.
        func, x1, nit = case

        # Find bracket using the algorithm in the paper
        step = 0.2
        x2 = x1 + step
        x1, x2, x3, f1, f2, f3 = _bracket_minimum(func, x1, x2)

        # Use tolerances from original paper
        xatol = 0.0001
        fatol = 0.000001
        xrtol = 1e-16
        frtol = 1e-16

        bracket = xp.asarray(x1), xp.asarray(x2), xp.asarray(x3, dtype=xp.float64)
        res = _chandrupatla_minimize(func, *bracket, xatol=xatol,
                                     fatol=fatol, xrtol=xrtol, frtol=frtol)
        xp_assert_equal(res.nit, xp.asarray(nit, dtype=xp.int32))

    @pytest.mark.parametrize("loc", (0.65, [0.65, 0.7]))
    @pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64'))
    def test_dtype(self, loc, dtype, xp):
        # Test that dtypes are preserved
        dtype = getattr(xp, dtype)

        loc = xp.asarray(loc, dtype=dtype)
        bracket = (xp.asarray(-3, dtype=dtype),
                   xp.asarray(1, dtype=dtype),
                   xp.asarray(5, dtype=dtype))

        xp_test = array_namespace(loc)  # need astype
        def f(x, loc):
            assert x.dtype == dtype
            return xp_test.astype((x - loc)**2, dtype)

        res = _chandrupatla_minimize(f, *bracket, args=(loc,))
        assert res.x.dtype == dtype
        xp_assert_close(res.x, loc, rtol=math.sqrt(xp.finfo(dtype).eps))

    def test_input_validation(self, xp):
        # Test input validation for appropriate error messages

        message = '`func` must be callable.'
        bracket = xp.asarray(-4), xp.asarray(0), xp.asarray(4)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(None, *bracket)

        message = 'Abscissae and function output must be real numbers.'
        bracket = xp.asarray(-4 + 1j), xp.asarray(0), xp.asarray(4)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, *bracket)

        message = "...be broadcast..."
        bracket = xp.asarray([-2, -3]), xp.asarray([0, 0]), xp.asarray([3, 4, 5])
        # raised by `np.broadcast`, but the traceback is readable IMO
        with pytest.raises((ValueError, RuntimeError), match=message):
            _chandrupatla_minimize(lambda x: x, *bracket)

        message = "The shape of the array returned by `func` must be the same"
        bracket = xp.asarray([-3, -3]), xp.asarray([0, 0]), xp.asarray([5, 5])
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: [x[0, ...], x[1, ...], x[1, ...]],
                                   *bracket)

        message = 'Tolerances must be non-negative scalars.'
        bracket = xp.asarray(-4), xp.asarray(0), xp.asarray(4)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, *bracket, xatol=-1)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, *bracket, xrtol=xp.nan)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, *bracket, fatol='ekki')
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, *bracket, frtol=xp.nan)

        message = '`maxiter` must be a non-negative integer.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, *bracket, maxiter=1.5)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, *bracket, maxiter=-1)

        message = '`callback` must be callable.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, *bracket, callback='shrubbery')

    def test_bracket_order(self, xp):
        # Confirm that order of points in bracket doesn't matter
        xp_test = array_namespace(xp.asarray(1.))  # need `xp.newaxis`
        loc = xp.linspace(-1, 1, 6)[:, xp_test.newaxis]
        brackets = xp.asarray(list(permutations([-5, 0, 5]))).T
        res = _chandrupatla_minimize(self.f, *brackets, args=(loc,))
        assert xp.all(xp.isclose(res.x, loc) | (res.fun == self.f(loc, loc)))
        ref = res.x[:, 0]  # all columns should be the same
        xp_test = array_namespace(loc)  # need `xp.broadcast_arrays`
        xp_assert_close(*xp_test.broadcast_arrays(res.x.T, ref), rtol=1e-15)

    def test_special_cases(self, xp):
        # Test edge cases and other special cases

        # Test that integers are not passed to `f`
        xp_test = array_namespace(xp.asarray(1.))  # need `xp.isdtype`
        def f(x):
            assert xp_test.isdtype(x.dtype, "real floating")
            return (x - 1)**2

        bracket = xp.asarray(-7), xp.asarray(0), xp.asarray(8)
        with np.errstate(invalid='ignore'):
            res = _chandrupatla_minimize(f, *bracket, fatol=0, frtol=0)
        assert res.success
        xp_assert_close(res.x, xp.asarray(1.), rtol=1e-3)
        xp_assert_close(res.fun, xp.asarray(0.), atol=1e-200)

        # Test that if all elements of bracket equal minimizer, algorithm
        # reports convergence
        def f(x):
            return (x-1)**2

        bracket = xp.asarray(1), xp.asarray(1), xp.asarray(1)
        res = _chandrupatla_minimize(f, *bracket)
        assert res.success
        xp_assert_equal(res.x, xp.asarray(1.))

        # Test maxiter = 0. Should do nothing to bracket.
        def f(x):
            return (x-1)**2

        bracket = xp.asarray(-3), xp.asarray(1.1), xp.asarray(5)
        res = _chandrupatla_minimize(f, *bracket, maxiter=0)
        # BUG FIX: the original `assert res.xl, res.xr == bracket` only
        # asserted truthiness of `res.xl` (the comparison was the assert
        # *message* and was never evaluated as a condition).
        assert (res.xl, res.xm, res.xr) == bracket
        assert res.nit == 0
        assert res.nfev == 3
        assert res.status == -2
        assert res.x == 1.1  # best so far

        # Test scalar `args` (not in tuple)
        def f(x, c):
            return (x-c)**2 - 1

        bracket = xp.asarray(-1), xp.asarray(0), xp.asarray(1)
        c = xp.asarray(1/3)
        res = _chandrupatla_minimize(f, *bracket, args=(c,))
        xp_assert_close(res.x, c)

        # Test zero tolerances
        def f(x):
            return -xp.sin(x)

        bracket = xp.asarray(0), xp.asarray(1), xp.asarray(xp.pi)
        res = _chandrupatla_minimize(f, *bracket, xatol=0, xrtol=0, fatol=0, frtol=0)
        assert res.success
        # found a minimum exactly (according to floating point arithmetic)
        assert res.xl < res.xm < res.xr
        assert f(res.xl) == f(res.xm) == f(res.xr)
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
@array_api_compatible
|
| 556 |
+
@pytest.mark.usefixtures("skip_xp_backends")
|
| 557 |
+
@pytest.mark.skip_xp_backends('array_api_strict',
|
| 558 |
+
reason='Currently uses fancy indexing assignment.')
|
| 559 |
+
@pytest.mark.skip_xp_backends('jax.numpy',
|
| 560 |
+
reason='JAX arrays do not support item assignment.')
|
| 561 |
+
@pytest.mark.skip_xp_backends('cupy',
|
| 562 |
+
reason='cupy/cupy#8391')
|
| 563 |
+
class TestChandrupatla(TestScalarRootFinders):
|
| 564 |
+
|
| 565 |
+
def f(self, q, p):
|
| 566 |
+
return special.ndtr(q) - p
|
| 567 |
+
|
| 568 |
+
@pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)])
|
| 569 |
+
def test_basic(self, p, xp):
|
| 570 |
+
# Invert distribution CDF and compare against distribution `ppf`
|
| 571 |
+
a, b = xp.asarray(-5.), xp.asarray(5.)
|
| 572 |
+
res = _chandrupatla_root(self.f, a, b, args=(xp.asarray(p),))
|
| 573 |
+
ref = xp.asarray(stats.norm().ppf(p), dtype=xp.asarray(p).dtype)
|
| 574 |
+
xp_assert_close(res.x, ref)
|
| 575 |
+
|
| 576 |
+
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
|
| 577 |
+
def test_vectorization(self, shape, xp):
|
| 578 |
+
# Test for correct functionality, output shapes, and dtypes for various
|
| 579 |
+
# input shapes.
|
| 580 |
+
p = (np.linspace(-0.05, 1.05, 12).reshape(shape) if shape
|
| 581 |
+
else np.float64(0.6))
|
| 582 |
+
p_xp = xp.asarray(p)
|
| 583 |
+
args_xp = (p_xp,)
|
| 584 |
+
dtype = p_xp.dtype
|
| 585 |
+
xp_test = array_namespace(p_xp) # need xp.bool
|
| 586 |
+
|
| 587 |
+
@np.vectorize
|
| 588 |
+
def chandrupatla_single(p):
|
| 589 |
+
return _chandrupatla_root(self.f, -5, 5, args=(p,))
|
| 590 |
+
|
| 591 |
+
def f(*args, **kwargs):
|
| 592 |
+
f.f_evals += 1
|
| 593 |
+
return self.f(*args, **kwargs)
|
| 594 |
+
f.f_evals = 0
|
| 595 |
+
|
| 596 |
+
res = _chandrupatla_root(f, xp.asarray(-5.), xp.asarray(5.), args=args_xp)
|
| 597 |
+
refs = chandrupatla_single(p).ravel()
|
| 598 |
+
|
| 599 |
+
ref_x = [ref.x for ref in refs]
|
| 600 |
+
ref_x = xp.reshape(xp.asarray(ref_x, dtype=dtype), shape)
|
| 601 |
+
xp_assert_close(res.x, ref_x)
|
| 602 |
+
|
| 603 |
+
ref_fun = [ref.fun for ref in refs]
|
| 604 |
+
ref_fun = xp.reshape(xp.asarray(ref_fun, dtype=dtype), shape)
|
| 605 |
+
xp_assert_close(res.fun, ref_fun, atol=1e-15)
|
| 606 |
+
xp_assert_equal(res.fun, self.f(res.x, *args_xp))
|
| 607 |
+
|
| 608 |
+
ref_success = [bool(ref.success) for ref in refs]
|
| 609 |
+
ref_success = xp.reshape(xp.asarray(ref_success, dtype=xp_test.bool), shape)
|
| 610 |
+
xp_assert_equal(res.success, ref_success)
|
| 611 |
+
|
| 612 |
+
ref_flag = [ref.status for ref in refs]
|
| 613 |
+
ref_flag = xp.reshape(xp.asarray(ref_flag, dtype=xp.int32), shape)
|
| 614 |
+
xp_assert_equal(res.status, ref_flag)
|
| 615 |
+
|
| 616 |
+
ref_nfev = [ref.nfev for ref in refs]
|
| 617 |
+
ref_nfev = xp.reshape(xp.asarray(ref_nfev, dtype=xp.int32), shape)
|
| 618 |
+
if is_numpy(xp):
|
| 619 |
+
xp_assert_equal(res.nfev, ref_nfev)
|
| 620 |
+
assert xp.max(res.nfev) == f.f_evals
|
| 621 |
+
else: # different backend may lead to different nfev
|
| 622 |
+
assert res.nfev.shape == shape
|
| 623 |
+
assert res.nfev.dtype == xp.int32
|
| 624 |
+
|
| 625 |
+
ref_nit = [ref.nit for ref in refs]
|
| 626 |
+
ref_nit = xp.reshape(xp.asarray(ref_nit, dtype=xp.int32), shape)
|
| 627 |
+
if is_numpy(xp):
|
| 628 |
+
xp_assert_equal(res.nit, ref_nit)
|
| 629 |
+
assert xp.max(res.nit) == f.f_evals-2
|
| 630 |
+
else:
|
| 631 |
+
assert res.nit.shape == shape
|
| 632 |
+
assert res.nit.dtype == xp.int32
|
| 633 |
+
|
| 634 |
+
ref_xl = [ref.xl for ref in refs]
|
| 635 |
+
ref_xl = xp.reshape(xp.asarray(ref_xl, dtype=dtype), shape)
|
| 636 |
+
xp_assert_close(res.xl, ref_xl)
|
| 637 |
+
|
| 638 |
+
ref_xr = [ref.xr for ref in refs]
|
| 639 |
+
ref_xr = xp.reshape(xp.asarray(ref_xr, dtype=dtype), shape)
|
| 640 |
+
xp_assert_close(res.xr, ref_xr)
|
| 641 |
+
|
| 642 |
+
xp_assert_less(res.xl, res.xr)
|
| 643 |
+
finite = xp.isfinite(res.x)
|
| 644 |
+
assert xp.all((res.x[finite] == res.xl[finite])
|
| 645 |
+
| (res.x[finite] == res.xr[finite]))
|
| 646 |
+
|
| 647 |
+
# PyTorch and CuPy don't solve to the same accuracy as NumPy - that's OK.
|
| 648 |
+
atol = 1e-15 if is_numpy(xp) else 1e-9
|
| 649 |
+
|
| 650 |
+
ref_fl = [ref.fl for ref in refs]
|
| 651 |
+
ref_fl = xp.reshape(xp.asarray(ref_fl, dtype=dtype), shape)
|
| 652 |
+
xp_assert_close(res.fl, ref_fl, atol=atol)
|
| 653 |
+
xp_assert_equal(res.fl, self.f(res.xl, *args_xp))
|
| 654 |
+
|
| 655 |
+
ref_fr = [ref.fr for ref in refs]
|
| 656 |
+
ref_fr = xp.reshape(xp.asarray(ref_fr, dtype=dtype), shape)
|
| 657 |
+
xp_assert_close(res.fr, ref_fr, atol=atol)
|
| 658 |
+
xp_assert_equal(res.fr, self.f(res.xr, *args_xp))
|
| 659 |
+
|
| 660 |
+
assert xp.all(xp.abs(res.fun[finite]) ==
|
| 661 |
+
xp.minimum(xp.abs(res.fl[finite]),
|
| 662 |
+
xp.abs(res.fr[finite])))
|
| 663 |
+
|
| 664 |
+
def test_flags(self, xp):
|
| 665 |
+
# Test cases that should produce different status flags; show that all
|
| 666 |
+
# can be produced simultaneously.
|
| 667 |
+
def f(xs, js):
|
| 668 |
+
# Note that full_like and int(j) shouldn't really be required. CuPy
|
| 669 |
+
# is just really picky here, so I'm making it a special case to
|
| 670 |
+
# make sure the other backends work when the user is less careful.
|
| 671 |
+
assert js.dtype == xp.int64
|
| 672 |
+
if is_cupy(xp):
|
| 673 |
+
funcs = [lambda x: x - 2.5,
|
| 674 |
+
lambda x: x - 10,
|
| 675 |
+
lambda x: (x - 0.1)**3,
|
| 676 |
+
lambda x: xp.full_like(x, xp.asarray(xp.nan))]
|
| 677 |
+
return [funcs[int(j)](x) for x, j in zip(xs, js)]
|
| 678 |
+
|
| 679 |
+
funcs = [lambda x: x - 2.5,
|
| 680 |
+
lambda x: x - 10,
|
| 681 |
+
lambda x: (x - 0.1) ** 3,
|
| 682 |
+
lambda x: xp.nan]
|
| 683 |
+
return [funcs[j](x) for x, j in zip(xs, js)]
|
| 684 |
+
|
| 685 |
+
args = (xp.arange(4, dtype=xp.int64),)
|
| 686 |
+
a, b = xp.asarray([0.]*4), xp.asarray([xp.pi]*4)
|
| 687 |
+
res = _chandrupatla_root(f, a, b, args=args, maxiter=2)
|
| 688 |
+
|
| 689 |
+
ref_flags = xp.asarray([eim._ECONVERGED,
|
| 690 |
+
eim._ESIGNERR,
|
| 691 |
+
eim._ECONVERR,
|
| 692 |
+
eim._EVALUEERR], dtype=xp.int32)
|
| 693 |
+
xp_assert_equal(res.status, ref_flags)
|
| 694 |
+
|
| 695 |
+
def test_convergence(self, xp):
|
| 696 |
+
# Test that the convergence tolerances behave as expected
|
| 697 |
+
rng = np.random.default_rng(2585255913088665241)
|
| 698 |
+
p = xp.asarray(rng.random(size=3))
|
| 699 |
+
bracket = (-xp.asarray(5.), xp.asarray(5.))
|
| 700 |
+
args = (p,)
|
| 701 |
+
kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)
|
| 702 |
+
|
| 703 |
+
kwargs = kwargs0.copy()
|
| 704 |
+
kwargs['xatol'] = 1e-3
|
| 705 |
+
res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
| 706 |
+
xp_assert_less(res1.xr - res1.xl, xp.full_like(p, xp.asarray(1e-3)))
|
| 707 |
+
kwargs['xatol'] = 1e-6
|
| 708 |
+
res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
| 709 |
+
xp_assert_less(res2.xr - res2.xl, xp.full_like(p, xp.asarray(1e-6)))
|
| 710 |
+
xp_assert_less(res2.xr - res2.xl, res1.xr - res1.xl)
|
| 711 |
+
|
| 712 |
+
kwargs = kwargs0.copy()
|
| 713 |
+
kwargs['xrtol'] = 1e-3
|
| 714 |
+
res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
| 715 |
+
xp_assert_less(res1.xr - res1.xl, 1e-3 * xp.abs(res1.x))
|
| 716 |
+
kwargs['xrtol'] = 1e-6
|
| 717 |
+
res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
| 718 |
+
xp_assert_less(res2.xr - res2.xl, 1e-6 * xp.abs(res2.x))
|
| 719 |
+
xp_assert_less(res2.xr - res2.xl, res1.xr - res1.xl)
|
| 720 |
+
|
| 721 |
+
kwargs = kwargs0.copy()
|
| 722 |
+
kwargs['fatol'] = 1e-3
|
| 723 |
+
res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
| 724 |
+
xp_assert_less(xp.abs(res1.fun), xp.full_like(p, xp.asarray(1e-3)))
|
| 725 |
+
kwargs['fatol'] = 1e-6
|
| 726 |
+
res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
| 727 |
+
xp_assert_less(xp.abs(res2.fun), xp.full_like(p, xp.asarray(1e-6)))
|
| 728 |
+
xp_assert_less(xp.abs(res2.fun), xp.abs(res1.fun))
|
| 729 |
+
|
| 730 |
+
kwargs = kwargs0.copy()
|
| 731 |
+
kwargs['frtol'] = 1e-3
|
| 732 |
+
x1, x2 = bracket
|
| 733 |
+
f0 = xp.minimum(xp.abs(self.f(x1, *args)), xp.abs(self.f(x2, *args)))
|
| 734 |
+
res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
| 735 |
+
xp_assert_less(xp.abs(res1.fun), 1e-3*f0)
|
| 736 |
+
kwargs['frtol'] = 1e-6
|
| 737 |
+
res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
|
| 738 |
+
xp_assert_less(xp.abs(res2.fun), 1e-6*f0)
|
| 739 |
+
xp_assert_less(xp.abs(res2.fun), xp.abs(res1.fun))
|
| 740 |
+
|
| 741 |
+
def test_maxiter_callback(self, xp):
|
| 742 |
+
# Test behavior of `maxiter` parameter and `callback` interface
|
| 743 |
+
p = xp.asarray(0.612814)
|
| 744 |
+
bracket = (xp.asarray(-5.), xp.asarray(5.))
|
| 745 |
+
maxiter = 5
|
| 746 |
+
|
| 747 |
+
def f(q, p):
|
| 748 |
+
res = special.ndtr(q) - p
|
| 749 |
+
f.x = q
|
| 750 |
+
f.fun = res
|
| 751 |
+
return res
|
| 752 |
+
f.x = None
|
| 753 |
+
f.fun = None
|
| 754 |
+
|
| 755 |
+
res = _chandrupatla_root(f, *bracket, args=(p,), maxiter=maxiter)
|
| 756 |
+
assert not xp.any(res.success)
|
| 757 |
+
assert xp.all(res.nfev == maxiter+2)
|
| 758 |
+
assert xp.all(res.nit == maxiter)
|
| 759 |
+
|
| 760 |
+
def callback(res):
|
| 761 |
+
callback.iter += 1
|
| 762 |
+
callback.res = res
|
| 763 |
+
assert hasattr(res, 'x')
|
| 764 |
+
if callback.iter == 0:
|
| 765 |
+
# callback is called once with initial bracket
|
| 766 |
+
assert (res.xl, res.xr) == bracket
|
| 767 |
+
else:
|
| 768 |
+
changed = (((res.xl == callback.xl) & (res.xr != callback.xr))
|
| 769 |
+
| ((res.xl != callback.xl) & (res.xr == callback.xr)))
|
| 770 |
+
assert xp.all(changed)
|
| 771 |
+
|
| 772 |
+
callback.xl = res.xl
|
| 773 |
+
callback.xr = res.xr
|
| 774 |
+
assert res.status == eim._EINPROGRESS
|
| 775 |
+
xp_assert_equal(self.f(res.xl, p), res.fl)
|
| 776 |
+
xp_assert_equal(self.f(res.xr, p), res.fr)
|
| 777 |
+
xp_assert_equal(self.f(res.x, p), res.fun)
|
| 778 |
+
if callback.iter == maxiter:
|
| 779 |
+
raise StopIteration
|
| 780 |
+
callback.iter = -1 # callback called once before first iteration
|
| 781 |
+
callback.res = None
|
| 782 |
+
callback.xl = None
|
| 783 |
+
callback.xr = None
|
| 784 |
+
|
| 785 |
+
res2 = _chandrupatla_root(f, *bracket, args=(p,), callback=callback)
|
| 786 |
+
|
| 787 |
+
# terminating with callback is identical to terminating due to maxiter
|
| 788 |
+
# (except for `status`)
|
| 789 |
+
for key in res.keys():
|
| 790 |
+
if key == 'status':
|
| 791 |
+
xp_assert_equal(res[key], xp.asarray(eim._ECONVERR, dtype=xp.int32))
|
| 792 |
+
xp_assert_equal(res2[key], xp.asarray(eim._ECALLBACK, dtype=xp.int32))
|
| 793 |
+
elif key.startswith('_'):
|
| 794 |
+
continue
|
| 795 |
+
else:
|
| 796 |
+
xp_assert_equal(res2[key], res[key])
|
| 797 |
+
|
| 798 |
+
@pytest.mark.parametrize('case', _CHANDRUPATLA_TESTS)
|
| 799 |
+
def test_nit_expected(self, case, xp):
|
| 800 |
+
# Test that `_chandrupatla` implements Chandrupatla's algorithm:
|
| 801 |
+
# in all 40 test cases, the number of iterations performed
|
| 802 |
+
# matches the number reported in the original paper.
|
| 803 |
+
f, bracket, root, nfeval, id = case
|
| 804 |
+
# Chandrupatla's criterion is equivalent to
|
| 805 |
+
# abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard
|
| 806 |
+
# abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x
|
| 807 |
+
# that used by Chandrupatla in tests.
|
| 808 |
+
bracket = (xp.asarray(bracket[0], dtype=xp.float64),
|
| 809 |
+
xp.asarray(bracket[1], dtype=xp.float64))
|
| 810 |
+
root = xp.asarray(root, dtype=xp.float64)
|
| 811 |
+
|
| 812 |
+
res = _chandrupatla_root(f, *bracket, xrtol=4e-10, xatol=1e-5)
|
| 813 |
+
xp_assert_close(res.fun, xp.asarray(f(root), dtype=xp.float64),
|
| 814 |
+
rtol=1e-8, atol=2e-3)
|
| 815 |
+
xp_assert_equal(res.nfev, xp.asarray(nfeval, dtype=xp.int32))
|
| 816 |
+
|
| 817 |
+
@pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
|
| 818 |
+
@pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64'))
|
| 819 |
+
def test_dtype(self, root, dtype, xp):
|
| 820 |
+
# Test that dtypes are preserved
|
| 821 |
+
not_numpy = not is_numpy(xp)
|
| 822 |
+
if not_numpy and dtype == 'float16':
|
| 823 |
+
pytest.skip("`float16` dtype only supported for NumPy arrays.")
|
| 824 |
+
|
| 825 |
+
dtype = getattr(xp, dtype, None)
|
| 826 |
+
if dtype is None:
|
| 827 |
+
pytest.skip(f"{xp} does not support {dtype}")
|
| 828 |
+
|
| 829 |
+
def f(x, root):
|
| 830 |
+
res = (x - root) ** 3.
|
| 831 |
+
if is_numpy(xp): # NumPy does not preserve dtype
|
| 832 |
+
return xp.asarray(res, dtype=dtype)
|
| 833 |
+
return res
|
| 834 |
+
|
| 835 |
+
a, b = xp.asarray(-3, dtype=dtype), xp.asarray(3, dtype=dtype)
|
| 836 |
+
root = xp.asarray(root, dtype=dtype)
|
| 837 |
+
res = _chandrupatla_root(f, a, b, args=(root,), xatol=1e-3)
|
| 838 |
+
try:
|
| 839 |
+
xp_assert_close(res.x, root, atol=1e-3)
|
| 840 |
+
except AssertionError:
|
| 841 |
+
assert res.x.dtype == dtype
|
| 842 |
+
xp.all(res.fun == 0)
|
| 843 |
+
|
| 844 |
+
def test_input_validation(self, xp):
|
| 845 |
+
# Test input validation for appropriate error messages
|
| 846 |
+
|
| 847 |
+
def func(x):
|
| 848 |
+
return x
|
| 849 |
+
|
| 850 |
+
message = '`func` must be callable.'
|
| 851 |
+
with pytest.raises(ValueError, match=message):
|
| 852 |
+
bracket = xp.asarray(-4), xp.asarray(4)
|
| 853 |
+
_chandrupatla_root(None, *bracket)
|
| 854 |
+
|
| 855 |
+
message = 'Abscissae and function output must be real numbers.'
|
| 856 |
+
with pytest.raises(ValueError, match=message):
|
| 857 |
+
bracket = xp.asarray(-4+1j), xp.asarray(4)
|
| 858 |
+
_chandrupatla_root(func, *bracket)
|
| 859 |
+
|
| 860 |
+
# raised by `np.broadcast, but the traceback is readable IMO
|
| 861 |
+
message = "...not be broadcast..." # all messages include this part
|
| 862 |
+
with pytest.raises((ValueError, RuntimeError), match=message):
|
| 863 |
+
bracket = xp.asarray([-2, -3]), xp.asarray([3, 4, 5])
|
| 864 |
+
_chandrupatla_root(func, *bracket)
|
| 865 |
+
|
| 866 |
+
message = "The shape of the array returned by `func`..."
|
| 867 |
+
with pytest.raises(ValueError, match=message):
|
| 868 |
+
bracket = xp.asarray([-3, -3]), xp.asarray([5, 5])
|
| 869 |
+
_chandrupatla_root(lambda x: [x[0], x[1], x[1]], *bracket)
|
| 870 |
+
|
| 871 |
+
message = 'Tolerances must be non-negative scalars.'
|
| 872 |
+
bracket = xp.asarray(-4), xp.asarray(4)
|
| 873 |
+
with pytest.raises(ValueError, match=message):
|
| 874 |
+
_chandrupatla_root(func, *bracket, xatol=-1)
|
| 875 |
+
with pytest.raises(ValueError, match=message):
|
| 876 |
+
_chandrupatla_root(func, *bracket, xrtol=xp.nan)
|
| 877 |
+
with pytest.raises(ValueError, match=message):
|
| 878 |
+
_chandrupatla_root(func, *bracket, fatol='ekki')
|
| 879 |
+
with pytest.raises(ValueError, match=message):
|
| 880 |
+
_chandrupatla_root(func, *bracket, frtol=xp.nan)
|
| 881 |
+
|
| 882 |
+
message = '`maxiter` must be a non-negative integer.'
|
| 883 |
+
with pytest.raises(ValueError, match=message):
|
| 884 |
+
_chandrupatla_root(func, *bracket, maxiter=1.5)
|
| 885 |
+
with pytest.raises(ValueError, match=message):
|
| 886 |
+
_chandrupatla_root(func, *bracket, maxiter=-1)
|
| 887 |
+
|
| 888 |
+
message = '`callback` must be callable.'
|
| 889 |
+
with pytest.raises(ValueError, match=message):
|
| 890 |
+
_chandrupatla_root(func, *bracket, callback='shrubbery')
|
| 891 |
+
|
| 892 |
+
def test_special_cases(self, xp):
|
| 893 |
+
# Test edge cases and other special cases
|
| 894 |
+
|
| 895 |
+
# Test infinite function values
|
| 896 |
+
def f(x):
|
| 897 |
+
return 1 / x + 1 - 1 / (-x + 1)
|
| 898 |
+
|
| 899 |
+
a, b = xp.asarray([0.1, 0., 0., 0.1]), xp.asarray([0.9, 1.0, 0.9, 1.0])
|
| 900 |
+
|
| 901 |
+
with np.errstate(divide='ignore', invalid='ignore'):
|
| 902 |
+
res = _chandrupatla_root(f, a, b)
|
| 903 |
+
|
| 904 |
+
assert xp.all(res.success)
|
| 905 |
+
xp_assert_close(res.x[1:], xp.full((3,), res.x[0]))
|
| 906 |
+
|
| 907 |
+
# Test that integers are not passed to `f`
|
| 908 |
+
# (otherwise this would overflow)
|
| 909 |
+
xp_test = array_namespace(a) # need isdtype
|
| 910 |
+
def f(x):
|
| 911 |
+
assert xp_test.isdtype(x.dtype, "real floating")
|
| 912 |
+
# this would overflow if x were an xp integer dtype
|
| 913 |
+
return x ** 31 - 1
|
| 914 |
+
|
| 915 |
+
# note that all inputs are integer type; result is automatically default float
|
| 916 |
+
res = _chandrupatla_root(f, xp.asarray(-7), xp.asarray(5))
|
| 917 |
+
assert res.success
|
| 918 |
+
xp_assert_close(res.x, xp.asarray(1.))
|
| 919 |
+
|
| 920 |
+
# Test that if both ends of bracket equal root, algorithm reports
|
| 921 |
+
# convergence.
|
| 922 |
+
def f(x, root):
|
| 923 |
+
return x**2 - root
|
| 924 |
+
|
| 925 |
+
root = xp.asarray([0, 1])
|
| 926 |
+
res = _chandrupatla_root(f, xp.asarray(1), xp.asarray(1), args=(root,))
|
| 927 |
+
xp_assert_equal(res.success, xp.asarray([False, True]))
|
| 928 |
+
xp_assert_equal(res.x, xp.asarray([xp.nan, 1.]))
|
| 929 |
+
|
| 930 |
+
def f(x):
|
| 931 |
+
return 1/x
|
| 932 |
+
|
| 933 |
+
with np.errstate(invalid='ignore'):
|
| 934 |
+
inf = xp.asarray(xp.inf)
|
| 935 |
+
res = _chandrupatla_root(f, inf, inf)
|
| 936 |
+
assert res.success
|
| 937 |
+
xp_assert_equal(res.x, xp.asarray(xp.inf))
|
| 938 |
+
|
| 939 |
+
# Test maxiter = 0. Should do nothing to bracket.
|
| 940 |
+
def f(x):
|
| 941 |
+
return x**3 - 1
|
| 942 |
+
|
| 943 |
+
a, b = xp.asarray(-3.), xp.asarray(5.)
|
| 944 |
+
res = _chandrupatla_root(f, a, b, maxiter=0)
|
| 945 |
+
xp_assert_equal(res.success, xp.asarray(False))
|
| 946 |
+
xp_assert_equal(res.status, xp.asarray(-2, dtype=xp.int32))
|
| 947 |
+
xp_assert_equal(res.nit, xp.asarray(0, dtype=xp.int32))
|
| 948 |
+
xp_assert_equal(res.nfev, xp.asarray(2, dtype=xp.int32))
|
| 949 |
+
xp_assert_equal(res.xl, a)
|
| 950 |
+
xp_assert_equal(res.xr, b)
|
| 951 |
+
# The `x` attribute is the one with the smaller function value
|
| 952 |
+
xp_assert_equal(res.x, a)
|
| 953 |
+
# Reverse bracket; check that this is still true
|
| 954 |
+
res = _chandrupatla_root(f, -b, -a, maxiter=0)
|
| 955 |
+
xp_assert_equal(res.x, -a)
|
| 956 |
+
|
| 957 |
+
# Test maxiter = 1
|
| 958 |
+
res = _chandrupatla_root(f, a, b, maxiter=1)
|
| 959 |
+
xp_assert_equal(res.success, xp.asarray(True))
|
| 960 |
+
xp_assert_equal(res.status, xp.asarray(0, dtype=xp.int32))
|
| 961 |
+
xp_assert_equal(res.nit, xp.asarray(1, dtype=xp.int32))
|
| 962 |
+
xp_assert_equal(res.nfev, xp.asarray(3, dtype=xp.int32))
|
| 963 |
+
xp_assert_close(res.x, xp.asarray(1.))
|
| 964 |
+
|
| 965 |
+
# Test scalar `args` (not in tuple)
|
| 966 |
+
def f(x, c):
|
| 967 |
+
return c*x - 1
|
| 968 |
+
|
| 969 |
+
res = _chandrupatla_root(f, xp.asarray(-1), xp.asarray(1), args=xp.asarray(3))
|
| 970 |
+
xp_assert_close(res.x, xp.asarray(1/3))
|
| 971 |
+
|
| 972 |
+
# # TODO: Test zero tolerance
|
| 973 |
+
# # ~~What's going on here - why are iterations repeated?~~
|
| 974 |
+
# # tl goes to zero when xatol=xrtol=0. When function is nearly linear,
|
| 975 |
+
# # this causes convergence issues.
|
| 976 |
+
# def f(x):
|
| 977 |
+
# return np.cos(x)
|
| 978 |
+
#
|
| 979 |
+
# res = _chandrupatla_root(f, 0, np.pi, xatol=0, xrtol=0)
|
| 980 |
+
# assert res.nit < 100
|
| 981 |
+
# xp = np.nextafter(res.x, np.inf)
|
| 982 |
+
# xm = np.nextafter(res.x, -np.inf)
|
| 983 |
+
# assert np.abs(res.fun) < np.abs(f(xp))
|
| 984 |
+
# assert np.abs(res.fun) < np.abs(f(xm))
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyqa.py
ADDED
|
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
import threading
|
| 4 |
+
from numpy.testing import assert_allclose, assert_equal
|
| 5 |
+
|
| 6 |
+
from scipy.optimize import (
|
| 7 |
+
Bounds,
|
| 8 |
+
LinearConstraint,
|
| 9 |
+
NonlinearConstraint,
|
| 10 |
+
OptimizeResult,
|
| 11 |
+
minimize,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TestCOBYQA:
|
| 16 |
+
|
| 17 |
+
def setup_method(self):
|
| 18 |
+
self.x0 = [4.95, 0.66]
|
| 19 |
+
self.options = {'maxfev': 100}
|
| 20 |
+
|
| 21 |
+
@staticmethod
|
| 22 |
+
def fun(x, c=1.0):
|
| 23 |
+
return x[0]**2 + c * abs(x[1])**3
|
| 24 |
+
|
| 25 |
+
@staticmethod
|
| 26 |
+
def con(x):
|
| 27 |
+
return x[0]**2 + x[1]**2 - 25.0
|
| 28 |
+
|
| 29 |
+
def test_minimize_simple(self):
|
| 30 |
+
class Callback:
|
| 31 |
+
def __init__(self):
|
| 32 |
+
self.lock = threading.Lock()
|
| 33 |
+
self.n_calls = 0
|
| 34 |
+
|
| 35 |
+
def __call__(self, x):
|
| 36 |
+
assert isinstance(x, np.ndarray)
|
| 37 |
+
with self.lock:
|
| 38 |
+
self.n_calls += 1
|
| 39 |
+
|
| 40 |
+
class CallbackNewSyntax:
|
| 41 |
+
def __init__(self):
|
| 42 |
+
self.lock = threading.Lock()
|
| 43 |
+
self.n_calls = 0
|
| 44 |
+
|
| 45 |
+
def __call__(self, intermediate_result):
|
| 46 |
+
assert isinstance(intermediate_result, OptimizeResult)
|
| 47 |
+
with self.lock:
|
| 48 |
+
self.n_calls += 1
|
| 49 |
+
|
| 50 |
+
x0 = [4.95, 0.66]
|
| 51 |
+
callback = Callback()
|
| 52 |
+
callback_new_syntax = CallbackNewSyntax()
|
| 53 |
+
|
| 54 |
+
# Minimize with method='cobyqa'.
|
| 55 |
+
constraints = NonlinearConstraint(self.con, 0.0, 0.0)
|
| 56 |
+
sol = minimize(
|
| 57 |
+
self.fun,
|
| 58 |
+
x0,
|
| 59 |
+
method='cobyqa',
|
| 60 |
+
constraints=constraints,
|
| 61 |
+
callback=callback,
|
| 62 |
+
options=self.options,
|
| 63 |
+
)
|
| 64 |
+
sol_new = minimize(
|
| 65 |
+
self.fun,
|
| 66 |
+
x0,
|
| 67 |
+
method='cobyqa',
|
| 68 |
+
constraints=constraints,
|
| 69 |
+
callback=callback_new_syntax,
|
| 70 |
+
options=self.options,
|
| 71 |
+
)
|
| 72 |
+
solution = [np.sqrt(25.0 - 4.0 / 9.0), 2.0 / 3.0]
|
| 73 |
+
assert_allclose(sol.x, solution, atol=1e-4)
|
| 74 |
+
assert sol.success, sol.message
|
| 75 |
+
assert sol.maxcv < 1e-8, sol
|
| 76 |
+
assert sol.nfev <= 100, sol
|
| 77 |
+
assert sol.fun < self.fun(solution) + 1e-3, sol
|
| 78 |
+
assert sol.nfev == callback.n_calls, \
|
| 79 |
+
"Callback is not called exactly once for every function eval."
|
| 80 |
+
assert_equal(sol.x, sol_new.x)
|
| 81 |
+
assert sol_new.success, sol_new.message
|
| 82 |
+
assert sol.fun == sol_new.fun
|
| 83 |
+
assert sol.maxcv == sol_new.maxcv
|
| 84 |
+
assert sol.nfev == sol_new.nfev
|
| 85 |
+
assert sol.nit == sol_new.nit
|
| 86 |
+
assert sol_new.nfev == callback_new_syntax.n_calls, \
|
| 87 |
+
"Callback is not called exactly once for every function eval."
|
| 88 |
+
|
| 89 |
+
def test_minimize_bounds(self):
|
| 90 |
+
def fun_check_bounds(x):
|
| 91 |
+
assert np.all(bounds.lb <= x) and np.all(x <= bounds.ub)
|
| 92 |
+
return self.fun(x)
|
| 93 |
+
|
| 94 |
+
# Case where the bounds are not active at the solution.
|
| 95 |
+
bounds = Bounds([4.5, 0.6], [5.0, 0.7])
|
| 96 |
+
constraints = NonlinearConstraint(self.con, 0.0, 0.0)
|
| 97 |
+
sol = minimize(
|
| 98 |
+
fun_check_bounds,
|
| 99 |
+
self.x0,
|
| 100 |
+
method='cobyqa',
|
| 101 |
+
bounds=bounds,
|
| 102 |
+
constraints=constraints,
|
| 103 |
+
options=self.options,
|
| 104 |
+
)
|
| 105 |
+
solution = [np.sqrt(25.0 - 4.0 / 9.0), 2.0 / 3.0]
|
| 106 |
+
assert_allclose(sol.x, solution, atol=1e-4)
|
| 107 |
+
assert sol.success, sol.message
|
| 108 |
+
assert sol.maxcv < 1e-8, sol
|
| 109 |
+
assert np.all(bounds.lb <= sol.x) and np.all(sol.x <= bounds.ub), sol
|
| 110 |
+
assert sol.nfev <= 100, sol
|
| 111 |
+
assert sol.fun < self.fun(solution) + 1e-3, sol
|
| 112 |
+
|
| 113 |
+
# Case where the bounds are active at the solution.
|
| 114 |
+
bounds = Bounds([5.0, 0.6], [5.5, 0.65])
|
| 115 |
+
sol = minimize(
|
| 116 |
+
fun_check_bounds,
|
| 117 |
+
self.x0,
|
| 118 |
+
method='cobyqa',
|
| 119 |
+
bounds=bounds,
|
| 120 |
+
constraints=constraints,
|
| 121 |
+
options=self.options,
|
| 122 |
+
)
|
| 123 |
+
assert not sol.success, sol.message
|
| 124 |
+
assert sol.maxcv > 0.35, sol
|
| 125 |
+
assert np.all(bounds.lb <= sol.x) and np.all(sol.x <= bounds.ub), sol
|
| 126 |
+
assert sol.nfev <= 100, sol
|
| 127 |
+
|
| 128 |
+
def test_minimize_linear_constraints(self):
|
| 129 |
+
constraints = LinearConstraint([1.0, 1.0], 1.0, 1.0)
|
| 130 |
+
sol = minimize(
|
| 131 |
+
self.fun,
|
| 132 |
+
self.x0,
|
| 133 |
+
method='cobyqa',
|
| 134 |
+
constraints=constraints,
|
| 135 |
+
options=self.options,
|
| 136 |
+
)
|
| 137 |
+
solution = [(4 - np.sqrt(7)) / 3, (np.sqrt(7) - 1) / 3]
|
| 138 |
+
assert_allclose(sol.x, solution, atol=1e-4)
|
| 139 |
+
assert sol.success, sol.message
|
| 140 |
+
assert sol.maxcv < 1e-8, sol
|
| 141 |
+
assert sol.nfev <= 100, sol
|
| 142 |
+
assert sol.fun < self.fun(solution) + 1e-3, sol
|
| 143 |
+
|
| 144 |
+
def test_minimize_args(self):
|
| 145 |
+
constraints = NonlinearConstraint(self.con, 0.0, 0.0)
|
| 146 |
+
sol = minimize(
|
| 147 |
+
self.fun,
|
| 148 |
+
self.x0,
|
| 149 |
+
args=(2.0,),
|
| 150 |
+
method='cobyqa',
|
| 151 |
+
constraints=constraints,
|
| 152 |
+
options=self.options,
|
| 153 |
+
)
|
| 154 |
+
solution = [np.sqrt(25.0 - 4.0 / 36.0), 2.0 / 6.0]
|
| 155 |
+
assert_allclose(sol.x, solution, atol=1e-4)
|
| 156 |
+
assert sol.success, sol.message
|
| 157 |
+
assert sol.maxcv < 1e-8, sol
|
| 158 |
+
assert sol.nfev <= 100, sol
|
| 159 |
+
assert sol.fun < self.fun(solution, 2.0) + 1e-3, sol
|
| 160 |
+
|
| 161 |
+
def test_minimize_array(self):
|
| 162 |
+
def fun_array(x, dim):
|
| 163 |
+
f = np.array(self.fun(x))
|
| 164 |
+
return np.reshape(f, (1,) * dim)
|
| 165 |
+
|
| 166 |
+
# The argument fun can return an array with a single element.
|
| 167 |
+
bounds = Bounds([4.5, 0.6], [5.0, 0.7])
|
| 168 |
+
constraints = NonlinearConstraint(self.con, 0.0, 0.0)
|
| 169 |
+
sol = minimize(
|
| 170 |
+
self.fun,
|
| 171 |
+
self.x0,
|
| 172 |
+
method='cobyqa',
|
| 173 |
+
bounds=bounds,
|
| 174 |
+
constraints=constraints,
|
| 175 |
+
options=self.options,
|
| 176 |
+
)
|
| 177 |
+
for dim in [0, 1, 2]:
|
| 178 |
+
sol_array = minimize(
|
| 179 |
+
fun_array,
|
| 180 |
+
self.x0,
|
| 181 |
+
args=(dim,),
|
| 182 |
+
method='cobyqa',
|
| 183 |
+
bounds=bounds,
|
| 184 |
+
constraints=constraints,
|
| 185 |
+
options=self.options,
|
| 186 |
+
)
|
| 187 |
+
assert_equal(sol.x, sol_array.x)
|
| 188 |
+
assert sol_array.success, sol_array.message
|
| 189 |
+
assert sol.fun == sol_array.fun
|
| 190 |
+
assert sol.maxcv == sol_array.maxcv
|
| 191 |
+
assert sol.nfev == sol_array.nfev
|
| 192 |
+
assert sol.nit == sol_array.nit
|
| 193 |
+
|
| 194 |
+
# The argument fun cannot return an array with more than one element.
|
| 195 |
+
with pytest.raises(TypeError):
|
| 196 |
+
minimize(
|
| 197 |
+
lambda x: np.array([self.fun(x), self.fun(x)]),
|
| 198 |
+
self.x0,
|
| 199 |
+
method='cobyqa',
|
| 200 |
+
bounds=bounds,
|
| 201 |
+
constraints=constraints,
|
| 202 |
+
options=self.options,
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
def test_minimize_maxfev(self):
|
| 206 |
+
constraints = NonlinearConstraint(self.con, 0.0, 0.0)
|
| 207 |
+
options = {'maxfev': 2}
|
| 208 |
+
sol = minimize(
|
| 209 |
+
self.fun,
|
| 210 |
+
self.x0,
|
| 211 |
+
method='cobyqa',
|
| 212 |
+
constraints=constraints,
|
| 213 |
+
options=options,
|
| 214 |
+
)
|
| 215 |
+
assert not sol.success, sol.message
|
| 216 |
+
assert sol.nfev <= 2, sol
|
| 217 |
+
|
| 218 |
+
def test_minimize_maxiter(self):
|
| 219 |
+
constraints = NonlinearConstraint(self.con, 0.0, 0.0)
|
| 220 |
+
options = {'maxiter': 2}
|
| 221 |
+
sol = minimize(
|
| 222 |
+
self.fun,
|
| 223 |
+
self.x0,
|
| 224 |
+
method='cobyqa',
|
| 225 |
+
constraints=constraints,
|
| 226 |
+
options=options,
|
| 227 |
+
)
|
| 228 |
+
assert not sol.success, sol.message
|
| 229 |
+
assert sol.nit <= 2, sol
|
| 230 |
+
|
| 231 |
+
def test_minimize_f_target(self):
|
| 232 |
+
constraints = NonlinearConstraint(self.con, 0.0, 0.0)
|
| 233 |
+
sol_ref = minimize(
|
| 234 |
+
self.fun,
|
| 235 |
+
self.x0,
|
| 236 |
+
method='cobyqa',
|
| 237 |
+
constraints=constraints,
|
| 238 |
+
options=self.options,
|
| 239 |
+
)
|
| 240 |
+
options = dict(self.options)
|
| 241 |
+
options['f_target'] = sol_ref.fun
|
| 242 |
+
sol = minimize(
|
| 243 |
+
self.fun,
|
| 244 |
+
self.x0,
|
| 245 |
+
method='cobyqa',
|
| 246 |
+
constraints=constraints,
|
| 247 |
+
options=options,
|
| 248 |
+
)
|
| 249 |
+
assert sol.success, sol.message
|
| 250 |
+
assert sol.maxcv < 1e-8, sol
|
| 251 |
+
assert sol.nfev <= sol_ref.nfev, sol
|
| 252 |
+
assert sol.fun <= sol_ref.fun, sol
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
import numpy as np
|
| 3 |
+
from numpy.testing import TestCase, assert_array_equal
|
| 4 |
+
import scipy.sparse as sps
|
| 5 |
+
from scipy.optimize._constraints import (
|
| 6 |
+
Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint,
|
| 7 |
+
new_bounds_to_old, old_bound_to_new, strict_bounds)
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TestStrictBounds(TestCase):
|
| 11 |
+
def test_scalarvalue_unique_enforce_feasibility(self):
|
| 12 |
+
m = 3
|
| 13 |
+
lb = 2
|
| 14 |
+
ub = 4
|
| 15 |
+
enforce_feasibility = False
|
| 16 |
+
strict_lb, strict_ub = strict_bounds(lb, ub,
|
| 17 |
+
enforce_feasibility,
|
| 18 |
+
m)
|
| 19 |
+
assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
|
| 20 |
+
assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
|
| 21 |
+
|
| 22 |
+
enforce_feasibility = True
|
| 23 |
+
strict_lb, strict_ub = strict_bounds(lb, ub,
|
| 24 |
+
enforce_feasibility,
|
| 25 |
+
m)
|
| 26 |
+
assert_array_equal(strict_lb, [2, 2, 2])
|
| 27 |
+
assert_array_equal(strict_ub, [4, 4, 4])
|
| 28 |
+
|
| 29 |
+
def test_vectorvalue_unique_enforce_feasibility(self):
|
| 30 |
+
m = 3
|
| 31 |
+
lb = [1, 2, 3]
|
| 32 |
+
ub = [4, 5, 6]
|
| 33 |
+
enforce_feasibility = False
|
| 34 |
+
strict_lb, strict_ub = strict_bounds(lb, ub,
|
| 35 |
+
enforce_feasibility,
|
| 36 |
+
m)
|
| 37 |
+
assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
|
| 38 |
+
assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
|
| 39 |
+
|
| 40 |
+
enforce_feasibility = True
|
| 41 |
+
strict_lb, strict_ub = strict_bounds(lb, ub,
|
| 42 |
+
enforce_feasibility,
|
| 43 |
+
m)
|
| 44 |
+
assert_array_equal(strict_lb, [1, 2, 3])
|
| 45 |
+
assert_array_equal(strict_ub, [4, 5, 6])
|
| 46 |
+
|
| 47 |
+
def test_scalarvalue_vector_enforce_feasibility(self):
|
| 48 |
+
m = 3
|
| 49 |
+
lb = 2
|
| 50 |
+
ub = 4
|
| 51 |
+
enforce_feasibility = [False, True, False]
|
| 52 |
+
strict_lb, strict_ub = strict_bounds(lb, ub,
|
| 53 |
+
enforce_feasibility,
|
| 54 |
+
m)
|
| 55 |
+
assert_array_equal(strict_lb, [-np.inf, 2, -np.inf])
|
| 56 |
+
assert_array_equal(strict_ub, [np.inf, 4, np.inf])
|
| 57 |
+
|
| 58 |
+
def test_vectorvalue_vector_enforce_feasibility(self):
|
| 59 |
+
m = 3
|
| 60 |
+
lb = [1, 2, 3]
|
| 61 |
+
ub = [4, 6, np.inf]
|
| 62 |
+
enforce_feasibility = [True, False, True]
|
| 63 |
+
strict_lb, strict_ub = strict_bounds(lb, ub,
|
| 64 |
+
enforce_feasibility,
|
| 65 |
+
m)
|
| 66 |
+
assert_array_equal(strict_lb, [1, -np.inf, 3])
|
| 67 |
+
assert_array_equal(strict_ub, [4, np.inf, np.inf])
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def test_prepare_constraint_infeasible_x0():
|
| 71 |
+
lb = np.array([0, 20, 30])
|
| 72 |
+
ub = np.array([0.5, np.inf, 70])
|
| 73 |
+
x0 = np.array([1, 2, 3])
|
| 74 |
+
enforce_feasibility = np.array([False, True, True], dtype=bool)
|
| 75 |
+
bounds = Bounds(lb, ub, enforce_feasibility)
|
| 76 |
+
pytest.raises(ValueError, PreparedConstraint, bounds, x0)
|
| 77 |
+
|
| 78 |
+
pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3])
|
| 79 |
+
assert (pc.violation([1, 2, 3]) > 0).any()
|
| 80 |
+
assert (pc.violation([0.25, 21, 31]) == 0).all()
|
| 81 |
+
|
| 82 |
+
x0 = np.array([1, 2, 3, 4])
|
| 83 |
+
A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
|
| 84 |
+
enforce_feasibility = np.array([True, True, True], dtype=bool)
|
| 85 |
+
linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility)
|
| 86 |
+
pytest.raises(ValueError, PreparedConstraint, linear, x0)
|
| 87 |
+
|
| 88 |
+
pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0),
|
| 89 |
+
[1, 2, 3, 4])
|
| 90 |
+
assert (pc.violation([1, 2, 3, 4]) > 0).any()
|
| 91 |
+
assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
| 92 |
+
|
| 93 |
+
def fun(x):
|
| 94 |
+
return A.dot(x)
|
| 95 |
+
|
| 96 |
+
def jac(x):
|
| 97 |
+
return A
|
| 98 |
+
|
| 99 |
+
def hess(x, v):
|
| 100 |
+
return sps.csr_matrix((4, 4))
|
| 101 |
+
|
| 102 |
+
nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess,
|
| 103 |
+
enforce_feasibility)
|
| 104 |
+
pytest.raises(ValueError, PreparedConstraint, nonlinear, x0)
|
| 105 |
+
|
| 106 |
+
pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4])
|
| 107 |
+
assert (pc.violation([1, 2, 3, 4]) > 0).any()
|
| 108 |
+
assert (pc.violation([-10, 2, -10, 4]) == 0).all()
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def test_violation():
|
| 112 |
+
def cons_f(x):
|
| 113 |
+
return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]])
|
| 114 |
+
|
| 115 |
+
nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
|
| 116 |
+
pc = PreparedConstraint(nlc, [0.5, 1])
|
| 117 |
+
|
| 118 |
+
assert_array_equal(pc.violation([0.5, 1]), [0., 0.])
|
| 119 |
+
|
| 120 |
+
np.testing.assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1])
|
| 121 |
+
|
| 122 |
+
np.testing.assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0])
|
| 123 |
+
|
| 124 |
+
np.testing.assert_almost_equal(pc.violation([0.1, -1.2]), [0.19, 0])
|
| 125 |
+
|
| 126 |
+
np.testing.assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14])
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def test_new_bounds_to_old():
|
| 130 |
+
lb = np.array([-np.inf, 2, 3])
|
| 131 |
+
ub = np.array([3, np.inf, 10])
|
| 132 |
+
|
| 133 |
+
bounds = [(None, 3), (2, None), (3, 10)]
|
| 134 |
+
assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds)
|
| 135 |
+
|
| 136 |
+
bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)]
|
| 137 |
+
assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb)
|
| 138 |
+
|
| 139 |
+
bounds_no_lb = [(None, 3), (None, None), (None, 10)]
|
| 140 |
+
assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb)
|
| 141 |
+
|
| 142 |
+
bounds_single_ub = [(None, 20), (2, 20), (3, 20)]
|
| 143 |
+
assert_array_equal(new_bounds_to_old(lb, 20, 3), bounds_single_ub)
|
| 144 |
+
|
| 145 |
+
bounds_no_ub = [(None, None), (2, None), (3, None)]
|
| 146 |
+
assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub)
|
| 147 |
+
|
| 148 |
+
bounds_single_both = [(1, 2), (1, 2), (1, 2)]
|
| 149 |
+
assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both)
|
| 150 |
+
|
| 151 |
+
bounds_no_both = [(None, None), (None, None), (None, None)]
|
| 152 |
+
assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def test_old_bounds_to_new():
|
| 156 |
+
bounds = ([1, 2], (None, 3), (-1, None))
|
| 157 |
+
lb_true = np.array([1, -np.inf, -1])
|
| 158 |
+
ub_true = np.array([2, 3, np.inf])
|
| 159 |
+
|
| 160 |
+
lb, ub = old_bound_to_new(bounds)
|
| 161 |
+
assert_array_equal(lb, lb_true)
|
| 162 |
+
assert_array_equal(ub, ub_true)
|
| 163 |
+
|
| 164 |
+
bounds = [(-np.inf, np.inf), (np.array([1]), np.array([1]))]
|
| 165 |
+
lb, ub = old_bound_to_new(bounds)
|
| 166 |
+
|
| 167 |
+
assert_array_equal(lb, [-np.inf, 1])
|
| 168 |
+
assert_array_equal(ub, [np.inf, 1])
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
class TestBounds:
|
| 172 |
+
def test_repr(self):
|
| 173 |
+
# so that eval works
|
| 174 |
+
from numpy import array, inf # noqa: F401
|
| 175 |
+
for args in (
|
| 176 |
+
(-1.0, 5.0),
|
| 177 |
+
(-1.0, np.inf, True),
|
| 178 |
+
(np.array([1.0, -np.inf]), np.array([2.0, np.inf])),
|
| 179 |
+
(np.array([1.0, -np.inf]), np.array([2.0, np.inf]),
|
| 180 |
+
np.array([True, False])),
|
| 181 |
+
):
|
| 182 |
+
bounds = Bounds(*args)
|
| 183 |
+
bounds2 = eval(repr(Bounds(*args)))
|
| 184 |
+
assert_array_equal(bounds.lb, bounds2.lb)
|
| 185 |
+
assert_array_equal(bounds.ub, bounds2.ub)
|
| 186 |
+
assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible)
|
| 187 |
+
|
| 188 |
+
def test_array(self):
|
| 189 |
+
# gh13501
|
| 190 |
+
b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0])
|
| 191 |
+
assert isinstance(b.lb, np.ndarray)
|
| 192 |
+
assert isinstance(b.ub, np.ndarray)
|
| 193 |
+
|
| 194 |
+
def test_defaults(self):
|
| 195 |
+
b1 = Bounds()
|
| 196 |
+
b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf))
|
| 197 |
+
assert b1.lb == b2.lb
|
| 198 |
+
assert b1.ub == b2.ub
|
| 199 |
+
|
| 200 |
+
def test_input_validation(self):
|
| 201 |
+
message = "Lower and upper bounds must be dense arrays."
|
| 202 |
+
with pytest.raises(ValueError, match=message):
|
| 203 |
+
Bounds(sps.coo_array([1, 2]), [1, 2])
|
| 204 |
+
with pytest.raises(ValueError, match=message):
|
| 205 |
+
Bounds([1, 2], sps.coo_array([1, 2]))
|
| 206 |
+
|
| 207 |
+
message = "`keep_feasible` must be a dense array."
|
| 208 |
+
with pytest.raises(ValueError, match=message):
|
| 209 |
+
Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True]))
|
| 210 |
+
|
| 211 |
+
message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
|
| 212 |
+
with pytest.raises(ValueError, match=message):
|
| 213 |
+
Bounds([1, 2], [1, 2, 3])
|
| 214 |
+
|
| 215 |
+
def test_residual(self):
|
| 216 |
+
bounds = Bounds(-2, 4)
|
| 217 |
+
x0 = [-1, 2]
|
| 218 |
+
np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2]))
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
class TestLinearConstraint:
|
| 222 |
+
def test_defaults(self):
|
| 223 |
+
A = np.eye(4)
|
| 224 |
+
lc = LinearConstraint(A)
|
| 225 |
+
lc2 = LinearConstraint(A, -np.inf, np.inf)
|
| 226 |
+
assert_array_equal(lc.lb, lc2.lb)
|
| 227 |
+
assert_array_equal(lc.ub, lc2.ub)
|
| 228 |
+
|
| 229 |
+
def test_input_validation(self):
|
| 230 |
+
A = np.eye(4)
|
| 231 |
+
message = "`lb`, `ub`, and `keep_feasible` must be broadcastable"
|
| 232 |
+
with pytest.raises(ValueError, match=message):
|
| 233 |
+
LinearConstraint(A, [1, 2], [1, 2, 3])
|
| 234 |
+
|
| 235 |
+
message = "Constraint limits must be dense arrays"
|
| 236 |
+
with pytest.raises(ValueError, match=message):
|
| 237 |
+
LinearConstraint(A, sps.coo_array([1, 2]), [2, 3])
|
| 238 |
+
with pytest.raises(ValueError, match=message):
|
| 239 |
+
LinearConstraint(A, [1, 2], sps.coo_array([2, 3]))
|
| 240 |
+
|
| 241 |
+
message = "`keep_feasible` must be a dense array"
|
| 242 |
+
with pytest.raises(ValueError, match=message):
|
| 243 |
+
keep_feasible = sps.coo_array([True, True])
|
| 244 |
+
LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible)
|
| 245 |
+
|
| 246 |
+
A = np.empty((4, 3, 5))
|
| 247 |
+
message = "`A` must have exactly two dimensions."
|
| 248 |
+
with pytest.raises(ValueError, match=message):
|
| 249 |
+
LinearConstraint(A)
|
| 250 |
+
|
| 251 |
+
def test_residual(self):
|
| 252 |
+
A = np.eye(2)
|
| 253 |
+
lc = LinearConstraint(A, -2, 4)
|
| 254 |
+
x0 = [-1, 2]
|
| 255 |
+
np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2]))
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``,
|
| 3 |
+
and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a
|
| 4 |
+
3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st,
|
| 5 |
+
2nd, and 3rd order terms in ``args``.
|
| 6 |
+
|
| 7 |
+
.. math::
|
| 8 |
+
|
| 9 |
+
f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0
|
| 10 |
+
|
| 11 |
+
The 3rd order polynomial function is written in Cython and called in a Python
|
| 12 |
+
wrapper named after the zero function. See the private ``_zeros`` Cython module
|
| 13 |
+
in `scipy.optimize.cython_optimze` for more information.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import numpy.testing as npt
|
| 17 |
+
from scipy.optimize.cython_optimize import _zeros
|
| 18 |
+
|
| 19 |
+
# CONSTANTS
|
| 20 |
+
# Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9].
|
| 21 |
+
# The ARGS have 3 elements just to show how this could be done for any cubic
|
| 22 |
+
# polynomial.
|
| 23 |
+
A0 = tuple(-2.0 - x/10.0 for x in range(10)) # constant term
|
| 24 |
+
ARGS = (0.0, 0.0, 1.0) # 1st, 2nd, and 3rd order terms
|
| 25 |
+
XLO, XHI = 0.0, 2.0 # first and second bounds of zeros functions
|
| 26 |
+
# absolute and relative tolerances and max iterations for zeros functions
|
| 27 |
+
XTOL, RTOL, MITR = 0.001, 0.001, 10
|
| 28 |
+
EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0]
|
| 29 |
+
# = [1.2599210498948732,
|
| 30 |
+
# 1.2805791649874942,
|
| 31 |
+
# 1.300591446851387,
|
| 32 |
+
# 1.3200061217959123,
|
| 33 |
+
# 1.338865900164339,
|
| 34 |
+
# 1.3572088082974532,
|
| 35 |
+
# 1.375068867074141,
|
| 36 |
+
# 1.3924766500838337,
|
| 37 |
+
# 1.4094597464129783,
|
| 38 |
+
# 1.4260431471424087]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# test bisect
|
| 42 |
+
def test_bisect():
|
| 43 |
+
npt.assert_allclose(
|
| 44 |
+
EXPECTED,
|
| 45 |
+
list(
|
| 46 |
+
_zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
|
| 47 |
+
),
|
| 48 |
+
rtol=RTOL, atol=XTOL
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# test ridder
|
| 53 |
+
def test_ridder():
|
| 54 |
+
npt.assert_allclose(
|
| 55 |
+
EXPECTED,
|
| 56 |
+
list(
|
| 57 |
+
_zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
|
| 58 |
+
),
|
| 59 |
+
rtol=RTOL, atol=XTOL
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# test brenth
|
| 64 |
+
def test_brenth():
|
| 65 |
+
npt.assert_allclose(
|
| 66 |
+
EXPECTED,
|
| 67 |
+
list(
|
| 68 |
+
_zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
|
| 69 |
+
),
|
| 70 |
+
rtol=RTOL, atol=XTOL
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# test brentq
|
| 75 |
+
def test_brentq():
|
| 76 |
+
npt.assert_allclose(
|
| 77 |
+
EXPECTED,
|
| 78 |
+
list(
|
| 79 |
+
_zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
|
| 80 |
+
),
|
| 81 |
+
rtol=RTOL, atol=XTOL
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# test brentq with full output
|
| 86 |
+
def test_brentq_full_output():
|
| 87 |
+
output = _zeros.full_output_example(
|
| 88 |
+
(A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR)
|
| 89 |
+
npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL)
|
| 90 |
+
npt.assert_equal(6, output['iterations'])
|
| 91 |
+
npt.assert_equal(7, output['funcalls'])
|
| 92 |
+
npt.assert_equal(0, output['error_num'])
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py
ADDED
|
@@ -0,0 +1,805 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
import platform
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import (TestCase, assert_array_almost_equal,
|
| 5 |
+
assert_array_equal, assert_, assert_allclose,
|
| 6 |
+
assert_equal)
|
| 7 |
+
from scipy._lib._gcutils import assert_deallocated
|
| 8 |
+
from scipy.sparse import csr_matrix
|
| 9 |
+
from scipy.sparse.linalg import LinearOperator
|
| 10 |
+
from scipy.optimize._differentiable_functions import (ScalarFunction,
|
| 11 |
+
VectorFunction,
|
| 12 |
+
LinearVectorFunction,
|
| 13 |
+
IdentityVectorFunction)
|
| 14 |
+
from scipy.optimize import rosen, rosen_der, rosen_hess
|
| 15 |
+
from scipy.optimize._hessian_update_strategy import BFGS
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ExScalarFunction:
|
| 19 |
+
|
| 20 |
+
def __init__(self):
|
| 21 |
+
self.nfev = 0
|
| 22 |
+
self.ngev = 0
|
| 23 |
+
self.nhev = 0
|
| 24 |
+
|
| 25 |
+
def fun(self, x):
|
| 26 |
+
self.nfev += 1
|
| 27 |
+
return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
|
| 28 |
+
|
| 29 |
+
def grad(self, x):
|
| 30 |
+
self.ngev += 1
|
| 31 |
+
return np.array([4*x[0]-1, 4*x[1]])
|
| 32 |
+
|
| 33 |
+
def hess(self, x):
|
| 34 |
+
self.nhev += 1
|
| 35 |
+
return 4*np.eye(2)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class TestScalarFunction(TestCase):
|
| 39 |
+
|
| 40 |
+
def test_finite_difference_grad(self):
|
| 41 |
+
ex = ExScalarFunction()
|
| 42 |
+
nfev = 0
|
| 43 |
+
ngev = 0
|
| 44 |
+
|
| 45 |
+
x0 = [1.0, 0.0]
|
| 46 |
+
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
|
| 47 |
+
ex.hess, None, (-np.inf, np.inf))
|
| 48 |
+
nfev += 1
|
| 49 |
+
ngev += 1
|
| 50 |
+
assert_array_equal(ex.nfev, nfev)
|
| 51 |
+
assert_array_equal(analit.nfev, nfev)
|
| 52 |
+
assert_array_equal(ex.ngev, ngev)
|
| 53 |
+
assert_array_equal(analit.ngev, nfev)
|
| 54 |
+
approx = ScalarFunction(ex.fun, x0, (), '2-point',
|
| 55 |
+
ex.hess, None, (-np.inf, np.inf))
|
| 56 |
+
nfev += 3
|
| 57 |
+
ngev += 1
|
| 58 |
+
assert_array_equal(ex.nfev, nfev)
|
| 59 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 60 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 61 |
+
assert_array_equal(analit.f, approx.f)
|
| 62 |
+
assert_array_almost_equal(analit.g, approx.g)
|
| 63 |
+
|
| 64 |
+
x = [10, 0.3]
|
| 65 |
+
f_analit = analit.fun(x)
|
| 66 |
+
g_analit = analit.grad(x)
|
| 67 |
+
nfev += 1
|
| 68 |
+
ngev += 1
|
| 69 |
+
assert_array_equal(ex.nfev, nfev)
|
| 70 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 71 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 72 |
+
f_approx = approx.fun(x)
|
| 73 |
+
g_approx = approx.grad(x)
|
| 74 |
+
nfev += 3
|
| 75 |
+
ngev += 1
|
| 76 |
+
assert_array_equal(ex.nfev, nfev)
|
| 77 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 78 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 79 |
+
assert_array_almost_equal(f_analit, f_approx)
|
| 80 |
+
assert_array_almost_equal(g_analit, g_approx)
|
| 81 |
+
|
| 82 |
+
x = [2.0, 1.0]
|
| 83 |
+
g_analit = analit.grad(x)
|
| 84 |
+
ngev += 1
|
| 85 |
+
assert_array_equal(ex.nfev, nfev)
|
| 86 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 87 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 88 |
+
|
| 89 |
+
g_approx = approx.grad(x)
|
| 90 |
+
nfev += 3
|
| 91 |
+
ngev += 1
|
| 92 |
+
assert_array_equal(ex.nfev, nfev)
|
| 93 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 94 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 95 |
+
assert_array_almost_equal(g_analit, g_approx)
|
| 96 |
+
|
| 97 |
+
x = [2.5, 0.3]
|
| 98 |
+
f_analit = analit.fun(x)
|
| 99 |
+
g_analit = analit.grad(x)
|
| 100 |
+
nfev += 1
|
| 101 |
+
ngev += 1
|
| 102 |
+
assert_array_equal(ex.nfev, nfev)
|
| 103 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 104 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 105 |
+
f_approx = approx.fun(x)
|
| 106 |
+
g_approx = approx.grad(x)
|
| 107 |
+
nfev += 3
|
| 108 |
+
ngev += 1
|
| 109 |
+
assert_array_equal(ex.nfev, nfev)
|
| 110 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 111 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 112 |
+
assert_array_almost_equal(f_analit, f_approx)
|
| 113 |
+
assert_array_almost_equal(g_analit, g_approx)
|
| 114 |
+
|
| 115 |
+
x = [2, 0.3]
|
| 116 |
+
f_analit = analit.fun(x)
|
| 117 |
+
g_analit = analit.grad(x)
|
| 118 |
+
nfev += 1
|
| 119 |
+
ngev += 1
|
| 120 |
+
assert_array_equal(ex.nfev, nfev)
|
| 121 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 122 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 123 |
+
f_approx = approx.fun(x)
|
| 124 |
+
g_approx = approx.grad(x)
|
| 125 |
+
nfev += 3
|
| 126 |
+
ngev += 1
|
| 127 |
+
assert_array_equal(ex.nfev, nfev)
|
| 128 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 129 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 130 |
+
assert_array_almost_equal(f_analit, f_approx)
|
| 131 |
+
assert_array_almost_equal(g_analit, g_approx)
|
| 132 |
+
|
| 133 |
+
def test_fun_and_grad(self):
|
| 134 |
+
ex = ExScalarFunction()
|
| 135 |
+
|
| 136 |
+
def fg_allclose(x, y):
|
| 137 |
+
assert_allclose(x[0], y[0])
|
| 138 |
+
assert_allclose(x[1], y[1])
|
| 139 |
+
|
| 140 |
+
# with analytic gradient
|
| 141 |
+
x0 = [2.0, 0.3]
|
| 142 |
+
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
|
| 143 |
+
ex.hess, None, (-np.inf, np.inf))
|
| 144 |
+
|
| 145 |
+
fg = ex.fun(x0), ex.grad(x0)
|
| 146 |
+
fg_allclose(analit.fun_and_grad(x0), fg)
|
| 147 |
+
assert analit.ngev == 1
|
| 148 |
+
|
| 149 |
+
x0[1] = 1.
|
| 150 |
+
fg = ex.fun(x0), ex.grad(x0)
|
| 151 |
+
fg_allclose(analit.fun_and_grad(x0), fg)
|
| 152 |
+
|
| 153 |
+
# with finite difference gradient
|
| 154 |
+
x0 = [2.0, 0.3]
|
| 155 |
+
sf = ScalarFunction(ex.fun, x0, (), '3-point',
|
| 156 |
+
ex.hess, None, (-np.inf, np.inf))
|
| 157 |
+
assert sf.ngev == 1
|
| 158 |
+
fg = ex.fun(x0), ex.grad(x0)
|
| 159 |
+
fg_allclose(sf.fun_and_grad(x0), fg)
|
| 160 |
+
assert sf.ngev == 1
|
| 161 |
+
|
| 162 |
+
x0[1] = 1.
|
| 163 |
+
fg = ex.fun(x0), ex.grad(x0)
|
| 164 |
+
fg_allclose(sf.fun_and_grad(x0), fg)
|
| 165 |
+
|
| 166 |
+
def test_finite_difference_hess_linear_operator(self):
|
| 167 |
+
ex = ExScalarFunction()
|
| 168 |
+
nfev = 0
|
| 169 |
+
ngev = 0
|
| 170 |
+
nhev = 0
|
| 171 |
+
|
| 172 |
+
x0 = [1.0, 0.0]
|
| 173 |
+
analit = ScalarFunction(ex.fun, x0, (), ex.grad,
|
| 174 |
+
ex.hess, None, (-np.inf, np.inf))
|
| 175 |
+
nfev += 1
|
| 176 |
+
ngev += 1
|
| 177 |
+
nhev += 1
|
| 178 |
+
assert_array_equal(ex.nfev, nfev)
|
| 179 |
+
assert_array_equal(analit.nfev, nfev)
|
| 180 |
+
assert_array_equal(ex.ngev, ngev)
|
| 181 |
+
assert_array_equal(analit.ngev, ngev)
|
| 182 |
+
assert_array_equal(ex.nhev, nhev)
|
| 183 |
+
assert_array_equal(analit.nhev, nhev)
|
| 184 |
+
approx = ScalarFunction(ex.fun, x0, (), ex.grad,
|
| 185 |
+
'2-point', None, (-np.inf, np.inf))
|
| 186 |
+
assert_(isinstance(approx.H, LinearOperator))
|
| 187 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 188 |
+
assert_array_equal(analit.f, approx.f)
|
| 189 |
+
assert_array_almost_equal(analit.g, approx.g)
|
| 190 |
+
assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v))
|
| 191 |
+
nfev += 1
|
| 192 |
+
ngev += 4
|
| 193 |
+
assert_array_equal(ex.nfev, nfev)
|
| 194 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 195 |
+
assert_array_equal(ex.ngev, ngev)
|
| 196 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 197 |
+
assert_array_equal(ex.nhev, nhev)
|
| 198 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 199 |
+
|
| 200 |
+
x = [2.0, 1.0]
|
| 201 |
+
H_analit = analit.hess(x)
|
| 202 |
+
nhev += 1
|
| 203 |
+
assert_array_equal(ex.nfev, nfev)
|
| 204 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 205 |
+
assert_array_equal(ex.ngev, ngev)
|
| 206 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 207 |
+
assert_array_equal(ex.nhev, nhev)
|
| 208 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 209 |
+
H_approx = approx.hess(x)
|
| 210 |
+
assert_(isinstance(H_approx, LinearOperator))
|
| 211 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 212 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
| 213 |
+
ngev += 4
|
| 214 |
+
assert_array_equal(ex.nfev, nfev)
|
| 215 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 216 |
+
assert_array_equal(ex.ngev, ngev)
|
| 217 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 218 |
+
assert_array_equal(ex.nhev, nhev)
|
| 219 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 220 |
+
|
| 221 |
+
x = [2.1, 1.2]
|
| 222 |
+
H_analit = analit.hess(x)
|
| 223 |
+
nhev += 1
|
| 224 |
+
assert_array_equal(ex.nfev, nfev)
|
| 225 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 226 |
+
assert_array_equal(ex.ngev, ngev)
|
| 227 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 228 |
+
assert_array_equal(ex.nhev, nhev)
|
| 229 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 230 |
+
H_approx = approx.hess(x)
|
| 231 |
+
assert_(isinstance(H_approx, LinearOperator))
|
| 232 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 233 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
| 234 |
+
ngev += 4
|
| 235 |
+
assert_array_equal(ex.nfev, nfev)
|
| 236 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 237 |
+
assert_array_equal(ex.ngev, ngev)
|
| 238 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 239 |
+
assert_array_equal(ex.nhev, nhev)
|
| 240 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 241 |
+
|
| 242 |
+
x = [2.5, 0.3]
|
| 243 |
+
_ = analit.grad(x)
|
| 244 |
+
H_analit = analit.hess(x)
|
| 245 |
+
ngev += 1
|
| 246 |
+
nhev += 1
|
| 247 |
+
assert_array_equal(ex.nfev, nfev)
|
| 248 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 249 |
+
assert_array_equal(ex.ngev, ngev)
|
| 250 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 251 |
+
assert_array_equal(ex.nhev, nhev)
|
| 252 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 253 |
+
_ = approx.grad(x)
|
| 254 |
+
H_approx = approx.hess(x)
|
| 255 |
+
assert_(isinstance(H_approx, LinearOperator))
|
| 256 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 257 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
| 258 |
+
ngev += 4
|
| 259 |
+
assert_array_equal(ex.nfev, nfev)
|
| 260 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 261 |
+
assert_array_equal(ex.ngev, ngev)
|
| 262 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 263 |
+
assert_array_equal(ex.nhev, nhev)
|
| 264 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 265 |
+
|
| 266 |
+
x = [5.2, 2.3]
|
| 267 |
+
_ = analit.grad(x)
|
| 268 |
+
H_analit = analit.hess(x)
|
| 269 |
+
ngev += 1
|
| 270 |
+
nhev += 1
|
| 271 |
+
assert_array_equal(ex.nfev, nfev)
|
| 272 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 273 |
+
assert_array_equal(ex.ngev, ngev)
|
| 274 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 275 |
+
assert_array_equal(ex.nhev, nhev)
|
| 276 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 277 |
+
_ = approx.grad(x)
|
| 278 |
+
H_approx = approx.hess(x)
|
| 279 |
+
assert_(isinstance(H_approx, LinearOperator))
|
| 280 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 281 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
| 282 |
+
ngev += 4
|
| 283 |
+
assert_array_equal(ex.nfev, nfev)
|
| 284 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 285 |
+
assert_array_equal(ex.ngev, ngev)
|
| 286 |
+
assert_array_equal(analit.ngev+approx.ngev, ngev)
|
| 287 |
+
assert_array_equal(ex.nhev, nhev)
|
| 288 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 289 |
+
|
| 290 |
+
@pytest.mark.thread_unsafe
|
| 291 |
+
def test_x_storage_overlap(self):
|
| 292 |
+
# Scalar_Function should not store references to arrays, it should
|
| 293 |
+
# store copies - this checks that updating an array in-place causes
|
| 294 |
+
# Scalar_Function.x to be updated.
|
| 295 |
+
|
| 296 |
+
def f(x):
|
| 297 |
+
return np.sum(np.asarray(x) ** 2)
|
| 298 |
+
|
| 299 |
+
x = np.array([1., 2., 3.])
|
| 300 |
+
sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf))
|
| 301 |
+
|
| 302 |
+
assert x is not sf.x
|
| 303 |
+
assert_equal(sf.fun(x), 14.0)
|
| 304 |
+
assert x is not sf.x
|
| 305 |
+
|
| 306 |
+
x[0] = 0.
|
| 307 |
+
f1 = sf.fun(x)
|
| 308 |
+
assert_equal(f1, 13.0)
|
| 309 |
+
|
| 310 |
+
x[0] = 1
|
| 311 |
+
f2 = sf.fun(x)
|
| 312 |
+
assert_equal(f2, 14.0)
|
| 313 |
+
assert x is not sf.x
|
| 314 |
+
|
| 315 |
+
# now test with a HessianUpdate strategy specified
|
| 316 |
+
hess = BFGS()
|
| 317 |
+
x = np.array([1., 2., 3.])
|
| 318 |
+
sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf))
|
| 319 |
+
|
| 320 |
+
assert x is not sf.x
|
| 321 |
+
assert_equal(sf.fun(x), 14.0)
|
| 322 |
+
assert x is not sf.x
|
| 323 |
+
|
| 324 |
+
x[0] = 0.
|
| 325 |
+
f1 = sf.fun(x)
|
| 326 |
+
assert_equal(f1, 13.0)
|
| 327 |
+
|
| 328 |
+
x[0] = 1
|
| 329 |
+
f2 = sf.fun(x)
|
| 330 |
+
assert_equal(f2, 14.0)
|
| 331 |
+
assert x is not sf.x
|
| 332 |
+
|
| 333 |
+
# gh13740 x is changed in user function
|
| 334 |
+
def ff(x):
|
| 335 |
+
x *= x # overwrite x
|
| 336 |
+
return np.sum(x)
|
| 337 |
+
|
| 338 |
+
x = np.array([1., 2., 3.])
|
| 339 |
+
sf = ScalarFunction(
|
| 340 |
+
ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)
|
| 341 |
+
)
|
| 342 |
+
assert x is not sf.x
|
| 343 |
+
assert_equal(sf.fun(x), 14.0)
|
| 344 |
+
assert_equal(sf.x, np.array([1., 2., 3.]))
|
| 345 |
+
assert x is not sf.x
|
| 346 |
+
|
| 347 |
+
def test_lowest_x(self):
|
| 348 |
+
# ScalarFunction should remember the lowest func(x) visited.
|
| 349 |
+
x0 = np.array([2, 3, 4])
|
| 350 |
+
sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess,
|
| 351 |
+
None, None)
|
| 352 |
+
sf.fun([1, 1, 1])
|
| 353 |
+
sf.fun(x0)
|
| 354 |
+
sf.fun([1.01, 1, 1.0])
|
| 355 |
+
sf.grad([1.01, 1, 1.0])
|
| 356 |
+
assert_equal(sf._lowest_f, 0.0)
|
| 357 |
+
assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
|
| 358 |
+
|
| 359 |
+
sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess,
|
| 360 |
+
None, (-np.inf, np.inf))
|
| 361 |
+
sf.fun([1, 1, 1])
|
| 362 |
+
sf.fun(x0)
|
| 363 |
+
sf.fun([1.01, 1, 1.0])
|
| 364 |
+
sf.grad([1.01, 1, 1.0])
|
| 365 |
+
assert_equal(sf._lowest_f, 0.0)
|
| 366 |
+
assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
|
| 367 |
+
|
| 368 |
+
def test_float_size(self):
|
| 369 |
+
x0 = np.array([2, 3, 4]).astype(np.float32)
|
| 370 |
+
|
| 371 |
+
# check that ScalarFunction/approx_derivative always send the correct
|
| 372 |
+
# float width
|
| 373 |
+
def rosen_(x):
|
| 374 |
+
assert x.dtype == np.float32
|
| 375 |
+
return rosen(x)
|
| 376 |
+
|
| 377 |
+
sf = ScalarFunction(rosen_, x0, (), '2-point', rosen_hess,
|
| 378 |
+
None, (-np.inf, np.inf))
|
| 379 |
+
res = sf.fun(x0)
|
| 380 |
+
assert res.dtype == np.float32
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
class ExVectorialFunction:
|
| 384 |
+
|
| 385 |
+
def __init__(self):
|
| 386 |
+
self.nfev = 0
|
| 387 |
+
self.njev = 0
|
| 388 |
+
self.nhev = 0
|
| 389 |
+
|
| 390 |
+
def fun(self, x):
|
| 391 |
+
self.nfev += 1
|
| 392 |
+
return np.array([2*(x[0]**2 + x[1]**2 - 1) - x[0],
|
| 393 |
+
4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]], dtype=x.dtype)
|
| 394 |
+
|
| 395 |
+
def jac(self, x):
|
| 396 |
+
self.njev += 1
|
| 397 |
+
return np.array([[4*x[0]-1, 4*x[1]],
|
| 398 |
+
[12*x[0]**2-3, 8*x[1]]], dtype=x.dtype)
|
| 399 |
+
|
| 400 |
+
def hess(self, x, v):
|
| 401 |
+
self.nhev += 1
|
| 402 |
+
return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0],
|
| 403 |
+
[0, 8]])
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
class TestVectorialFunction(TestCase):
|
| 407 |
+
|
| 408 |
+
def test_finite_difference_jac(self):
|
| 409 |
+
ex = ExVectorialFunction()
|
| 410 |
+
nfev = 0
|
| 411 |
+
njev = 0
|
| 412 |
+
|
| 413 |
+
x0 = [1.0, 0.0]
|
| 414 |
+
analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
|
| 415 |
+
(-np.inf, np.inf), None)
|
| 416 |
+
nfev += 1
|
| 417 |
+
njev += 1
|
| 418 |
+
assert_array_equal(ex.nfev, nfev)
|
| 419 |
+
assert_array_equal(analit.nfev, nfev)
|
| 420 |
+
assert_array_equal(ex.njev, njev)
|
| 421 |
+
assert_array_equal(analit.njev, njev)
|
| 422 |
+
approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None,
|
| 423 |
+
(-np.inf, np.inf), None)
|
| 424 |
+
nfev += 3
|
| 425 |
+
assert_array_equal(ex.nfev, nfev)
|
| 426 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 427 |
+
assert_array_equal(ex.njev, njev)
|
| 428 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 429 |
+
assert_array_equal(analit.f, approx.f)
|
| 430 |
+
assert_array_almost_equal(analit.J, approx.J)
|
| 431 |
+
|
| 432 |
+
x = [10, 0.3]
|
| 433 |
+
f_analit = analit.fun(x)
|
| 434 |
+
J_analit = analit.jac(x)
|
| 435 |
+
nfev += 1
|
| 436 |
+
njev += 1
|
| 437 |
+
assert_array_equal(ex.nfev, nfev)
|
| 438 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 439 |
+
assert_array_equal(ex.njev, njev)
|
| 440 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 441 |
+
f_approx = approx.fun(x)
|
| 442 |
+
J_approx = approx.jac(x)
|
| 443 |
+
nfev += 3
|
| 444 |
+
assert_array_equal(ex.nfev, nfev)
|
| 445 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 446 |
+
assert_array_equal(ex.njev, njev)
|
| 447 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 448 |
+
assert_array_almost_equal(f_analit, f_approx)
|
| 449 |
+
assert_array_almost_equal(J_analit, J_approx, decimal=4)
|
| 450 |
+
|
| 451 |
+
x = [2.0, 1.0]
|
| 452 |
+
J_analit = analit.jac(x)
|
| 453 |
+
njev += 1
|
| 454 |
+
assert_array_equal(ex.nfev, nfev)
|
| 455 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 456 |
+
assert_array_equal(ex.njev, njev)
|
| 457 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 458 |
+
J_approx = approx.jac(x)
|
| 459 |
+
nfev += 3
|
| 460 |
+
assert_array_equal(ex.nfev, nfev)
|
| 461 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 462 |
+
assert_array_equal(ex.njev, njev)
|
| 463 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 464 |
+
assert_array_almost_equal(J_analit, J_approx)
|
| 465 |
+
|
| 466 |
+
x = [2.5, 0.3]
|
| 467 |
+
f_analit = analit.fun(x)
|
| 468 |
+
J_analit = analit.jac(x)
|
| 469 |
+
nfev += 1
|
| 470 |
+
njev += 1
|
| 471 |
+
assert_array_equal(ex.nfev, nfev)
|
| 472 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 473 |
+
assert_array_equal(ex.njev, njev)
|
| 474 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 475 |
+
f_approx = approx.fun(x)
|
| 476 |
+
J_approx = approx.jac(x)
|
| 477 |
+
nfev += 3
|
| 478 |
+
assert_array_equal(ex.nfev, nfev)
|
| 479 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 480 |
+
assert_array_equal(ex.njev, njev)
|
| 481 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 482 |
+
assert_array_almost_equal(f_analit, f_approx)
|
| 483 |
+
assert_array_almost_equal(J_analit, J_approx)
|
| 484 |
+
|
| 485 |
+
x = [2, 0.3]
|
| 486 |
+
f_analit = analit.fun(x)
|
| 487 |
+
J_analit = analit.jac(x)
|
| 488 |
+
nfev += 1
|
| 489 |
+
njev += 1
|
| 490 |
+
assert_array_equal(ex.nfev, nfev)
|
| 491 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 492 |
+
assert_array_equal(ex.njev, njev)
|
| 493 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 494 |
+
f_approx = approx.fun(x)
|
| 495 |
+
J_approx = approx.jac(x)
|
| 496 |
+
nfev += 3
|
| 497 |
+
assert_array_equal(ex.nfev, nfev)
|
| 498 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 499 |
+
assert_array_equal(ex.njev, njev)
|
| 500 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 501 |
+
assert_array_almost_equal(f_analit, f_approx)
|
| 502 |
+
assert_array_almost_equal(J_analit, J_approx)
|
| 503 |
+
|
| 504 |
+
def test_finite_difference_hess_linear_operator(self):
|
| 505 |
+
ex = ExVectorialFunction()
|
| 506 |
+
nfev = 0
|
| 507 |
+
njev = 0
|
| 508 |
+
nhev = 0
|
| 509 |
+
|
| 510 |
+
x0 = [1.0, 0.0]
|
| 511 |
+
v0 = [1.0, 2.0]
|
| 512 |
+
analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
|
| 513 |
+
(-np.inf, np.inf), None)
|
| 514 |
+
nfev += 1
|
| 515 |
+
njev += 1
|
| 516 |
+
nhev += 1
|
| 517 |
+
assert_array_equal(ex.nfev, nfev)
|
| 518 |
+
assert_array_equal(analit.nfev, nfev)
|
| 519 |
+
assert_array_equal(ex.njev, njev)
|
| 520 |
+
assert_array_equal(analit.njev, njev)
|
| 521 |
+
assert_array_equal(ex.nhev, nhev)
|
| 522 |
+
assert_array_equal(analit.nhev, nhev)
|
| 523 |
+
approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None,
|
| 524 |
+
(-np.inf, np.inf), None)
|
| 525 |
+
assert_(isinstance(approx.H, LinearOperator))
|
| 526 |
+
for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 527 |
+
assert_array_equal(analit.f, approx.f)
|
| 528 |
+
assert_array_almost_equal(analit.J, approx.J)
|
| 529 |
+
assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p))
|
| 530 |
+
nfev += 1
|
| 531 |
+
njev += 4
|
| 532 |
+
assert_array_equal(ex.nfev, nfev)
|
| 533 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 534 |
+
assert_array_equal(ex.njev, njev)
|
| 535 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 536 |
+
assert_array_equal(ex.nhev, nhev)
|
| 537 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 538 |
+
|
| 539 |
+
x = [2.0, 1.0]
|
| 540 |
+
H_analit = analit.hess(x, v0)
|
| 541 |
+
nhev += 1
|
| 542 |
+
assert_array_equal(ex.nfev, nfev)
|
| 543 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 544 |
+
assert_array_equal(ex.njev, njev)
|
| 545 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 546 |
+
assert_array_equal(ex.nhev, nhev)
|
| 547 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 548 |
+
H_approx = approx.hess(x, v0)
|
| 549 |
+
assert_(isinstance(H_approx, LinearOperator))
|
| 550 |
+
for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 551 |
+
assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p),
|
| 552 |
+
decimal=5)
|
| 553 |
+
njev += 4
|
| 554 |
+
assert_array_equal(ex.nfev, nfev)
|
| 555 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 556 |
+
assert_array_equal(ex.njev, njev)
|
| 557 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 558 |
+
assert_array_equal(ex.nhev, nhev)
|
| 559 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 560 |
+
|
| 561 |
+
x = [2.1, 1.2]
|
| 562 |
+
v = [1.0, 1.0]
|
| 563 |
+
H_analit = analit.hess(x, v)
|
| 564 |
+
nhev += 1
|
| 565 |
+
assert_array_equal(ex.nfev, nfev)
|
| 566 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 567 |
+
assert_array_equal(ex.njev, njev)
|
| 568 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 569 |
+
assert_array_equal(ex.nhev, nhev)
|
| 570 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 571 |
+
H_approx = approx.hess(x, v)
|
| 572 |
+
assert_(isinstance(H_approx, LinearOperator))
|
| 573 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 574 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
|
| 575 |
+
njev += 4
|
| 576 |
+
assert_array_equal(ex.nfev, nfev)
|
| 577 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 578 |
+
assert_array_equal(ex.njev, njev)
|
| 579 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 580 |
+
assert_array_equal(ex.nhev, nhev)
|
| 581 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 582 |
+
|
| 583 |
+
x = [2.5, 0.3]
|
| 584 |
+
_ = analit.jac(x)
|
| 585 |
+
H_analit = analit.hess(x, v0)
|
| 586 |
+
njev += 1
|
| 587 |
+
nhev += 1
|
| 588 |
+
assert_array_equal(ex.nfev, nfev)
|
| 589 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 590 |
+
assert_array_equal(ex.njev, njev)
|
| 591 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 592 |
+
assert_array_equal(ex.nhev, nhev)
|
| 593 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 594 |
+
_ = approx.jac(x)
|
| 595 |
+
H_approx = approx.hess(x, v0)
|
| 596 |
+
assert_(isinstance(H_approx, LinearOperator))
|
| 597 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 598 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
|
| 599 |
+
njev += 4
|
| 600 |
+
assert_array_equal(ex.nfev, nfev)
|
| 601 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 602 |
+
assert_array_equal(ex.njev, njev)
|
| 603 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 604 |
+
assert_array_equal(ex.nhev, nhev)
|
| 605 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 606 |
+
|
| 607 |
+
x = [5.2, 2.3]
|
| 608 |
+
v = [2.3, 5.2]
|
| 609 |
+
_ = analit.jac(x)
|
| 610 |
+
H_analit = analit.hess(x, v)
|
| 611 |
+
njev += 1
|
| 612 |
+
nhev += 1
|
| 613 |
+
assert_array_equal(ex.nfev, nfev)
|
| 614 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 615 |
+
assert_array_equal(ex.njev, njev)
|
| 616 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 617 |
+
assert_array_equal(ex.nhev, nhev)
|
| 618 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 619 |
+
_ = approx.jac(x)
|
| 620 |
+
H_approx = approx.hess(x, v)
|
| 621 |
+
assert_(isinstance(H_approx, LinearOperator))
|
| 622 |
+
for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
|
| 623 |
+
assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
|
| 624 |
+
njev += 4
|
| 625 |
+
assert_array_equal(ex.nfev, nfev)
|
| 626 |
+
assert_array_equal(analit.nfev+approx.nfev, nfev)
|
| 627 |
+
assert_array_equal(ex.njev, njev)
|
| 628 |
+
assert_array_equal(analit.njev+approx.njev, njev)
|
| 629 |
+
assert_array_equal(ex.nhev, nhev)
|
| 630 |
+
assert_array_equal(analit.nhev+approx.nhev, nhev)
|
| 631 |
+
|
| 632 |
+
@pytest.mark.thread_unsafe
|
| 633 |
+
def test_x_storage_overlap(self):
|
| 634 |
+
# VectorFunction should not store references to arrays, it should
|
| 635 |
+
# store copies - this checks that updating an array in-place causes
|
| 636 |
+
# Scalar_Function.x to be updated.
|
| 637 |
+
ex = ExVectorialFunction()
|
| 638 |
+
x0 = np.array([1.0, 0.0])
|
| 639 |
+
|
| 640 |
+
vf = VectorFunction(ex.fun, x0, '3-point', ex.hess, None, None,
|
| 641 |
+
(-np.inf, np.inf), None)
|
| 642 |
+
|
| 643 |
+
assert x0 is not vf.x
|
| 644 |
+
assert_equal(vf.fun(x0), ex.fun(x0))
|
| 645 |
+
assert x0 is not vf.x
|
| 646 |
+
|
| 647 |
+
x0[0] = 2.
|
| 648 |
+
assert_equal(vf.fun(x0), ex.fun(x0))
|
| 649 |
+
assert x0 is not vf.x
|
| 650 |
+
|
| 651 |
+
x0[0] = 1.
|
| 652 |
+
assert_equal(vf.fun(x0), ex.fun(x0))
|
| 653 |
+
assert x0 is not vf.x
|
| 654 |
+
|
| 655 |
+
# now test with a HessianUpdate strategy specified
|
| 656 |
+
hess = BFGS()
|
| 657 |
+
x0 = np.array([1.0, 0.0])
|
| 658 |
+
vf = VectorFunction(ex.fun, x0, '3-point', hess, None, None,
|
| 659 |
+
(-np.inf, np.inf), None)
|
| 660 |
+
|
| 661 |
+
with pytest.warns(UserWarning):
|
| 662 |
+
# filter UserWarning because ExVectorialFunction is linear and
|
| 663 |
+
# a quasi-Newton approximation is used for the Hessian.
|
| 664 |
+
assert x0 is not vf.x
|
| 665 |
+
assert_equal(vf.fun(x0), ex.fun(x0))
|
| 666 |
+
assert x0 is not vf.x
|
| 667 |
+
|
| 668 |
+
x0[0] = 2.
|
| 669 |
+
assert_equal(vf.fun(x0), ex.fun(x0))
|
| 670 |
+
assert x0 is not vf.x
|
| 671 |
+
|
| 672 |
+
x0[0] = 1.
|
| 673 |
+
assert_equal(vf.fun(x0), ex.fun(x0))
|
| 674 |
+
assert x0 is not vf.x
|
| 675 |
+
|
| 676 |
+
def test_float_size(self):
|
| 677 |
+
ex = ExVectorialFunction()
|
| 678 |
+
x0 = np.array([1.0, 0.0]).astype(np.float32)
|
| 679 |
+
|
| 680 |
+
vf = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
|
| 681 |
+
(-np.inf, np.inf), None)
|
| 682 |
+
|
| 683 |
+
res = vf.fun(x0)
|
| 684 |
+
assert res.dtype == np.float32
|
| 685 |
+
|
| 686 |
+
res = vf.jac(x0)
|
| 687 |
+
assert res.dtype == np.float32
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
def test_LinearVectorFunction():
|
| 691 |
+
A_dense = np.array([
|
| 692 |
+
[-1, 2, 0],
|
| 693 |
+
[0, 4, 2]
|
| 694 |
+
])
|
| 695 |
+
x0 = np.zeros(3)
|
| 696 |
+
A_sparse = csr_matrix(A_dense)
|
| 697 |
+
x = np.array([1, -1, 0])
|
| 698 |
+
v = np.array([-1, 1])
|
| 699 |
+
Ax = np.array([-3, -4])
|
| 700 |
+
|
| 701 |
+
f1 = LinearVectorFunction(A_dense, x0, None)
|
| 702 |
+
assert_(not f1.sparse_jacobian)
|
| 703 |
+
|
| 704 |
+
f2 = LinearVectorFunction(A_dense, x0, True)
|
| 705 |
+
assert_(f2.sparse_jacobian)
|
| 706 |
+
|
| 707 |
+
f3 = LinearVectorFunction(A_dense, x0, False)
|
| 708 |
+
assert_(not f3.sparse_jacobian)
|
| 709 |
+
|
| 710 |
+
f4 = LinearVectorFunction(A_sparse, x0, None)
|
| 711 |
+
assert_(f4.sparse_jacobian)
|
| 712 |
+
|
| 713 |
+
f5 = LinearVectorFunction(A_sparse, x0, True)
|
| 714 |
+
assert_(f5.sparse_jacobian)
|
| 715 |
+
|
| 716 |
+
f6 = LinearVectorFunction(A_sparse, x0, False)
|
| 717 |
+
assert_(not f6.sparse_jacobian)
|
| 718 |
+
|
| 719 |
+
assert_array_equal(f1.fun(x), Ax)
|
| 720 |
+
assert_array_equal(f2.fun(x), Ax)
|
| 721 |
+
assert_array_equal(f1.jac(x), A_dense)
|
| 722 |
+
assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray())
|
| 723 |
+
assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
def test_LinearVectorFunction_memoization():
|
| 727 |
+
A = np.array([[-1, 2, 0], [0, 4, 2]])
|
| 728 |
+
x0 = np.array([1, 2, -1])
|
| 729 |
+
fun = LinearVectorFunction(A, x0, False)
|
| 730 |
+
|
| 731 |
+
assert_array_equal(x0, fun.x)
|
| 732 |
+
assert_array_equal(A.dot(x0), fun.f)
|
| 733 |
+
|
| 734 |
+
x1 = np.array([-1, 3, 10])
|
| 735 |
+
assert_array_equal(A, fun.jac(x1))
|
| 736 |
+
assert_array_equal(x1, fun.x)
|
| 737 |
+
assert_array_equal(A.dot(x0), fun.f)
|
| 738 |
+
assert_array_equal(A.dot(x1), fun.fun(x1))
|
| 739 |
+
assert_array_equal(A.dot(x1), fun.f)
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
def test_IdentityVectorFunction():
|
| 743 |
+
x0 = np.zeros(3)
|
| 744 |
+
|
| 745 |
+
f1 = IdentityVectorFunction(x0, None)
|
| 746 |
+
f2 = IdentityVectorFunction(x0, False)
|
| 747 |
+
f3 = IdentityVectorFunction(x0, True)
|
| 748 |
+
|
| 749 |
+
assert_(f1.sparse_jacobian)
|
| 750 |
+
assert_(not f2.sparse_jacobian)
|
| 751 |
+
assert_(f3.sparse_jacobian)
|
| 752 |
+
|
| 753 |
+
x = np.array([-1, 2, 1])
|
| 754 |
+
v = np.array([-2, 3, 0])
|
| 755 |
+
|
| 756 |
+
assert_array_equal(f1.fun(x), x)
|
| 757 |
+
assert_array_equal(f2.fun(x), x)
|
| 758 |
+
|
| 759 |
+
assert_array_equal(f1.jac(x).toarray(), np.eye(3))
|
| 760 |
+
assert_array_equal(f2.jac(x), np.eye(3))
|
| 761 |
+
|
| 762 |
+
assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
@pytest.mark.skipif(
|
| 766 |
+
platform.python_implementation() == "PyPy",
|
| 767 |
+
reason="assert_deallocate not available on PyPy"
|
| 768 |
+
)
|
| 769 |
+
def test_ScalarFunctionNoReferenceCycle():
|
| 770 |
+
"""Regression test for gh-20768."""
|
| 771 |
+
ex = ExScalarFunction()
|
| 772 |
+
x0 = np.zeros(3)
|
| 773 |
+
with assert_deallocated(lambda: ScalarFunction(ex.fun, x0, (), ex.grad,
|
| 774 |
+
ex.hess, None, (-np.inf, np.inf))):
|
| 775 |
+
pass
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
@pytest.mark.skipif(
|
| 779 |
+
platform.python_implementation() == "PyPy",
|
| 780 |
+
reason="assert_deallocate not available on PyPy"
|
| 781 |
+
)
|
| 782 |
+
@pytest.mark.xfail(reason="TODO remove reference cycle from VectorFunction")
|
| 783 |
+
def test_VectorFunctionNoReferenceCycle():
|
| 784 |
+
"""Regression test for gh-20768."""
|
| 785 |
+
ex = ExVectorialFunction()
|
| 786 |
+
x0 = [1.0, 0.0]
|
| 787 |
+
with assert_deallocated(lambda: VectorFunction(ex.fun, x0, ex.jac,
|
| 788 |
+
ex.hess, None, None, (-np.inf, np.inf), None)):
|
| 789 |
+
pass
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
@pytest.mark.skipif(
|
| 793 |
+
platform.python_implementation() == "PyPy",
|
| 794 |
+
reason="assert_deallocate not available on PyPy"
|
| 795 |
+
)
|
| 796 |
+
def test_LinearVectorFunctionNoReferenceCycle():
|
| 797 |
+
"""Regression test for gh-20768."""
|
| 798 |
+
A_dense = np.array([
|
| 799 |
+
[-1, 2, 0],
|
| 800 |
+
[0, 4, 2]
|
| 801 |
+
])
|
| 802 |
+
x0 = np.zeros(3)
|
| 803 |
+
A_sparse = csr_matrix(A_dense)
|
| 804 |
+
with assert_deallocated(lambda: LinearVectorFunction(A_sparse, x0, None)):
|
| 805 |
+
pass
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from copy import deepcopy
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
from numpy.linalg import norm
|
| 7 |
+
from numpy.testing import (TestCase, assert_array_almost_equal,
|
| 8 |
+
assert_array_equal, assert_array_less)
|
| 9 |
+
from scipy.optimize import (BFGS, SR1)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Rosenbrock:
|
| 13 |
+
"""Rosenbrock function.
|
| 14 |
+
|
| 15 |
+
The following optimization problem:
|
| 16 |
+
minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
def __init__(self, n=2, random_state=0):
|
| 20 |
+
rng = np.random.RandomState(random_state)
|
| 21 |
+
self.x0 = rng.uniform(-1, 1, n)
|
| 22 |
+
self.x_opt = np.ones(n)
|
| 23 |
+
|
| 24 |
+
def fun(self, x):
|
| 25 |
+
x = np.asarray(x)
|
| 26 |
+
r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
|
| 27 |
+
axis=0)
|
| 28 |
+
return r
|
| 29 |
+
|
| 30 |
+
def grad(self, x):
|
| 31 |
+
x = np.asarray(x)
|
| 32 |
+
xm = x[1:-1]
|
| 33 |
+
xm_m1 = x[:-2]
|
| 34 |
+
xm_p1 = x[2:]
|
| 35 |
+
der = np.zeros_like(x)
|
| 36 |
+
der[1:-1] = (200 * (xm - xm_m1**2) -
|
| 37 |
+
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
|
| 38 |
+
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
|
| 39 |
+
der[-1] = 200 * (x[-1] - x[-2]**2)
|
| 40 |
+
return der
|
| 41 |
+
|
| 42 |
+
def hess(self, x):
|
| 43 |
+
x = np.atleast_1d(x)
|
| 44 |
+
H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
|
| 45 |
+
diagonal = np.zeros(len(x), dtype=x.dtype)
|
| 46 |
+
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
|
| 47 |
+
diagonal[-1] = 200
|
| 48 |
+
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
|
| 49 |
+
H = H + np.diag(diagonal)
|
| 50 |
+
return H
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class TestHessianUpdateStrategy(TestCase):
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def test_hessian_initialization(self):
|
| 57 |
+
|
| 58 |
+
ndims = 5
|
| 59 |
+
symmetric_matrix = np.array([[43, 24, 33, 34, 49],
|
| 60 |
+
[24, 36, 44, 15, 44],
|
| 61 |
+
[33, 44, 37, 1, 30],
|
| 62 |
+
[34, 15, 1, 5, 46],
|
| 63 |
+
[49, 44, 30, 46, 22]])
|
| 64 |
+
init_scales = (
|
| 65 |
+
('auto', np.eye(ndims)),
|
| 66 |
+
(2, np.eye(ndims) * 2),
|
| 67 |
+
(np.arange(1, ndims + 1) * np.eye(ndims),
|
| 68 |
+
np.arange(1, ndims + 1) * np.eye(ndims)),
|
| 69 |
+
(symmetric_matrix, symmetric_matrix),)
|
| 70 |
+
for approx_type in ['hess', 'inv_hess']:
|
| 71 |
+
for init_scale, true_matrix in init_scales:
|
| 72 |
+
# large min_{denominator,curvatur} makes them skip an update,
|
| 73 |
+
# so we can have our initial matrix
|
| 74 |
+
quasi_newton = (BFGS(init_scale=init_scale,
|
| 75 |
+
min_curvature=1e50,
|
| 76 |
+
exception_strategy='skip_update'),
|
| 77 |
+
SR1(init_scale=init_scale,
|
| 78 |
+
min_denominator=1e50))
|
| 79 |
+
|
| 80 |
+
for qn in quasi_newton:
|
| 81 |
+
qn.initialize(ndims, approx_type)
|
| 82 |
+
B = qn.get_matrix()
|
| 83 |
+
|
| 84 |
+
assert_array_equal(B, np.eye(ndims))
|
| 85 |
+
# don't test the auto init scale
|
| 86 |
+
if isinstance(init_scale, str) and init_scale == 'auto':
|
| 87 |
+
continue
|
| 88 |
+
|
| 89 |
+
qn.update(np.ones(ndims) * 1e-5, np.arange(ndims) + 0.2)
|
| 90 |
+
B = qn.get_matrix()
|
| 91 |
+
assert_array_equal(B, true_matrix)
|
| 92 |
+
|
| 93 |
+
# For this list of points, it is known
|
| 94 |
+
# that no exception occur during the
|
| 95 |
+
# Hessian update. Hence no update is
|
| 96 |
+
# skiped or damped.
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def test_initialize_catch_illegal(self):
|
| 100 |
+
ndims = 3
|
| 101 |
+
# no complex allowed
|
| 102 |
+
inits_msg_errtype = ((complex(3.14),
|
| 103 |
+
r"float\(\) argument must be a string or a "
|
| 104 |
+
r"(real )?number, not 'complex'",
|
| 105 |
+
TypeError),
|
| 106 |
+
|
| 107 |
+
(np.array([3.2, 2.3, 1.2]).astype(np.complex128),
|
| 108 |
+
"init_scale contains complex elements, "
|
| 109 |
+
"must be real.",
|
| 110 |
+
TypeError),
|
| 111 |
+
|
| 112 |
+
(np.array([[43, 24, 33],
|
| 113 |
+
[24, 36, 44, ],
|
| 114 |
+
[33, 44, 37, ]]).astype(np.complex128),
|
| 115 |
+
"init_scale contains complex elements, "
|
| 116 |
+
"must be real.",
|
| 117 |
+
TypeError),
|
| 118 |
+
|
| 119 |
+
# not square
|
| 120 |
+
(np.array([[43, 55, 66]]),
|
| 121 |
+
re.escape(
|
| 122 |
+
"If init_scale is an array, it must have the "
|
| 123 |
+
"dimensions of the hess/inv_hess: (3, 3)."
|
| 124 |
+
" Got (1, 3)."),
|
| 125 |
+
ValueError),
|
| 126 |
+
|
| 127 |
+
# not symmetric
|
| 128 |
+
(np.array([[43, 24, 33],
|
| 129 |
+
[24.1, 36, 44, ],
|
| 130 |
+
[33, 44, 37, ]]),
|
| 131 |
+
re.escape("If init_scale is an array, it must be"
|
| 132 |
+
" symmetric (passing scipy.linalg.issymmetric)"
|
| 133 |
+
" to be an approximation of a hess/inv_hess."),
|
| 134 |
+
ValueError),
|
| 135 |
+
)
|
| 136 |
+
for approx_type in ['hess', 'inv_hess']:
|
| 137 |
+
for init_scale, message, errortype in inits_msg_errtype:
|
| 138 |
+
# large min_{denominator,curvatur} makes it skip an update,
|
| 139 |
+
# so we can retrieve our initial matrix
|
| 140 |
+
quasi_newton = (BFGS(init_scale=init_scale),
|
| 141 |
+
SR1(init_scale=init_scale))
|
| 142 |
+
|
| 143 |
+
for qn in quasi_newton:
|
| 144 |
+
qn.initialize(ndims, approx_type)
|
| 145 |
+
with pytest.raises(errortype, match=message):
|
| 146 |
+
qn.update(np.ones(ndims), np.arange(ndims))
|
| 147 |
+
|
| 148 |
+
def test_rosenbrock_with_no_exception(self):
|
| 149 |
+
# Define auxiliary problem
|
| 150 |
+
prob = Rosenbrock(n=5)
|
| 151 |
+
# Define iteration points
|
| 152 |
+
x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
|
| 153 |
+
[0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
|
| 154 |
+
[0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
|
| 155 |
+
[0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
|
| 156 |
+
[0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
|
| 157 |
+
[0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
|
| 158 |
+
[0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
|
| 159 |
+
[0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
|
| 160 |
+
[0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
|
| 161 |
+
[0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
|
| 162 |
+
[0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
|
| 163 |
+
[0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
|
| 164 |
+
[0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
|
| 165 |
+
[0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
|
| 166 |
+
[0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
|
| 167 |
+
[0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
|
| 168 |
+
[0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
|
| 169 |
+
[0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
|
| 170 |
+
[0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338],
|
| 171 |
+
[0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691],
|
| 172 |
+
[0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041],
|
| 173 |
+
[0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744],
|
| 174 |
+
[0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623],
|
| 175 |
+
[0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448],
|
| 176 |
+
[0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437],
|
| 177 |
+
[0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581],
|
| 178 |
+
[0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553],
|
| 179 |
+
[0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149],
|
| 180 |
+
[0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663],
|
| 181 |
+
[0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288],
|
| 182 |
+
[0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356],
|
| 183 |
+
[1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912],
|
| 184 |
+
[0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305],
|
| 185 |
+
[1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047],
|
| 186 |
+
[1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297],
|
| 187 |
+
[0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032],
|
| 188 |
+
[0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786],
|
| 189 |
+
[0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]]
|
| 190 |
+
# Get iteration points
|
| 191 |
+
grad_list = [prob.grad(x) for x in x_list]
|
| 192 |
+
delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
|
| 193 |
+
for i in range(len(x_list)-1)]
|
| 194 |
+
delta_grad = [grad_list[i+1]-grad_list[i]
|
| 195 |
+
for i in range(len(grad_list)-1)]
|
| 196 |
+
# Check curvature condition
|
| 197 |
+
for s, y in zip(delta_x, delta_grad):
|
| 198 |
+
if np.dot(s, y) <= 0:
|
| 199 |
+
raise ArithmeticError()
|
| 200 |
+
# Define QuasiNewton update
|
| 201 |
+
for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4),
|
| 202 |
+
SR1(init_scale=1)):
|
| 203 |
+
hess = deepcopy(quasi_newton)
|
| 204 |
+
inv_hess = deepcopy(quasi_newton)
|
| 205 |
+
hess.initialize(len(x_list[0]), 'hess')
|
| 206 |
+
inv_hess.initialize(len(x_list[0]), 'inv_hess')
|
| 207 |
+
# Compare the hessian and its inverse
|
| 208 |
+
for s, y in zip(delta_x, delta_grad):
|
| 209 |
+
hess.update(s, y)
|
| 210 |
+
inv_hess.update(s, y)
|
| 211 |
+
B = hess.get_matrix()
|
| 212 |
+
H = inv_hess.get_matrix()
|
| 213 |
+
assert_array_almost_equal(np.linalg.inv(B), H, decimal=10)
|
| 214 |
+
B_true = prob.hess(x_list[len(delta_x)])
|
| 215 |
+
assert_array_less(norm(B - B_true)/norm(B_true), 0.1)
|
| 216 |
+
|
| 217 |
+
def test_SR1_skip_update(self):
|
| 218 |
+
# Define auxiliary problem
|
| 219 |
+
prob = Rosenbrock(n=5)
|
| 220 |
+
# Define iteration points
|
| 221 |
+
x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
|
| 222 |
+
[0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
|
| 223 |
+
[0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
|
| 224 |
+
[0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
|
| 225 |
+
[0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
|
| 226 |
+
[0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
|
| 227 |
+
[0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
|
| 228 |
+
[0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
|
| 229 |
+
[0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
|
| 230 |
+
[0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
|
| 231 |
+
[0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
|
| 232 |
+
[0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
|
| 233 |
+
[0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
|
| 234 |
+
[0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
|
| 235 |
+
[0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
|
| 236 |
+
[0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
|
| 237 |
+
[0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
|
| 238 |
+
[0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
|
| 239 |
+
[0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]]
|
| 240 |
+
# Get iteration points
|
| 241 |
+
grad_list = [prob.grad(x) for x in x_list]
|
| 242 |
+
delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
|
| 243 |
+
for i in range(len(x_list)-1)]
|
| 244 |
+
delta_grad = [grad_list[i+1]-grad_list[i]
|
| 245 |
+
for i in range(len(grad_list)-1)]
|
| 246 |
+
hess = SR1(init_scale=1, min_denominator=1e-2)
|
| 247 |
+
hess.initialize(len(x_list[0]), 'hess')
|
| 248 |
+
# Compare the Hessian and its inverse
|
| 249 |
+
for i in range(len(delta_x)-1):
|
| 250 |
+
s = delta_x[i]
|
| 251 |
+
y = delta_grad[i]
|
| 252 |
+
hess.update(s, y)
|
| 253 |
+
# Test skip update
|
| 254 |
+
B = np.copy(hess.get_matrix())
|
| 255 |
+
s = delta_x[17]
|
| 256 |
+
y = delta_grad[17]
|
| 257 |
+
hess.update(s, y)
|
| 258 |
+
B_updated = np.copy(hess.get_matrix())
|
| 259 |
+
assert_array_equal(B, B_updated)
|
| 260 |
+
|
| 261 |
+
def test_BFGS_skip_update(self):
|
| 262 |
+
# Define auxiliary problem
|
| 263 |
+
prob = Rosenbrock(n=5)
|
| 264 |
+
# Define iteration points
|
| 265 |
+
x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
|
| 266 |
+
[0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
|
| 267 |
+
[0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
|
| 268 |
+
[0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
|
| 269 |
+
[0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
|
| 270 |
+
[0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
|
| 271 |
+
[0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]]
|
| 272 |
+
# Get iteration points
|
| 273 |
+
grad_list = [prob.grad(x) for x in x_list]
|
| 274 |
+
delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
|
| 275 |
+
for i in range(len(x_list)-1)]
|
| 276 |
+
delta_grad = [grad_list[i+1]-grad_list[i]
|
| 277 |
+
for i in range(len(grad_list)-1)]
|
| 278 |
+
hess = BFGS(init_scale=1, min_curvature=10)
|
| 279 |
+
hess.initialize(len(x_list[0]), 'hess')
|
| 280 |
+
# Compare the Hessian and its inverse
|
| 281 |
+
for i in range(len(delta_x)-1):
|
| 282 |
+
s = delta_x[i]
|
| 283 |
+
y = delta_grad[i]
|
| 284 |
+
hess.update(s, y)
|
| 285 |
+
# Test skip update
|
| 286 |
+
B = np.copy(hess.get_matrix())
|
| 287 |
+
s = delta_x[5]
|
| 288 |
+
y = delta_grad[5]
|
| 289 |
+
hess.update(s, y)
|
| 290 |
+
B_updated = np.copy(hess.get_matrix())
|
| 291 |
+
assert_array_equal(B, B_updated)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
@pytest.mark.parametrize('strategy', [BFGS, SR1])
|
| 295 |
+
@pytest.mark.parametrize('approx_type', ['hess', 'inv_hess'])
|
| 296 |
+
def test_matmul_equals_dot(strategy, approx_type):
|
| 297 |
+
H = strategy(init_scale=1)
|
| 298 |
+
H.initialize(2, approx_type)
|
| 299 |
+
v = np.array([1, 2])
|
| 300 |
+
assert_array_equal(H @ v, H.dot(v))
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_allclose, assert_equal
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
from scipy.optimize._pava_pybind import pava
|
| 6 |
+
from scipy.optimize import isotonic_regression
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TestIsotonicRegression:
|
| 10 |
+
@pytest.mark.parametrize(
|
| 11 |
+
("y", "w", "msg"),
|
| 12 |
+
[
|
| 13 |
+
([[0, 1]], None,
|
| 14 |
+
"array has incorrect number of dimensions: 2; expected 1"),
|
| 15 |
+
([0, 1], [[1, 2]],
|
| 16 |
+
"Input arrays y and w must have one dimension of equal length"),
|
| 17 |
+
([0, 1], [1],
|
| 18 |
+
"Input arrays y and w must have one dimension of equal length"),
|
| 19 |
+
(1, [1, 2],
|
| 20 |
+
"Input arrays y and w must have one dimension of equal length"),
|
| 21 |
+
([1, 2], 1,
|
| 22 |
+
"Input arrays y and w must have one dimension of equal length"),
|
| 23 |
+
([0, 1], [0, 1],
|
| 24 |
+
"Weights w must be strictly positive"),
|
| 25 |
+
]
|
| 26 |
+
)
|
| 27 |
+
def test_raise_error(self, y, w, msg):
|
| 28 |
+
with pytest.raises(ValueError, match=msg):
|
| 29 |
+
isotonic_regression(y=y, weights=w)
|
| 30 |
+
|
| 31 |
+
def test_simple_pava(self):
|
| 32 |
+
# Test case of Busing 2020
|
| 33 |
+
# https://doi.org/10.18637/jss.v102.c01
|
| 34 |
+
y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64)
|
| 35 |
+
w = np.ones_like(y)
|
| 36 |
+
r = np.full(shape=y.shape[0] + 1, fill_value=-1, dtype=np.intp)
|
| 37 |
+
pava(y, w, r)
|
| 38 |
+
assert_allclose(y, [4, 4, 4, 4, 4, 4, 8])
|
| 39 |
+
# Only first 2 elements of w are changed.
|
| 40 |
+
assert_allclose(w, [6, 1, 1, 1, 1, 1, 1])
|
| 41 |
+
# Only first 3 elements of r are changed.
|
| 42 |
+
assert_allclose(r, [0, 6, 7, -1, -1, -1, -1, -1])
|
| 43 |
+
|
| 44 |
+
@pytest.mark.parametrize("y_dtype", [np.float64, np.float32, np.int64, np.int32])
|
| 45 |
+
@pytest.mark.parametrize("w_dtype", [np.float64, np.float32, np.int64, np.int32])
|
| 46 |
+
@pytest.mark.parametrize("w", [None, "ones"])
|
| 47 |
+
def test_simple_isotonic_regression(self, w, w_dtype, y_dtype):
|
| 48 |
+
# Test case of Busing 2020
|
| 49 |
+
# https://doi.org/10.18637/jss.v102.c01
|
| 50 |
+
y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=y_dtype)
|
| 51 |
+
if w is not None:
|
| 52 |
+
w = np.ones_like(y, dtype=w_dtype)
|
| 53 |
+
res = isotonic_regression(y, weights=w)
|
| 54 |
+
assert res.x.dtype == np.float64
|
| 55 |
+
assert res.weights.dtype == np.float64
|
| 56 |
+
assert_allclose(res.x, [4, 4, 4, 4, 4, 4, 8])
|
| 57 |
+
assert_allclose(res.weights, [6, 1])
|
| 58 |
+
assert_allclose(res.blocks, [0, 6, 7])
|
| 59 |
+
# Assert that y was not overwritten
|
| 60 |
+
assert_equal(y, np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64))
|
| 61 |
+
|
| 62 |
+
@pytest.mark.parametrize("increasing", [True, False])
|
| 63 |
+
def test_linspace(self, increasing):
|
| 64 |
+
n = 10
|
| 65 |
+
y = np.linspace(0, 1, n) if increasing else np.linspace(1, 0, n)
|
| 66 |
+
res = isotonic_regression(y, increasing=increasing)
|
| 67 |
+
assert_allclose(res.x, y)
|
| 68 |
+
assert_allclose(res.blocks, np.arange(n + 1))
|
| 69 |
+
|
| 70 |
+
def test_weights(self):
|
| 71 |
+
w = np.array([1, 2, 5, 0.5, 0.5, 0.5, 1, 3])
|
| 72 |
+
y = np.array([3, 2, 1, 10, 9, 8, 20, 10])
|
| 73 |
+
res = isotonic_regression(y, weights=w)
|
| 74 |
+
assert_allclose(res.x, [12/8, 12/8, 12/8, 9, 9, 9, 50/4, 50/4])
|
| 75 |
+
assert_allclose(res.weights, [8, 1.5, 4])
|
| 76 |
+
assert_allclose(res.blocks, [0, 3, 6, 8])
|
| 77 |
+
|
| 78 |
+
# weights are like repeated observations, we repeat the 3rd element 5
|
| 79 |
+
# times.
|
| 80 |
+
w2 = np.array([1, 2, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 1, 3])
|
| 81 |
+
y2 = np.array([3, 2, 1, 1, 1, 1, 1, 10, 9, 8, 20, 10])
|
| 82 |
+
res2 = isotonic_regression(y2, weights=w2)
|
| 83 |
+
assert_allclose(np.diff(res2.x[0:7]), 0)
|
| 84 |
+
assert_allclose(res2.x[4:], res.x)
|
| 85 |
+
assert_allclose(res2.weights, res.weights)
|
| 86 |
+
assert_allclose(res2.blocks[1:] - 4, res.blocks[1:])
|
| 87 |
+
|
| 88 |
+
    def test_against_R_monotone(self):
        """Compare fitted values against reference output of the R
        'monotone' package, and check monotonicity, the balance property
        (sum preserved), and the increasing/decreasing symmetry."""
        y = [0, 6, 8, 3, 5, 2, 1, 7, 9, 4]
        res = isotonic_regression(y)
        # R code
        # library(monotone)
        # options(digits=8)
        # monotone(c(0, 6, 8, 3, 5, 2, 1, 7, 9, 4))
        x_R = [
            0, 4.1666667, 4.1666667, 4.1666667, 4.1666667, 4.1666667,
            4.1666667, 6.6666667, 6.6666667, 6.6666667,
        ]
        assert_allclose(res.x, x_R)
        assert_equal(res.blocks, [0, 1, 7, 10])

        n = 100
        y = np.linspace(0, 1, num=n, endpoint=False)
        y = 5 * y + np.sin(10 * y)
        res = isotonic_regression(y)
        # R code
        # library(monotone)
        # n <- 100
        # y <- 5 * ((1:n)-1)/n + sin(10 * ((1:n)-1)/n)
        # options(digits=8)
        # monotone(y)
        x_R = [
            0.00000000, 0.14983342, 0.29866933, 0.44552021, 0.58941834, 0.72942554,
            0.86464247, 0.99421769, 1.11735609, 1.23332691, 1.34147098, 1.44120736,
            1.53203909, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.62418532, 1.71654534, 1.81773256,
            1.92723551, 2.04445967, 2.16873336, 2.29931446, 2.43539782, 2.57612334,
            2.72058450, 2.86783750, 3.01691060, 3.16681390, 3.31654920, 3.46511999,
            3.61154136, 3.75484992, 3.89411335, 4.02843976, 4.15698660, 4.27896904,
            4.39366786, 4.50043662, 4.59870810, 4.68799998, 4.76791967, 4.83816823,
            4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
            4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
            4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
            4.86564130, 4.86564130, 4.86564130, 4.86564130,
        ]
        assert_allclose(res.x, x_R)

        # Test increasing
        assert np.all(np.diff(res.x) >= 0)

        # Test balance property: sum(y) == sum(x)
        assert_allclose(np.sum(res.x), np.sum(y))

        # Reverse order
        res_inv = isotonic_regression(-y, increasing=False)
        assert_allclose(-res_inv.x, res.x)
        assert_equal(res_inv.blocks, res.blocks)
|
| 143 |
+
|
| 144 |
+
def test_readonly(self):
|
| 145 |
+
x = np.arange(3, dtype=float)
|
| 146 |
+
w = np.ones(3, dtype=float)
|
| 147 |
+
|
| 148 |
+
x.flags.writeable = False
|
| 149 |
+
w.flags.writeable = False
|
| 150 |
+
|
| 151 |
+
res = isotonic_regression(x, weights=w)
|
| 152 |
+
assert np.all(np.isfinite(res.x))
|
| 153 |
+
assert np.all(np.isfinite(res.weights))
|
| 154 |
+
assert np.all(np.isfinite(res.blocks))
|
| 155 |
+
|
| 156 |
+
def test_non_contiguous_arrays(self):
|
| 157 |
+
x = np.arange(10, dtype=float)[::3]
|
| 158 |
+
w = np.ones(10, dtype=float)[::3]
|
| 159 |
+
assert not x.flags.c_contiguous
|
| 160 |
+
assert not x.flags.f_contiguous
|
| 161 |
+
assert not w.flags.c_contiguous
|
| 162 |
+
assert not w.flags.f_contiguous
|
| 163 |
+
|
| 164 |
+
res = isotonic_regression(x, weights=w)
|
| 165 |
+
assert np.all(np.isfinite(res.x))
|
| 166 |
+
assert np.all(np.isfinite(res.weights))
|
| 167 |
+
assert np.all(np.isfinite(res.blocks))
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numpy.testing import assert_allclose
|
| 3 |
+
import scipy.linalg
|
| 4 |
+
from scipy.optimize import minimize
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_1():
    """L-BFGS-B hess_inv: applying the operator and densifying it must
    agree, across a sweep of gtol values and history sizes."""
    def quartic(x):
        # Objective x**4 with its analytic gradient.
        return x**4, 4 * x**3

    for tol in (1e-8, 1e-12, 1e-20):
        for history in range(20, 35):
            opts = {'gtol': tol, 'maxcor': history}
            result = minimize(fun=quartic, jac=True, method='L-BFGS-B',
                              x0=20, options=opts)

            applied = result.hess_inv(np.array([1])).reshape(1, 1)
            dense = result.hess_inv.todense()

            assert_allclose(applied, dense)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def test_2():
    """Compare the L-BFGS-B inverse-Hessian estimate (operator form and
    dense form) against itself and against the dense BFGS estimate."""
    H0 = [[3, 0], [1, 2]]

    def quad(x):
        # Quadratic form x^T inv(H0) x, so the true inverse Hessian ~ H0.
        return np.dot(x, np.dot(scipy.linalg.inv(H0), x))

    res_lbfgsb = minimize(fun=quad, method='L-BFGS-B', x0=[10, 20])
    res_bfgs = minimize(fun=quad, method='BFGS', x0=[10, 20])

    dense = res_lbfgsb.hess_inv.todense()

    # Rebuild the matrix by applying the operator to the unit vectors.
    by_units = np.vstack((
        res_lbfgsb.hess_inv(np.array([1, 0])),
        res_lbfgsb.hess_inv(np.array([0, 1]))))

    # Column-vector input must give the same result as 1-D input.
    assert_allclose(
        res_lbfgsb.hess_inv(np.array([1, 0]).reshape(2, 1)).reshape(-1),
        res_lbfgsb.hess_inv(np.array([1, 0])))
    assert_allclose(dense, by_units)
    assert_allclose(dense, res_bfgs.hess_inv, rtol=1e-2, atol=0.03)
|
| 42 |
+
|
| 43 |
+
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy.optimize import _lbfgsb, minimize
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def objfun(x):
    """Simplified objective func to test lbfgsb bound violation.

    Replays four recorded evaluation points from a real optimization
    trace: given a query point ``x`` it returns the stored
    ``(f, gradient)`` pair of the matching point, and raises
    ``ValueError`` for any other input.
    """
    # Recorded query points.
    x0 = [0.8750000000000278,
          0.7500000000000153,
          0.9499999999999722,
          0.8214285714285992,
          0.6363636363636085]
    x1 = [1.0, 0.0, 1.0, 0.0, 0.0]
    x2 = [1.0,
          0.0,
          0.9889733043149325,
          0.0,
          0.026353554421041155]
    x3 = [1.0,
          0.0,
          0.9889917442915558,
          0.0,
          0.020341986743231205]

    # Recorded objective values.
    f0 = 5163.647901211178
    f1 = 5149.8181642072905
    f2 = 5149.379332309634
    f3 = 5149.374490771297

    # Recorded gradients.
    g0 = np.array([-0.5934820547965749,
                   1.6251549718258351,
                   -71.99168459202559,
                   5.346636965797545,
                   37.10732723092604])
    g1 = np.array([-0.43295349282641515,
                   1.008607936794592,
                   18.223666726602975,
                   31.927010036981997,
                   -19.667512518739386])
    g2 = np.array([-0.4699874455100256,
                   0.9466285353668347,
                   -0.016874360242016825,
                   48.44999161133457,
                   5.819631620590712])
    g3 = np.array([-0.46970678696829116,
                   0.9612719312174818,
                   0.006129809488833699,
                   48.43557729419473,
                   6.005481418498221])

    # Data-driven lookup replaces the original repetitive if/elif chain;
    # np.allclose tolerates the tiny float drift between iterations.
    for xk, fk, gk in ((x0, f0, g0), (x1, f1, g1), (x2, f2, g2), (x3, f3, g3)):
        if np.allclose(x, xk):
            # np.copy on both members preserves the original return types
            # (a 0-d array for f) and protects the recorded gradient.
            return (np.copy(fk), np.copy(gk))
    raise ValueError(
        'Simplified objective function not defined '
        'at requested point')
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def test_setulb_floatround():
    """test if setulb() violates bounds

    checks for violation due to floating point rounding error
    """

    n = 5      # number of variables
    m = 10     # L-BFGS history size
    factr = 1e7
    pgtol = 1e-5
    maxls = 20
    # nbd=2 selects variables bounded below and above (L-BFGS-B convention).
    nbd = np.full(shape=(n,), fill_value=2, dtype=np.int32)
    low_bnd = np.zeros(n, dtype=np.float64)
    upper_bnd = np.ones(n, dtype=np.float64)

    # Start point sits just inside the bounds, near the rounding edge.
    x0 = np.array(
        [0.8750000000000278,
         0.7500000000000153,
         0.9499999999999722,
         0.8214285714285992,
         0.6363636363636085])
    x = np.copy(x0)

    f = np.array(0.0, dtype=np.float64)
    g = np.zeros(n, dtype=np.float64)

    # Workspace arrays sized per the setulb interface requirements.
    wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, dtype=np.float64)
    iwa = np.zeros(3*n, dtype=np.int32)
    task = np.zeros(2, dtype=np.int32)
    ln_task = np.zeros(2, dtype=np.int32)
    lsave = np.zeros(4, dtype=np.int32)
    isave = np.zeros(44, dtype=np.int32)
    dsave = np.zeros(29, dtype=np.float64)

    for n_iter in range(7):  # 7 steps required to reproduce error
        f, g = objfun(x)

        # setulb updates x in place on each reverse-communication step.
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, pgtol, wa,
                       iwa, task, lsave, isave, dsave, maxls, ln_task)

    assert (x <= upper_bnd).all() and (x >= low_bnd).all(), (
        "_lbfgsb.setulb() stepped to a point outside of the bounds")
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_gh_issue18730():
|
| 114 |
+
# issue 18730 reported that l-bfgs-b did not work with objectives
|
| 115 |
+
# returning single precision gradient arrays
|
| 116 |
+
def fun_single_precision(x):
|
| 117 |
+
x = x.astype(np.float32)
|
| 118 |
+
return np.sum(x**2), (2*x)
|
| 119 |
+
|
| 120 |
+
res = minimize(fun_single_precision, x0=np.array([1., 1.]), jac=True,
|
| 121 |
+
method="l-bfgs-b")
|
| 122 |
+
np.testing.assert_allclose(res.fun, 0., atol=1e-15)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py
ADDED
|
@@ -0,0 +1,874 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import product
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.linalg import norm
|
| 5 |
+
from numpy.testing import (assert_, assert_allclose,
|
| 6 |
+
assert_equal, suppress_warnings)
|
| 7 |
+
import pytest
|
| 8 |
+
from pytest import raises as assert_raises
|
| 9 |
+
from scipy.sparse import issparse, lil_matrix
|
| 10 |
+
from scipy.sparse.linalg import aslinearoperator
|
| 11 |
+
|
| 12 |
+
from scipy.optimize import least_squares, Bounds
|
| 13 |
+
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
|
| 14 |
+
from scipy.optimize._lsq.common import EPS, make_strictly_feasible, CL_scaling_vector
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def fun_trivial(x, a=0):
    """Scalar residual (x - a)**2 + 5, minimized (value 5) at x = a."""
    shifted = x - a
    return shifted * shifted + 5.0
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def jac_trivial(x, a=0.0):
    """Derivative of fun_trivial with respect to x."""
    return (x - a) * 2
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def fun_2d_trivial(x):
    """Identity residual over the first two components of x."""
    first, second = x[0], x[1]
    return np.array([first, second])
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def jac_2d_trivial(x):
    """Jacobian of fun_2d_trivial: the 2x2 identity, independent of x."""
    return np.eye(2)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def fun_rosenbrock(x):
    """Residual vector of the classic 2-D Rosenbrock problem."""
    curvature_term = 10 * (x[1] - x[0]**2)
    linear_term = 1 - x[0]
    return np.array([curvature_term, linear_term])
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def jac_rosenbrock(x):
    """Analytic Jacobian of fun_rosenbrock."""
    rows = [
        [-20 * x[0], 10],
        [-1, 0],
    ]
    return np.array(rows)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def jac_rosenbrock_bad_dim(x):
    """Jacobian of fun_rosenbrock padded with a spurious third row, used
    to test detection of fun/jac shape mismatches."""
    rows = [
        [-20 * x[0], 10],
        [-1, 0],
        [0.0, 0.0],
    ]
    return np.array(rows)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def fun_rosenbrock_cropped(x):
    """Only the first Rosenbrock residual (fewer residuals than params)."""
    residuals = fun_rosenbrock(x)
    return residuals[0]
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def jac_rosenbrock_cropped(x):
    """Jacobian row matching fun_rosenbrock_cropped."""
    full_jacobian = jac_rosenbrock(x)
    return full_jacobian[0]
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# When x is 1-D array, return is 2-D array.
def fun_wrong_dimensions(x):
    """Residual with deliberately wrong (2-D) output dimensionality."""
    powers = (x, x**2, x**3)
    return np.array(powers)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def jac_wrong_dimensions(x, a=0.0):
    """Jacobian with a deliberately wrong (3-D) shape."""
    flat = jac_trivial(x, a=a)
    return np.atleast_3d(flat)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def fun_bvp(x):
    """Residuals of a discretized nonlinear boundary value problem on an
    n-by-n interior grid with zero boundary values.

    ``x`` is the flattened interior grid; n is inferred as sqrt(len(x)).
    """
    n = int(np.sqrt(x.shape[0]))
    grid = x.reshape((n, n))
    # Embed the interior in a zero-padded frame (the boundary condition).
    padded = np.zeros((n + 2, n + 2))
    padded[1:-1, 1:-1] = grid
    # Sum of the four nearest neighbours for every interior cell.
    neighbours = (padded[:-2, 1:-1] + padded[2:, 1:-1]
                  + padded[1:-1, :-2] + padded[1:-1, 2:])
    residual = neighbours - 4 * grid + grid**3
    return residual.ravel()
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class BroydenTridiagonal:
    """Broyden tridiagonal residual problem with randomized bounds.

    ``mode`` selects how the Jacobian is exposed: 'sparse' (lil_matrix
    plus a sparsity pattern), 'operator' (LinearOperator wrapper), or
    'dense' (plain ndarray).
    """

    def __init__(self, n=100, mode='sparse'):
        rng = np.random.RandomState(0)

        self.n = n

        self.x0 = -np.ones(n)
        self.lb = np.linspace(-2, -1.5, n)
        self.ub = np.linspace(-0.8, 0.0, n)

        # Perturb the bounds first, then the start point — the RNG call
        # order must stay fixed for reproducibility with seed 0.
        self.lb += 0.1 * rng.randn(n)
        self.ub += 0.1 * rng.randn(n)
        self.x0 += 0.1 * rng.randn(n)
        self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)

        if mode == 'sparse':
            # Tridiagonal sparsity pattern: main, sub- and super-diagonal.
            pattern = lil_matrix((n, n), dtype=int)
            main = np.arange(n)
            pattern[main, main] = 1
            sub = np.arange(1, n)
            pattern[sub, sub - 1] = 1
            sup = np.arange(n - 1)
            pattern[sup, sup + 1] = 1
            self.sparsity = pattern

            self.jac = self._jac
        elif mode == 'operator':
            self.jac = lambda x: aslinearoperator(self._jac(x))
        elif mode == 'dense':
            self.sparsity = None
            self.jac = lambda x: self._jac(x).toarray()
        else:
            assert_(False)

    def fun(self, x):
        """Broyden tridiagonal residual vector."""
        residual = (3 - x) * x + 1
        residual[1:] -= x[:-1]
        residual[:-1] -= 2 * x[1:]
        return residual

    def _jac(self, x):
        """Analytic tridiagonal Jacobian as a lil_matrix."""
        J = lil_matrix((self.n, self.n))
        main = np.arange(self.n)
        J[main, main] = 3 - 2 * x
        sub = np.arange(1, self.n)
        J[sub, sub - 1] = -1
        sup = np.arange(self.n - 1)
        J[sup, sup + 1] = -2
        return J
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class ExponentialFittingProblem:
    """Provide data and function for exponential fitting in the form
    y = a + exp(b * x) + noise."""

    def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
                 n_points=11, random_seed=None):
        rng = np.random.RandomState(random_seed)
        self.m = n_points
        self.n = 2

        self.p0 = np.zeros(2)
        self.x = np.linspace(x_range[0], x_range[1], n_points)

        # Clean signal plus Gaussian noise...
        self.y = a + np.exp(b * self.x)
        self.y += noise * rng.randn(self.m)

        # ...then inflate a few randomly chosen points to act as outliers.
        # RNG call order (randn, randint, rand) is fixed for reproducibility.
        outlier_idx = rng.randint(0, self.m, n_outliers)
        self.y[outlier_idx] += 50 * noise * rng.rand(n_outliers)

        self.p_opt = np.array([a, b])

    def fun(self, p):
        """Residuals of the exponential model at parameters p = (a, b)."""
        return p[0] + np.exp(p[1] * self.x) - self.y

    def jac(self, p):
        """Analytic Jacobian of the residuals with respect to (a, b)."""
        J = np.empty((self.m, self.n))
        J[:, 0] = 1
        J[:, 1] = self.x * np.exp(p[1] * self.x)
        return J
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def cubic_soft_l1(z):
    """Custom cubic 'soft L1' loss: rho(z), rho'(z), rho''(z) stacked
    row-wise, as required by least_squares' callable-loss interface."""
    shifted = 1 + z
    out = np.empty((3, z.size))
    out[0] = 3 * (shifted**(1/3) - 1)
    out[1] = shifted ** (-2/3)
    out[2] = -2/3 * shifted**(-5/3)
    return out
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# All built-in loss names plus one custom callable loss, iterated over below.
LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
class BaseMixin:
    """Common least_squares tests shared by all solver methods.

    Mixed into per-solver test classes; every test reads the solver name
    from ``self.method``, which the concrete subclass is expected to set.
    """

    def test_basic(self):
        # Test that the basic calling sequence works.
        res = least_squares(fun_trivial, 2., method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.fun, fun_trivial(res.x))

    def test_args_kwargs(self):
        # Test that args and kwargs are passed correctly to the functions.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(
                    UserWarning,
                    "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'"
                )
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                     method=self.method)

            assert_allclose(res.x, a, rtol=1e-4)
            assert_allclose(res1.x, a, rtol=1e-4)

            # Unexpected positional/keyword arguments must raise TypeError.
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          args=(3, 4,), method=self.method)
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          kwargs={'kaboom': 3}, method=self.method)

    def test_jac_options(self):
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(
                    UserWarning,
                    "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'"
                )
                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)

        # An unknown jac specification must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
                      method=self.method)

    def test_nfev_options(self):
        for max_nfev in [None, 20]:
            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)

    def test_x_scale_options(self):
        for x_scale in [1.0, np.array([0.5]), 'jac']:
            # NOTE(review): method=self.method is not passed here, so this
            # loop always exercises the default method — confirm intended.
            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
            assert_allclose(res.x, 0)
        # Invalid x_scale values must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale='auto', method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=-1.0, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=None, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=1.0+2.0j, method=self.method)

    def test_diff_step(self):
        # res1 and res2 should be equivalent.
        # res2 and res3 should be different.
        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
                             method=self.method)
        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
                             method=self.method)
        res3 = least_squares(fun_trivial, 2.0,
                             diff_step=None, method=self.method)
        assert_allclose(res1.x, 0, atol=1e-4)
        assert_allclose(res2.x, 0, atol=1e-4)
        assert_allclose(res3.x, 0, atol=1e-4)
        # The sign of diff_step is irrelevant: res1 and res2 match exactly.
        assert_equal(res1.x, res2.x)
        assert_equal(res1.nfev, res2.nfev)

    def test_incorrect_options_usage(self):
        # Unknown or duplicated entries in `options` must raise TypeError.
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'no_such_option': 100})
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'max_nfev': 100})

    def test_full_result(self):
        # MINPACK doesn't work very well with factor=100 on this problem,
        # thus using low 'atol'.
        res = least_squares(fun_trivial, 2.0, method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.cost, 12.5)
        assert_allclose(res.fun, 5)
        assert_allclose(res.jac, 0, atol=1e-4)
        assert_allclose(res.grad, 0, atol=1e-2)
        assert_allclose(res.optimality, 0, atol=1e-2)
        assert_equal(res.active_mask, 0)
        if self.method == 'lm':
            assert_(res.nfev < 30)
            # 'lm' does not report a Jacobian evaluation count.
            assert_(res.njev is None)
        else:
            assert_(res.nfev < 10)
            assert_(res.njev < 10)
        assert_(res.status > 0)
        assert_(res.success)

    def test_full_result_single_fev(self):
        # MINPACK checks the number of nfev after the iteration,
        # so it's hard to tell what he is going to compute.
        if self.method == 'lm':
            return

        # With max_nfev=1 the solver stops immediately at x0=2 with
        # status 0 (iteration limit) and all first-evaluation quantities.
        res = least_squares(fun_trivial, 2.0, method=self.method,
                            max_nfev=1)
        assert_equal(res.x, np.array([2]))
        assert_equal(res.cost, 40.5)
        assert_equal(res.fun, np.array([9]))
        assert_equal(res.jac, np.array([[4]]))
        assert_equal(res.grad, np.array([36]))
        assert_equal(res.optimality, 36)
        assert_equal(res.active_mask, np.array([0]))
        assert_equal(res.nfev, 1)
        assert_equal(res.njev, 1)
        assert_equal(res.status, 0)
        assert_equal(res.success, 0)

    def test_rosenbrock(self):
        x0 = [-2, 1]
        x_opt = [1, 1]
        # Sweep all jac / x_scale / tr_solver combinations.
        for jac, x_scale, tr_solver in product(
                ['2-point', '3-point', 'cs', jac_rosenbrock],
                [1.0, np.array([1.0, 0.2]), 'jac'],
                ['exact', 'lsmr']):
            with suppress_warnings() as sup:
                sup.filter(
                    UserWarning,
                    "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'"
                )
                res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
                                    tr_solver=tr_solver, method=self.method)
            assert_allclose(res.x, x_opt)

    def test_rosenbrock_cropped(self):
        x0 = [-2, 1]
        if self.method == 'lm':
            # 'lm' requires at least as many residuals as parameters.
            assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
                          x0, method='lm')
        else:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
                    [1.0, np.array([1.0, 0.2]), 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(
                    fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
                    tr_solver=tr_solver, method=self.method)
                assert_allclose(res.cost, 0, atol=1e-14)

    def test_fun_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_wrong_dimensions,
                      2.0, method=self.method)

    def test_jac_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, jac_wrong_dimensions, method=self.method)

    def test_fun_and_jac_inconsistent_dimensions(self):
        x0 = [1, 2]
        assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
                      jac_rosenbrock_bad_dim, method=self.method)

    def test_x0_multidimensional(self):
        x0 = np.ones(4).reshape(2, 2)
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_x0_complex_scalar(self):
        x0 = 2.0 + 0.0*1j
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_x0_complex_array(self):
        x0 = [1.0, 2.0 + 0.0*1j]
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_bvp(self):
        # This test was introduced with fix #5556. It turned out that
        # dogbox solver had a bug with trust-region radius update, which
        # could block its progress and create an infinite loop. And this
        # discrete boundary value problem is the one which triggers it.
        n = 10
        x0 = np.ones(n**2)
        if self.method == 'lm':
            max_nfev = 5000  # To account for Jacobian estimation.
        else:
            max_nfev = 100
        res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
                            max_nfev=max_nfev)

        assert_(res.nfev < max_nfev)
        assert_(res.cost < 0.5)

    def test_error_raised_when_all_tolerances_below_eps(self):
        # Test that all 0 tolerances are not allowed.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method=self.method, ftol=None, xtol=None, gtol=None)

    def test_convergence_with_only_one_tolerance_enabled(self):
        if self.method == 'lm':
            return  # should not do test
        x0 = [-2, 1]
        x_opt = [1, 1]
        # Each of ftol/xtol/gtol alone must suffice to converge.
        for ftol, xtol, gtol in [(1e-8, None, None),
                                 (None, 1e-8, None),
                                 (None, None, 1e-8)]:
            res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
                                ftol=ftol, gtol=gtol, xtol=xtol,
                                method=self.method)
            assert_allclose(res.x, x_opt)
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class BoundsMixin:
|
| 392 |
+
def test_inconsistent(self):
|
| 393 |
+
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
|
| 394 |
+
bounds=(10.0, 0.0), method=self.method)
|
| 395 |
+
|
| 396 |
+
def test_infeasible(self):
|
| 397 |
+
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
|
| 398 |
+
bounds=(3., 4), method=self.method)
|
| 399 |
+
|
| 400 |
+
def test_wrong_number(self):
|
| 401 |
+
assert_raises(ValueError, least_squares, fun_trivial, 2.,
|
| 402 |
+
bounds=(1., 2, 3), method=self.method)
|
| 403 |
+
|
| 404 |
+
def test_inconsistent_shape(self):
|
| 405 |
+
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
|
| 406 |
+
bounds=(1.0, [2.0, 3.0]), method=self.method)
|
| 407 |
+
# 1-D array won't be broadcast
|
| 408 |
+
assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
|
| 409 |
+
bounds=([0.0], [3.0, 4.0]), method=self.method)
|
| 410 |
+
|
| 411 |
+
def test_in_bounds(self):
|
| 412 |
+
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
|
| 413 |
+
res = least_squares(fun_trivial, 2.0, jac=jac,
|
| 414 |
+
bounds=(-1.0, 3.0), method=self.method)
|
| 415 |
+
assert_allclose(res.x, 0.0, atol=1e-4)
|
| 416 |
+
assert_equal(res.active_mask, [0])
|
| 417 |
+
assert_(-1 <= res.x <= 3)
|
| 418 |
+
res = least_squares(fun_trivial, 2.0, jac=jac,
|
| 419 |
+
bounds=(0.5, 3.0), method=self.method)
|
| 420 |
+
assert_allclose(res.x, 0.5, atol=1e-4)
|
| 421 |
+
assert_equal(res.active_mask, [-1])
|
| 422 |
+
assert_(0.5 <= res.x <= 3)
|
| 423 |
+
|
| 424 |
+
def test_bounds_shape(self):
|
| 425 |
+
def get_bounds_direct(lb, ub):
|
| 426 |
+
return lb, ub
|
| 427 |
+
|
| 428 |
+
def get_bounds_instances(lb, ub):
|
| 429 |
+
return Bounds(lb, ub)
|
| 430 |
+
|
| 431 |
+
for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
|
| 432 |
+
for bounds_func in [get_bounds_direct, get_bounds_instances]:
|
| 433 |
+
x0 = [1.0, 1.0]
|
| 434 |
+
res = least_squares(fun_2d_trivial, x0, jac=jac)
|
| 435 |
+
assert_allclose(res.x, [0.0, 0.0])
|
| 436 |
+
res = least_squares(fun_2d_trivial, x0, jac=jac,
|
| 437 |
+
bounds=bounds_func(0.5, [2.0, 2.0]),
|
| 438 |
+
method=self.method)
|
| 439 |
+
assert_allclose(res.x, [0.5, 0.5])
|
| 440 |
+
res = least_squares(fun_2d_trivial, x0, jac=jac,
|
| 441 |
+
bounds=bounds_func([0.3, 0.2], 3.0),
|
| 442 |
+
method=self.method)
|
| 443 |
+
assert_allclose(res.x, [0.3, 0.2])
|
| 444 |
+
res = least_squares(
|
| 445 |
+
fun_2d_trivial, x0, jac=jac,
|
| 446 |
+
bounds=bounds_func([-1, 0.5], [1.0, 3.0]),
|
| 447 |
+
method=self.method)
|
| 448 |
+
assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
|
| 449 |
+
|
| 450 |
+
def test_bounds_instances(self):
|
| 451 |
+
res = least_squares(fun_trivial, 0.5, bounds=Bounds())
|
| 452 |
+
assert_allclose(res.x, 0.0, atol=1e-4)
|
| 453 |
+
|
| 454 |
+
res = least_squares(fun_trivial, 3.0, bounds=Bounds(lb=1.0))
|
| 455 |
+
assert_allclose(res.x, 1.0, atol=1e-4)
|
| 456 |
+
|
| 457 |
+
res = least_squares(fun_trivial, 0.5, bounds=Bounds(lb=-1.0, ub=1.0))
|
| 458 |
+
assert_allclose(res.x, 0.0, atol=1e-4)
|
| 459 |
+
|
| 460 |
+
res = least_squares(fun_trivial, -3.0, bounds=Bounds(ub=-1.0))
|
| 461 |
+
assert_allclose(res.x, -1.0, atol=1e-4)
|
| 462 |
+
|
| 463 |
+
res = least_squares(fun_2d_trivial, [0.5, 0.5],
|
| 464 |
+
bounds=Bounds(lb=[-1.0, -1.0], ub=1.0))
|
| 465 |
+
assert_allclose(res.x, [0.0, 0.0], atol=1e-5)
|
| 466 |
+
|
| 467 |
+
res = least_squares(fun_2d_trivial, [0.5, 0.5],
|
| 468 |
+
bounds=Bounds(lb=[0.1, 0.1]))
|
| 469 |
+
assert_allclose(res.x, [0.1, 0.1], atol=1e-5)
|
| 470 |
+
|
| 471 |
+
@pytest.mark.fail_slow(10)
|
| 472 |
+
def test_rosenbrock_bounds(self):
|
| 473 |
+
x0_1 = np.array([-2.0, 1.0])
|
| 474 |
+
x0_2 = np.array([2.0, 2.0])
|
| 475 |
+
x0_3 = np.array([-2.0, 2.0])
|
| 476 |
+
x0_4 = np.array([0.0, 2.0])
|
| 477 |
+
x0_5 = np.array([-1.2, 1.0])
|
| 478 |
+
problems = [
|
| 479 |
+
(x0_1, ([-np.inf, -1.5], np.inf)),
|
| 480 |
+
(x0_2, ([-np.inf, 1.5], np.inf)),
|
| 481 |
+
(x0_3, ([-np.inf, 1.5], np.inf)),
|
| 482 |
+
(x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
|
| 483 |
+
(x0_2, ([1.0, 1.5], [3.0, 3.0])),
|
| 484 |
+
(x0_5, ([-50.0, 0.0], [0.5, 100]))
|
| 485 |
+
]
|
| 486 |
+
for x0, bounds in problems:
|
| 487 |
+
for jac, x_scale, tr_solver in product(
|
| 488 |
+
['2-point', '3-point', 'cs', jac_rosenbrock],
|
| 489 |
+
[1.0, [1.0, 0.5], 'jac'],
|
| 490 |
+
['exact', 'lsmr']):
|
| 491 |
+
res = least_squares(fun_rosenbrock, x0, jac, bounds,
|
| 492 |
+
x_scale=x_scale, tr_solver=tr_solver,
|
| 493 |
+
method=self.method)
|
| 494 |
+
assert_allclose(res.optimality, 0.0, atol=1e-5)
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
class SparseMixin:
|
| 498 |
+
def test_exact_tr_solver(self):
|
| 499 |
+
p = BroydenTridiagonal()
|
| 500 |
+
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
|
| 501 |
+
tr_solver='exact', method=self.method)
|
| 502 |
+
assert_raises(ValueError, least_squares, p.fun, p.x0,
|
| 503 |
+
tr_solver='exact', jac_sparsity=p.sparsity,
|
| 504 |
+
method=self.method)
|
| 505 |
+
|
| 506 |
+
def test_equivalence(self):
|
| 507 |
+
sparse = BroydenTridiagonal(mode='sparse')
|
| 508 |
+
dense = BroydenTridiagonal(mode='dense')
|
| 509 |
+
res_sparse = least_squares(
|
| 510 |
+
sparse.fun, sparse.x0, jac=sparse.jac,
|
| 511 |
+
method=self.method)
|
| 512 |
+
res_dense = least_squares(
|
| 513 |
+
dense.fun, dense.x0, jac=sparse.jac,
|
| 514 |
+
method=self.method)
|
| 515 |
+
assert_equal(res_sparse.nfev, res_dense.nfev)
|
| 516 |
+
assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
|
| 517 |
+
assert_allclose(res_sparse.cost, 0, atol=1e-20)
|
| 518 |
+
assert_allclose(res_dense.cost, 0, atol=1e-20)
|
| 519 |
+
|
| 520 |
+
def test_tr_options(self):
|
| 521 |
+
p = BroydenTridiagonal()
|
| 522 |
+
res = least_squares(p.fun, p.x0, p.jac, method=self.method,
|
| 523 |
+
tr_options={'btol': 1e-10})
|
| 524 |
+
assert_allclose(res.cost, 0, atol=1e-20)
|
| 525 |
+
|
| 526 |
+
def test_wrong_parameters(self):
|
| 527 |
+
p = BroydenTridiagonal()
|
| 528 |
+
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
|
| 529 |
+
tr_solver='best', method=self.method)
|
| 530 |
+
assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
|
| 531 |
+
tr_solver='lsmr', tr_options={'tol': 1e-10})
|
| 532 |
+
|
| 533 |
+
def test_solver_selection(self):
|
| 534 |
+
sparse = BroydenTridiagonal(mode='sparse')
|
| 535 |
+
dense = BroydenTridiagonal(mode='dense')
|
| 536 |
+
res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
|
| 537 |
+
method=self.method)
|
| 538 |
+
res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
|
| 539 |
+
method=self.method)
|
| 540 |
+
assert_allclose(res_sparse.cost, 0, atol=1e-20)
|
| 541 |
+
assert_allclose(res_dense.cost, 0, atol=1e-20)
|
| 542 |
+
assert_(issparse(res_sparse.jac))
|
| 543 |
+
assert_(isinstance(res_dense.jac, np.ndarray))
|
| 544 |
+
|
| 545 |
+
def test_numerical_jac(self):
|
| 546 |
+
p = BroydenTridiagonal()
|
| 547 |
+
for jac in ['2-point', '3-point', 'cs']:
|
| 548 |
+
res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
|
| 549 |
+
res_sparse = least_squares(
|
| 550 |
+
p.fun, p.x0, jac,method=self.method,
|
| 551 |
+
jac_sparsity=p.sparsity)
|
| 552 |
+
assert_equal(res_dense.nfev, res_sparse.nfev)
|
| 553 |
+
assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
|
| 554 |
+
assert_allclose(res_dense.cost, 0, atol=1e-20)
|
| 555 |
+
assert_allclose(res_sparse.cost, 0, atol=1e-20)
|
| 556 |
+
|
| 557 |
+
@pytest.mark.fail_slow(10)
|
| 558 |
+
def test_with_bounds(self):
|
| 559 |
+
p = BroydenTridiagonal()
|
| 560 |
+
for jac, jac_sparsity in product(
|
| 561 |
+
[p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
|
| 562 |
+
res_1 = least_squares(
|
| 563 |
+
p.fun, p.x0, jac, bounds=(p.lb, np.inf),
|
| 564 |
+
method=self.method,jac_sparsity=jac_sparsity)
|
| 565 |
+
res_2 = least_squares(
|
| 566 |
+
p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
|
| 567 |
+
method=self.method, jac_sparsity=jac_sparsity)
|
| 568 |
+
res_3 = least_squares(
|
| 569 |
+
p.fun, p.x0, jac, bounds=(p.lb, p.ub),
|
| 570 |
+
method=self.method, jac_sparsity=jac_sparsity)
|
| 571 |
+
assert_allclose(res_1.optimality, 0, atol=1e-10)
|
| 572 |
+
assert_allclose(res_2.optimality, 0, atol=1e-10)
|
| 573 |
+
assert_allclose(res_3.optimality, 0, atol=1e-10)
|
| 574 |
+
|
| 575 |
+
def test_wrong_jac_sparsity(self):
|
| 576 |
+
p = BroydenTridiagonal()
|
| 577 |
+
sparsity = p.sparsity[:-1]
|
| 578 |
+
assert_raises(ValueError, least_squares, p.fun, p.x0,
|
| 579 |
+
jac_sparsity=sparsity, method=self.method)
|
| 580 |
+
|
| 581 |
+
def test_linear_operator(self):
|
| 582 |
+
p = BroydenTridiagonal(mode='operator')
|
| 583 |
+
res = least_squares(p.fun, p.x0, p.jac, method=self.method)
|
| 584 |
+
assert_allclose(res.cost, 0.0, atol=1e-20)
|
| 585 |
+
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
|
| 586 |
+
method=self.method, tr_solver='exact')
|
| 587 |
+
|
| 588 |
+
def test_x_scale_jac_scale(self):
|
| 589 |
+
p = BroydenTridiagonal()
|
| 590 |
+
res = least_squares(p.fun, p.x0, p.jac, method=self.method,
|
| 591 |
+
x_scale='jac')
|
| 592 |
+
assert_allclose(res.cost, 0.0, atol=1e-20)
|
| 593 |
+
|
| 594 |
+
p = BroydenTridiagonal(mode='operator')
|
| 595 |
+
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
|
| 596 |
+
method=self.method, x_scale='jac')
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
class LossFunctionMixin:
|
| 600 |
+
def test_options(self):
|
| 601 |
+
for loss in LOSSES:
|
| 602 |
+
res = least_squares(fun_trivial, 2.0, loss=loss,
|
| 603 |
+
method=self.method)
|
| 604 |
+
assert_allclose(res.x, 0, atol=1e-15)
|
| 605 |
+
|
| 606 |
+
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
|
| 607 |
+
loss='hinge', method=self.method)
|
| 608 |
+
|
| 609 |
+
def test_fun(self):
|
| 610 |
+
# Test that res.fun is actual residuals, and not modified by loss
|
| 611 |
+
# function stuff.
|
| 612 |
+
for loss in LOSSES:
|
| 613 |
+
res = least_squares(fun_trivial, 2.0, loss=loss,
|
| 614 |
+
method=self.method)
|
| 615 |
+
assert_equal(res.fun, fun_trivial(res.x))
|
| 616 |
+
|
| 617 |
+
def test_grad(self):
|
| 618 |
+
# Test that res.grad is true gradient of loss function at the
|
| 619 |
+
# solution. Use max_nfev = 1, to avoid reaching minimum.
|
| 620 |
+
x = np.array([2.0]) # res.x will be this.
|
| 621 |
+
|
| 622 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
|
| 623 |
+
max_nfev=1, method=self.method)
|
| 624 |
+
assert_equal(res.grad, 2 * x * (x**2 + 5))
|
| 625 |
+
|
| 626 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
|
| 627 |
+
max_nfev=1, method=self.method)
|
| 628 |
+
assert_equal(res.grad, 2 * x)
|
| 629 |
+
|
| 630 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
|
| 631 |
+
max_nfev=1, method=self.method)
|
| 632 |
+
assert_allclose(res.grad,
|
| 633 |
+
2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)
|
| 634 |
+
|
| 635 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
|
| 636 |
+
max_nfev=1, method=self.method)
|
| 637 |
+
assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))
|
| 638 |
+
|
| 639 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
|
| 640 |
+
max_nfev=1, method=self.method)
|
| 641 |
+
assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))
|
| 642 |
+
|
| 643 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
|
| 644 |
+
max_nfev=1, method=self.method)
|
| 645 |
+
assert_allclose(res.grad,
|
| 646 |
+
2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
|
| 647 |
+
|
| 648 |
+
def test_jac(self):
|
| 649 |
+
# Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation
|
| 650 |
+
# of Hessian. This approximation is computed by doubly differentiating
|
| 651 |
+
# the cost function and dropping the part containing second derivative
|
| 652 |
+
# of f. For a scalar function it is computed as
|
| 653 |
+
# H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the
|
| 654 |
+
# brackets is less than EPS it is replaced by EPS. Here, we check
|
| 655 |
+
# against the root of H.
|
| 656 |
+
|
| 657 |
+
x = 2.0 # res.x will be this.
|
| 658 |
+
f = x**2 + 5 # res.fun will be this.
|
| 659 |
+
|
| 660 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
|
| 661 |
+
max_nfev=1, method=self.method)
|
| 662 |
+
assert_equal(res.jac, 2 * x)
|
| 663 |
+
|
| 664 |
+
# For `huber` loss the Jacobian correction is identically zero
|
| 665 |
+
# in outlier region, in such cases it is modified to be equal EPS**0.5.
|
| 666 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
|
| 667 |
+
max_nfev=1, method=self.method)
|
| 668 |
+
assert_equal(res.jac, 2 * x * EPS**0.5)
|
| 669 |
+
|
| 670 |
+
# Now, let's apply `loss_scale` to turn the residual into an inlier.
|
| 671 |
+
# The loss function becomes linear.
|
| 672 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
|
| 673 |
+
f_scale=10, max_nfev=1)
|
| 674 |
+
assert_equal(res.jac, 2 * x)
|
| 675 |
+
|
| 676 |
+
# 'soft_l1' always gives a positive scaling.
|
| 677 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
|
| 678 |
+
max_nfev=1, method=self.method)
|
| 679 |
+
assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)
|
| 680 |
+
|
| 681 |
+
# For 'cauchy' the correction term turns out to be negative, and it
|
| 682 |
+
# replaced by EPS**0.5.
|
| 683 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
|
| 684 |
+
max_nfev=1, method=self.method)
|
| 685 |
+
assert_allclose(res.jac, 2 * x * EPS**0.5)
|
| 686 |
+
|
| 687 |
+
# Now use scaling to turn the residual to inlier.
|
| 688 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
|
| 689 |
+
f_scale=10, max_nfev=1, method=self.method)
|
| 690 |
+
fs = f / 10
|
| 691 |
+
assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))
|
| 692 |
+
|
| 693 |
+
# 'arctan' gives an outlier.
|
| 694 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
|
| 695 |
+
max_nfev=1, method=self.method)
|
| 696 |
+
assert_allclose(res.jac, 2 * x * EPS**0.5)
|
| 697 |
+
|
| 698 |
+
# Turn to inlier.
|
| 699 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
|
| 700 |
+
f_scale=20.0, max_nfev=1, method=self.method)
|
| 701 |
+
fs = f / 20
|
| 702 |
+
assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))
|
| 703 |
+
|
| 704 |
+
# cubic_soft_l1 will give an outlier.
|
| 705 |
+
res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
|
| 706 |
+
max_nfev=1)
|
| 707 |
+
assert_allclose(res.jac, 2 * x * EPS**0.5)
|
| 708 |
+
|
| 709 |
+
# Turn to inlier.
|
| 710 |
+
res = least_squares(fun_trivial, x, jac_trivial,
|
| 711 |
+
loss=cubic_soft_l1, f_scale=6, max_nfev=1)
|
| 712 |
+
fs = f / 6
|
| 713 |
+
assert_allclose(res.jac,
|
| 714 |
+
2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))
|
| 715 |
+
|
| 716 |
+
def test_robustness(self):
|
| 717 |
+
for noise in [0.1, 1.0]:
|
| 718 |
+
p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
|
| 719 |
+
|
| 720 |
+
for jac in ['2-point', '3-point', 'cs', p.jac]:
|
| 721 |
+
res_lsq = least_squares(p.fun, p.p0, jac=jac,
|
| 722 |
+
method=self.method)
|
| 723 |
+
assert_allclose(res_lsq.optimality, 0, atol=1e-2)
|
| 724 |
+
for loss in LOSSES:
|
| 725 |
+
if loss == 'linear':
|
| 726 |
+
continue
|
| 727 |
+
res_robust = least_squares(
|
| 728 |
+
p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
|
| 729 |
+
method=self.method)
|
| 730 |
+
assert_allclose(res_robust.optimality, 0, atol=1e-2)
|
| 731 |
+
assert_(norm(res_robust.x - p.p_opt) <
|
| 732 |
+
norm(res_lsq.x - p.p_opt))
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
|
| 736 |
+
method = 'dogbox'
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
|
| 740 |
+
method = 'trf'
|
| 741 |
+
|
| 742 |
+
def test_lsmr_regularization(self):
|
| 743 |
+
p = BroydenTridiagonal()
|
| 744 |
+
for regularize in [True, False]:
|
| 745 |
+
res = least_squares(p.fun, p.x0, p.jac, method='trf',
|
| 746 |
+
tr_options={'regularize': regularize})
|
| 747 |
+
assert_allclose(res.cost, 0, atol=1e-20)
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
class TestLM(BaseMixin):
|
| 751 |
+
method = 'lm'
|
| 752 |
+
|
| 753 |
+
def test_bounds_not_supported(self):
|
| 754 |
+
assert_raises(ValueError, least_squares, fun_trivial,
|
| 755 |
+
2.0, bounds=(-3.0, 3.0), method='lm')
|
| 756 |
+
|
| 757 |
+
def test_m_less_n_not_supported(self):
|
| 758 |
+
x0 = [-2, 1]
|
| 759 |
+
assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
|
| 760 |
+
method='lm')
|
| 761 |
+
|
| 762 |
+
def test_sparse_not_supported(self):
|
| 763 |
+
p = BroydenTridiagonal()
|
| 764 |
+
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
|
| 765 |
+
method='lm')
|
| 766 |
+
|
| 767 |
+
def test_jac_sparsity_not_supported(self):
|
| 768 |
+
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
|
| 769 |
+
jac_sparsity=[1], method='lm')
|
| 770 |
+
|
| 771 |
+
def test_LinearOperator_not_supported(self):
|
| 772 |
+
p = BroydenTridiagonal(mode="operator")
|
| 773 |
+
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
|
| 774 |
+
method='lm')
|
| 775 |
+
|
| 776 |
+
def test_loss(self):
|
| 777 |
+
res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
|
| 778 |
+
assert_allclose(res.x, 0.0, atol=1e-4)
|
| 779 |
+
|
| 780 |
+
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
|
| 781 |
+
method='lm', loss='huber')
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
def test_basic():
|
| 785 |
+
# test that 'method' arg is really optional
|
| 786 |
+
res = least_squares(fun_trivial, 2.0)
|
| 787 |
+
assert_allclose(res.x, 0, atol=1e-10)
|
| 788 |
+
|
| 789 |
+
|
| 790 |
+
def test_small_tolerances_for_lm():
|
| 791 |
+
for ftol, xtol, gtol in [(None, 1e-13, 1e-13),
|
| 792 |
+
(1e-13, None, 1e-13),
|
| 793 |
+
(1e-13, 1e-13, None)]:
|
| 794 |
+
assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol,
|
| 795 |
+
ftol=ftol, gtol=gtol, method='lm')
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
def test_fp32_gh12991():
|
| 799 |
+
# checks that smaller FP sizes can be used in least_squares
|
| 800 |
+
# this is the minimum working example reported for gh12991
|
| 801 |
+
rng = np.random.RandomState(1)
|
| 802 |
+
|
| 803 |
+
x = np.linspace(0, 1, 100).astype("float32")
|
| 804 |
+
y = rng.random(100).astype("float32")
|
| 805 |
+
|
| 806 |
+
def func(p, x):
|
| 807 |
+
return p[0] + p[1] * x
|
| 808 |
+
|
| 809 |
+
def err(p, x, y):
|
| 810 |
+
return func(p, x) - y
|
| 811 |
+
|
| 812 |
+
res = least_squares(err, [-1.0, -1.0], args=(x, y))
|
| 813 |
+
# previously the initial jacobian calculated for this would be all 0
|
| 814 |
+
# and the minimize would terminate immediately, with nfev=1, would
|
| 815 |
+
# report a successful minimization (it shouldn't have done), but be
|
| 816 |
+
# unchanged from the initial solution.
|
| 817 |
+
# It was terminating early because the underlying approx_derivative
|
| 818 |
+
# used a step size for FP64 when the working space was FP32.
|
| 819 |
+
assert res.nfev > 2
|
| 820 |
+
assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5)
|
| 821 |
+
|
| 822 |
+
|
| 823 |
+
def test_gh_18793_and_19351():
|
| 824 |
+
answer = 1e-12
|
| 825 |
+
initial_guess = 1.1e-12
|
| 826 |
+
|
| 827 |
+
def chi2(x):
|
| 828 |
+
return (x-answer)**2
|
| 829 |
+
|
| 830 |
+
gtol = 1e-15
|
| 831 |
+
res = least_squares(chi2, x0=initial_guess, gtol=1e-15, bounds=(0, np.inf))
|
| 832 |
+
# Original motivation: gh-18793
|
| 833 |
+
# if we choose an initial condition that is close to the solution
|
| 834 |
+
# we shouldn't return an answer that is further away from the solution
|
| 835 |
+
|
| 836 |
+
# Update: gh-19351
|
| 837 |
+
# However this requirement does not go well with 'trf' algorithm logic.
|
| 838 |
+
# Some regressions were reported after the presumed fix.
|
| 839 |
+
# The returned solution is good as long as it satisfies the convergence
|
| 840 |
+
# conditions.
|
| 841 |
+
# Specifically in this case the scaled gradient will be sufficiently low.
|
| 842 |
+
|
| 843 |
+
scaling, _ = CL_scaling_vector(res.x, res.grad,
|
| 844 |
+
np.atleast_1d(0), np.atleast_1d(np.inf))
|
| 845 |
+
assert res.status == 1 # Converged by gradient
|
| 846 |
+
assert np.linalg.norm(res.grad * scaling, ord=np.inf) < gtol
|
| 847 |
+
|
| 848 |
+
|
| 849 |
+
def test_gh_19103():
|
| 850 |
+
# Checks that least_squares trf method selects a strictly feasible point,
|
| 851 |
+
# and thus succeeds instead of failing,
|
| 852 |
+
# when the initial guess is reported exactly at a boundary point.
|
| 853 |
+
# This is a reduced example from gh191303
|
| 854 |
+
|
| 855 |
+
ydata = np.array([0.] * 66 + [
|
| 856 |
+
1., 0., 0., 0., 0., 0., 1., 1., 0., 0., 1.,
|
| 857 |
+
1., 1., 1., 0., 0., 0., 1., 0., 0., 2., 1.,
|
| 858 |
+
0., 3., 1., 6., 5., 0., 0., 2., 8., 4., 4.,
|
| 859 |
+
6., 9., 7., 2., 7., 8., 2., 13., 9., 8., 11.,
|
| 860 |
+
10., 13., 14., 19., 11., 15., 18., 26., 19., 32., 29.,
|
| 861 |
+
28., 36., 32., 35., 36., 43., 52., 32., 58., 56., 52.,
|
| 862 |
+
67., 53., 72., 88., 77., 95., 94., 84., 86., 101., 107.,
|
| 863 |
+
108., 118., 96., 115., 138., 137.,
|
| 864 |
+
])
|
| 865 |
+
xdata = np.arange(0, ydata.size) * 0.1
|
| 866 |
+
|
| 867 |
+
def exponential_wrapped(params):
|
| 868 |
+
A, B, x0 = params
|
| 869 |
+
return A * np.exp(B * (xdata - x0)) - ydata
|
| 870 |
+
|
| 871 |
+
x0 = [0.01, 1., 5.]
|
| 872 |
+
bounds = ((0.01, 0, 0), (np.inf, 10, 20.9))
|
| 873 |
+
res = least_squares(exponential_wrapped, x0, method='trf', bounds=bounds)
|
| 874 |
+
assert res.success
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck
|
| 2 |
+
# License: BSD
|
| 3 |
+
|
| 4 |
+
from numpy.testing import assert_array_equal
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from scipy.optimize import linear_sum_assignment
|
| 10 |
+
from scipy.sparse import random
|
| 11 |
+
from scipy.sparse._sputils import matrix
|
| 12 |
+
from scipy.sparse.csgraph import min_weight_full_bipartite_matching
|
| 13 |
+
from scipy.sparse.csgraph.tests.test_matching import (
|
| 14 |
+
linear_sum_assignment_assertions, linear_sum_assignment_test_cases
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def test_linear_sum_assignment_input_shape():
|
| 19 |
+
with pytest.raises(ValueError, match="expected a matrix"):
|
| 20 |
+
linear_sum_assignment([1, 2, 3])
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def test_linear_sum_assignment_input_object():
|
| 24 |
+
C = [[1, 2, 3], [4, 5, 6]]
|
| 25 |
+
assert_array_equal(linear_sum_assignment(C),
|
| 26 |
+
linear_sum_assignment(np.asarray(C)))
|
| 27 |
+
assert_array_equal(linear_sum_assignment(C),
|
| 28 |
+
linear_sum_assignment(matrix(C)))
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def test_linear_sum_assignment_input_bool():
|
| 32 |
+
I = np.identity(3)
|
| 33 |
+
assert_array_equal(linear_sum_assignment(I.astype(np.bool_)),
|
| 34 |
+
linear_sum_assignment(I))
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def test_linear_sum_assignment_input_string():
|
| 38 |
+
I = np.identity(3)
|
| 39 |
+
with pytest.raises(TypeError, match="Cannot cast array data"):
|
| 40 |
+
linear_sum_assignment(I.astype(str))
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def test_linear_sum_assignment_input_nan():
|
| 44 |
+
I = np.diag([np.nan, 1, 1])
|
| 45 |
+
with pytest.raises(ValueError, match="contains invalid numeric entries"):
|
| 46 |
+
linear_sum_assignment(I)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def test_linear_sum_assignment_input_neginf():
|
| 50 |
+
I = np.diag([1, -np.inf, 1])
|
| 51 |
+
with pytest.raises(ValueError, match="contains invalid numeric entries"):
|
| 52 |
+
linear_sum_assignment(I)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def test_linear_sum_assignment_input_inf():
|
| 56 |
+
I = np.identity(3)
|
| 57 |
+
I[:, 0] = np.inf
|
| 58 |
+
with pytest.raises(ValueError, match="cost matrix is infeasible"):
|
| 59 |
+
linear_sum_assignment(I)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def test_constant_cost_matrix():
|
| 63 |
+
# Fixes #11602
|
| 64 |
+
n = 8
|
| 65 |
+
C = np.ones((n, n))
|
| 66 |
+
row_ind, col_ind = linear_sum_assignment(C)
|
| 67 |
+
assert_array_equal(row_ind, np.arange(n))
|
| 68 |
+
assert_array_equal(col_ind, np.arange(n))
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
|
| 72 |
+
def test_linear_sum_assignment_trivial_cost(num_rows, num_cols):
|
| 73 |
+
C = np.empty(shape=(num_cols, num_rows))
|
| 74 |
+
row_ind, col_ind = linear_sum_assignment(C)
|
| 75 |
+
assert len(row_ind) == 0
|
| 76 |
+
assert len(col_ind) == 0
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
|
| 80 |
+
def test_linear_sum_assignment_small_inputs(sign, test_case):
|
| 81 |
+
linear_sum_assignment_assertions(
|
| 82 |
+
linear_sum_assignment, np.array, sign, test_case)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# Tests that combine scipy.optimize.linear_sum_assignment and
|
| 86 |
+
# scipy.sparse.csgraph.min_weight_full_bipartite_matching
|
| 87 |
+
def test_two_methods_give_same_result_on_many_sparse_inputs():
|
| 88 |
+
# As opposed to the test above, here we do not spell out the expected
|
| 89 |
+
# output; only assert that the two methods give the same result.
|
| 90 |
+
# Concretely, the below tests 100 cases of size 100x100, out of which
|
| 91 |
+
# 36 are infeasible.
|
| 92 |
+
np.random.seed(1234)
|
| 93 |
+
for _ in range(100):
|
| 94 |
+
lsa_raises = False
|
| 95 |
+
mwfbm_raises = False
|
| 96 |
+
sparse = random(100, 100, density=0.06,
|
| 97 |
+
data_rvs=lambda size: np.random.randint(1, 100, size))
|
| 98 |
+
# In csgraph, zeros correspond to missing edges, so we explicitly
|
| 99 |
+
# replace those with infinities
|
| 100 |
+
dense = np.full(sparse.shape, np.inf)
|
| 101 |
+
dense[sparse.row, sparse.col] = sparse.data
|
| 102 |
+
sparse = sparse.tocsr()
|
| 103 |
+
try:
|
| 104 |
+
row_ind, col_ind = linear_sum_assignment(dense)
|
| 105 |
+
lsa_cost = dense[row_ind, col_ind].sum()
|
| 106 |
+
except ValueError:
|
| 107 |
+
lsa_raises = True
|
| 108 |
+
try:
|
| 109 |
+
row_ind, col_ind = min_weight_full_bipartite_matching(sparse)
|
| 110 |
+
mwfbm_cost = sparse[row_ind, col_ind].sum()
|
| 111 |
+
except ValueError:
|
| 112 |
+
mwfbm_raises = True
|
| 113 |
+
# Ensure that if one method raises, so does the other one.
|
| 114 |
+
assert lsa_raises == mwfbm_raises
|
| 115 |
+
if not lsa_raises:
|
| 116 |
+
assert lsa_cost == mwfbm_cost
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numpy.testing import assert_, assert_allclose, assert_equal
|
| 2 |
+
from pytest import raises as assert_raises
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from scipy.optimize._lsq.common import (
|
| 6 |
+
step_size_to_bound, find_active_constraints, make_strictly_feasible,
|
| 7 |
+
CL_scaling_vector, intersect_trust_region, build_quadratic_1d,
|
| 8 |
+
minimize_quadratic_1d, evaluate_quadratic, reflective_transformation,
|
| 9 |
+
left_multiplied_operator, right_multiplied_operator)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TestBounds:
|
| 13 |
+
def test_step_size_to_bounds(self):
|
| 14 |
+
lb = np.array([-1.0, 2.5, 10.0])
|
| 15 |
+
ub = np.array([1.0, 5.0, 100.0])
|
| 16 |
+
x = np.array([0.0, 2.5, 12.0])
|
| 17 |
+
|
| 18 |
+
s = np.array([0.1, 0.0, 0.0])
|
| 19 |
+
step, hits = step_size_to_bound(x, s, lb, ub)
|
| 20 |
+
assert_equal(step, 10)
|
| 21 |
+
assert_equal(hits, [1, 0, 0])
|
| 22 |
+
|
| 23 |
+
s = np.array([0.01, 0.05, -1.0])
|
| 24 |
+
step, hits = step_size_to_bound(x, s, lb, ub)
|
| 25 |
+
assert_equal(step, 2)
|
| 26 |
+
assert_equal(hits, [0, 0, -1])
|
| 27 |
+
|
| 28 |
+
s = np.array([10.0, -0.0001, 100.0])
|
| 29 |
+
step, hits = step_size_to_bound(x, s, lb, ub)
|
| 30 |
+
assert_equal(step, np.array(-0))
|
| 31 |
+
assert_equal(hits, [0, -1, 0])
|
| 32 |
+
|
| 33 |
+
s = np.array([1.0, 0.5, -2.0])
|
| 34 |
+
step, hits = step_size_to_bound(x, s, lb, ub)
|
| 35 |
+
assert_equal(step, 1.0)
|
| 36 |
+
assert_equal(hits, [1, 0, -1])
|
| 37 |
+
|
| 38 |
+
s = np.zeros(3)
|
| 39 |
+
step, hits = step_size_to_bound(x, s, lb, ub)
|
| 40 |
+
assert_equal(step, np.inf)
|
| 41 |
+
assert_equal(hits, [0, 0, 0])
|
| 42 |
+
|
| 43 |
+
def test_find_active_constraints(self):
|
| 44 |
+
lb = np.array([0.0, -10.0, 1.0])
|
| 45 |
+
ub = np.array([1.0, 0.0, 100.0])
|
| 46 |
+
|
| 47 |
+
x = np.array([0.5, -5.0, 2.0])
|
| 48 |
+
active = find_active_constraints(x, lb, ub)
|
| 49 |
+
assert_equal(active, [0, 0, 0])
|
| 50 |
+
|
| 51 |
+
x = np.array([0.0, 0.0, 10.0])
|
| 52 |
+
active = find_active_constraints(x, lb, ub)
|
| 53 |
+
assert_equal(active, [-1, 1, 0])
|
| 54 |
+
|
| 55 |
+
active = find_active_constraints(x, lb, ub, rtol=0)
|
| 56 |
+
assert_equal(active, [-1, 1, 0])
|
| 57 |
+
|
| 58 |
+
x = np.array([1e-9, -1e-8, 100 - 1e-9])
|
| 59 |
+
active = find_active_constraints(x, lb, ub)
|
| 60 |
+
assert_equal(active, [0, 0, 1])
|
| 61 |
+
|
| 62 |
+
active = find_active_constraints(x, lb, ub, rtol=1.5e-9)
|
| 63 |
+
assert_equal(active, [-1, 0, 1])
|
| 64 |
+
|
| 65 |
+
lb = np.array([1.0, -np.inf, -np.inf])
|
| 66 |
+
ub = np.array([np.inf, 10.0, np.inf])
|
| 67 |
+
|
| 68 |
+
x = np.ones(3)
|
| 69 |
+
active = find_active_constraints(x, lb, ub)
|
| 70 |
+
assert_equal(active, [-1, 0, 0])
|
| 71 |
+
|
| 72 |
+
# Handles out-of-bound cases.
|
| 73 |
+
x = np.array([0.0, 11.0, 0.0])
|
| 74 |
+
active = find_active_constraints(x, lb, ub)
|
| 75 |
+
assert_equal(active, [-1, 1, 0])
|
| 76 |
+
|
| 77 |
+
active = find_active_constraints(x, lb, ub, rtol=0)
|
| 78 |
+
assert_equal(active, [-1, 1, 0])
|
| 79 |
+
|
| 80 |
+
def test_make_strictly_feasible(self):
|
| 81 |
+
lb = np.array([-0.5, -0.8, 2.0])
|
| 82 |
+
ub = np.array([0.8, 1.0, 3.0])
|
| 83 |
+
|
| 84 |
+
x = np.array([-0.5, 0.0, 2 + 1e-10])
|
| 85 |
+
|
| 86 |
+
x_new = make_strictly_feasible(x, lb, ub, rstep=0)
|
| 87 |
+
assert_(x_new[0] > -0.5)
|
| 88 |
+
assert_equal(x_new[1:], x[1:])
|
| 89 |
+
|
| 90 |
+
x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4)
|
| 91 |
+
assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)])
|
| 92 |
+
|
| 93 |
+
x = np.array([-0.5, -1, 3.1])
|
| 94 |
+
x_new = make_strictly_feasible(x, lb, ub)
|
| 95 |
+
assert_(np.all((x_new >= lb) & (x_new <= ub)))
|
| 96 |
+
|
| 97 |
+
x_new = make_strictly_feasible(x, lb, ub, rstep=0)
|
| 98 |
+
assert_(np.all((x_new >= lb) & (x_new <= ub)))
|
| 99 |
+
|
| 100 |
+
lb = np.array([-1, 100.0])
|
| 101 |
+
ub = np.array([1, 100.0 + 1e-10])
|
| 102 |
+
x = np.array([0, 100.0])
|
| 103 |
+
x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8)
|
| 104 |
+
assert_equal(x_new, [0, 100.0 + 0.5e-10])
|
| 105 |
+
|
| 106 |
+
def test_scaling_vector(self):
|
| 107 |
+
lb = np.array([-np.inf, -5.0, 1.0, -np.inf])
|
| 108 |
+
ub = np.array([1.0, np.inf, 10.0, np.inf])
|
| 109 |
+
x = np.array([0.5, 2.0, 5.0, 0.0])
|
| 110 |
+
g = np.array([1.0, 0.1, -10.0, 0.0])
|
| 111 |
+
v, dv = CL_scaling_vector(x, g, lb, ub)
|
| 112 |
+
assert_equal(v, [1.0, 7.0, 5.0, 1.0])
|
| 113 |
+
assert_equal(dv, [0.0, 1.0, -1.0, 0.0])
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class TestQuadraticFunction:
    """Tests for the 1-D quadratic-model helpers used by the TRF solver."""

    def setup_method(self):
        # A small over-determined Jacobian, gradient and diagonal term
        # shared by all tests below.
        self.J = np.array([
            [0.1, 0.2],
            [-1.0, 1.0],
            [0.5, 0.2]])
        self.g = np.array([0.8, -2.0])
        self.diag = np.array([1.0, 2.0])

    def test_build_quadratic_1d(self):
        # A zero direction collapses the model to a == b == 0.
        s = np.zeros(2)
        a, b = build_quadratic_1d(self.J, self.g, s)
        assert_equal(a, 0)
        assert_equal(b, 0)

        a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
        assert_equal(a, 0)
        assert_equal(b, 0)

        s = np.array([1.0, -1.0])
        a, b = build_quadratic_1d(self.J, self.g, s)
        assert_equal(a, 2.05)
        assert_equal(b, 2.8)

        # The diagonal term only affects the quadratic coefficient.
        a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
        assert_equal(a, 3.55)
        assert_equal(b, 2.8)

        # A nonzero starting point s0 also produces a constant term c.
        s0 = np.array([0.5, 0.5])
        a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0)
        assert_equal(a, 3.55)
        assert_allclose(b, 2.39)
        assert_allclose(c, -0.1525)

    def test_minimize_quadratic_1d(self):
        a = 5
        b = -1

        # Unconstrained minimum is at t = 0.1; clipping to [1, 2] or
        # [-2, -1] must select the nearest endpoint.
        t, y = minimize_quadratic_1d(a, b, 1, 2)
        assert_equal(t, 1)
        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)

        t, y = minimize_quadratic_1d(a, b, -2, -1)
        assert_equal(t, -1)
        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)

        t, y = minimize_quadratic_1d(a, b, -1, 1)
        assert_equal(t, 0.1)
        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)

        # The constant term shifts the value but not the minimizer.
        c = 10
        t, y = minimize_quadratic_1d(a, b, -1, 1, c=c)
        assert_equal(t, 0.1)
        assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15)

        # Infinite intervals still locate the interior minimum.
        t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c)
        assert_equal(t, 0.1)
        assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15)

        t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c)
        assert_equal(t, 0.1)
        assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15)

        t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c)
        assert_equal(t, 0)
        assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15)

        # Concave model (a < 0): the minimum escapes to an infinite endpoint.
        a = -1
        b = 0.2
        t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf)
        assert_equal(y, -np.inf)

        t, y = minimize_quadratic_1d(a, b, 0, np.inf)
        assert_equal(t, np.inf)
        assert_equal(y, -np.inf)

        t, y = minimize_quadratic_1d(a, b, -np.inf, 0)
        assert_equal(t, -np.inf)
        assert_equal(y, -np.inf)

    def test_evaluate_quadratic(self):
        # Single direction.
        s = np.array([1.0, -1.0])

        value = evaluate_quadratic(self.J, self.g, s)
        assert_equal(value, 4.85)

        value = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
        assert_equal(value, 6.35)

        # A batch of directions, one per row.
        s = np.array([[1.0, -1.0],
                      [1.0, 1.0],
                      [0.0, 0.0]])

        values = evaluate_quadratic(self.J, self.g, s)
        assert_allclose(values, [4.85, -0.91, 0.0])

        values = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
        assert_allclose(values, [6.35, 0.59, 0.0])
+
|
| 216 |
+
class TestTrustRegion:
    """Tests for the line/sphere intersection used by trust-region steps."""

    def test_intersect(self):
        Delta = 1.0

        # Ray through the origin along a coordinate axis.
        x = np.zeros(3)
        s = np.array([1.0, 0.0, 0.0])
        t_neg, t_pos = intersect_trust_region(x, s, Delta)
        assert_equal(t_neg, -1)
        assert_equal(t_pos, 1)

        # Oblique direction: |t| = 1 / ||s||.
        s = np.array([-1.0, 1.0, -1.0])
        t_neg, t_pos = intersect_trust_region(x, s, Delta)
        assert_allclose(t_neg, -3**-0.5)
        assert_allclose(t_pos, 3**-0.5)

        # Interior starting point away from the origin.
        x = np.array([0.5, -0.5, 0])
        s = np.array([0, 0, 1.0])
        t_neg, t_pos = intersect_trust_region(x, s, Delta)
        assert_allclose(t_neg, -2**-0.5)
        assert_allclose(t_pos, 2**-0.5)

        # Starting outside the region is rejected.
        x = np.ones(3)
        assert_raises(ValueError, intersect_trust_region, x, s, Delta)

        # As is a zero step direction.
        x = np.zeros(3)
        s = np.zeros(3)
        assert_raises(ValueError, intersect_trust_region, x, s, Delta)
+
|
| 245 |
+
def test_reflective_transformation():
    """Out-of-bound points fold back into the box; g records the sign flip."""
    lb = np.array([-1, -2], dtype=float)
    ub = np.array([5, 3], dtype=float)

    # Interior points map to themselves with unit gradient factors.
    y = np.array([0, 0])
    x, g = reflective_transformation(y, lb, ub)
    assert_equal(x, y)
    assert_equal(g, np.ones(2))

    y = np.array([-4, 4], dtype=float)

    # Only a lower bound present: reflect about lb.
    x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf]))
    assert_equal(x, [2, 4])
    assert_equal(g, [-1, 1])

    # Only an upper bound present: reflect about ub.
    x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub)
    assert_equal(x, [-4, 2])
    assert_equal(g, [1, -1])

    # Both bounds present.
    x, g = reflective_transformation(y, lb, ub)
    assert_equal(x, [2, 2])
    assert_equal(g, [-1, -1])

    # Mixed finite/infinite bounds.
    lb = np.array([-np.inf, -2])
    ub = np.array([5, np.inf])
    y = np.array([10, 10], dtype=float)
    x, g = reflective_transformation(y, lb, ub)
    assert_equal(x, [0, 10])
    assert_equal(g, [-1, 1])
+
|
| 276 |
+
def test_linear_operators():
    """left/right multiplied operators must match explicit diag products."""
    A = np.arange(6).reshape((3, 2))

    d_left = np.array([-1, 2, 5])
    DA = np.diag(d_left).dot(A)
    J_left = left_multiplied_operator(A, d_left)

    d_right = np.array([5, 10])
    AD = A.dot(np.diag(d_right))
    J_right = right_multiplied_operator(A, d_right)

    x = np.array([-2, 3])
    X = -2 * np.arange(2, 8).reshape((2, 3))
    xt = np.array([0, -2, 15])

    # matvec / matmat / rmatvec of diag(d_left) @ A.
    assert_allclose(DA.dot(x), J_left.matvec(x))
    assert_allclose(DA.dot(X), J_left.matmat(X))
    assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt))

    # matvec / matmat / rmatvec of A @ diag(d_right).
    assert_allclose(AD.dot(x), J_right.matvec(x))
    assert_allclose(AD.dot(X), J_right.matmat(X))
    assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt))
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.linalg import lstsq
|
| 5 |
+
from numpy.testing import assert_allclose, assert_equal, assert_
|
| 6 |
+
|
| 7 |
+
from scipy.sparse import rand, coo_matrix
|
| 8 |
+
from scipy.sparse.linalg import aslinearoperator
|
| 9 |
+
from scipy.optimize import lsq_linear
|
| 10 |
+
from scipy.optimize._minimize import Bounds
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Small over-determined 3x2 system shared by the dense tests below.
A = np.array([
    [0.171, -0.057],
    [-0.049, -0.248],
    [-0.166, 0.054],
])
b = np.array([0.074, 1.014, -0.383])
| 20 |
+
|
| 21 |
+
class BaseMixin:
    """Dense-solver tests shared by the per-method test classes.

    Subclasses must define ``method`` and ``lsq_solvers``.
    """

    def setup_method(self):
        # Fixed seed so the random test cases are reproducible.
        self.rnd = np.random.RandomState(0)

    def test_dense_no_bounds(self):
        # Without bounds the result must equal the plain least-squares fit.
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
            assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])
            assert_allclose(res.x, res.unbounded_sol[0])

    def test_dense_bounds(self):
        # Solutions for comparison are taken from MATLAB.
        lb = np.array([-1, -10])
        ub = np.array([1, 0])
        unbounded_sol = lstsq(A, b, rcond=-1)[0]
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, ub), method=self.method,
                             lsq_solver=lsq_solver)
            # The unconstrained optimum lies inside this box.
            assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])
            assert_allclose(res.unbounded_sol[0], unbounded_sol)

        lb = np.array([0.0, -np.inf])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, np.inf), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
                            atol=1e-6)
            assert_allclose(res.unbounded_sol[0], unbounded_sol)

        lb = np.array([-1, 0])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, np.inf), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([0.448427311733504, 0]),
                            atol=1e-15)
            assert_allclose(res.unbounded_sol[0], unbounded_sol)

        ub = np.array([np.inf, -5])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([-0.105560998682388, -5]))
            assert_allclose(res.unbounded_sol[0], unbounded_sol)

        ub = np.array([-1, np.inf])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([-1, -4.181102129483254]))
            assert_allclose(res.unbounded_sol[0], unbounded_sol)

        lb = np.array([0, -4])
        ub = np.array([1, 0])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([0.005236663400791, -4]))
            assert_allclose(res.unbounded_sol[0], unbounded_sol)

    def test_bounds_variants(self):
        # Old-style (lb, ub) tuples and new-style Bounds objects must agree.
        x = np.array([1, 3])
        A = self.rnd.uniform(size=(2, 2))
        b = A @ x
        lb = np.array([1, 1])
        ub = np.array([2, 2])
        bounds_old = (lb, ub)
        bounds_new = Bounds(lb, ub)
        res_old = lsq_linear(A, b, bounds_old)
        res_new = lsq_linear(A, b, bounds_new)
        # The constraint must actually bind for this comparison to matter.
        assert not np.allclose(res_new.x, res_new.unbounded_sol[0])
        assert_allclose(res_old.x, res_new.x)

    def test_np_matrix(self):
        # gh-10711
        with np.testing.suppress_warnings() as sup:
            sup.filter(PendingDeprecationWarning)
            A = np.matrix([[20, -4, 0, 2, 3], [10, -2, 1, 0, -1]])
            k = np.array([20, 15])
            lsq_linear(A, k)

    def test_dense_rank_deficient(self):
        # A single under-determined row: solution pinned to a bound corner.
        A = np.array([[-0.307, -0.184]])
        b = np.array([0.773])
        lb = [-0.1, -0.1]
        ub = [0.1, 0.1]
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, [-0.1, -0.1])
            assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])

        # Rank-1 matrix (second column is twice the first).
        A = np.array([
            [0.334, 0.668],
            [-0.516, -1.032],
            [0.192, 0.384],
        ])
        b = np.array([-1.436, 0.135, 0.909])
        lb = [0, -1]
        ub = [1, -0.5]
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.optimality, 0, atol=1e-11)
            assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])

    def test_full_result(self):
        # Check every field of the returned OptimizeResult.
        lb = np.array([0, -4])
        ub = np.array([1, 0])
        res = lsq_linear(A, b, (lb, ub), method=self.method)

        assert_allclose(res.x, [0.005236663400791, -4])
        assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])

        r = A.dot(res.x) - b
        assert_allclose(res.cost, 0.5 * np.dot(r, r))
        assert_allclose(res.fun, r)

        assert_allclose(res.optimality, 0.0, atol=1e-12)
        assert_equal(res.active_mask, [0, -1])
        assert_(res.nit < 15)
        assert_(res.status == 1 or res.status == 3)
        assert_(isinstance(res.message, str))
        assert_(res.success)

    # This is a test for issue #9982.
    def test_almost_singular(self):
        A = np.array(
            [[0.8854232310355122, 0.0365312146937765, 0.0365312146836789],
             [0.3742460132129041, 0.0130523214078376, 0.0130523214077873],
             [0.9680633871281361, 0.0319366128718639, 0.0319366128718388]])

        b = np.array(
            [0.0055029366538097, 0.0026677442422208, 0.0066612514782381])

        result = lsq_linear(A, b, method=self.method)
        assert_(result.cost < 1.1e-8)

    @pytest.mark.xslow
    def test_large_rank_deficient(self):
        # BVLS and TRF should reach the same cost on a large degenerate
        # problem.
        np.random.seed(0)
        n, m = np.sort(np.random.randint(2, 1000, size=2))
        m *= 2  # make m >> n
        A = 1.0 * np.random.randint(-99, 99, size=[m, n])
        b = 1.0 * np.random.randint(-99, 99, size=[m])
        bounds = 1.0 * np.sort(np.random.randint(-99, 99, size=(2, n)), axis=0)
        bounds[1, :] += 1.0  # ensure up > lb

        # Make the A matrix strongly rank deficient by replicating some columns
        w = np.random.choice(n, n)  # Select random columns with duplicates
        A = A[:, w]

        x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x
        x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x

        cost_bvls = np.sum((A @ x_bvls - b)**2)
        cost_trf = np.sum((A @ x_trf - b)**2)

        assert_(abs(cost_bvls - cost_trf) < cost_trf * 1e-10)

    def test_convergence_small_matrix(self):
        A = np.array([[49.0, 41.0, -32.0],
                      [-19.0, -32.0, -8.0],
                      [-13.0, 10.0, 69.0]])
        b = np.array([-41.0, -90.0, 47.0])
        bounds = np.array([[31.0, -44.0, 26.0],
                           [54.0, -32.0, 28.0]])

        x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x
        x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x

        cost_bvls = np.sum((A @ x_bvls - b)**2)
        cost_trf = np.sum((A @ x_trf - b)**2)

        assert_(abs(cost_bvls - cost_trf) < cost_trf * 1e-10)
+
|
| 197 |
+
class SparseMixin:
    """Tests exercising sparse matrices and LinearOperator inputs."""

    def test_sparse_and_LinearOperator(self):
        m = 5000
        n = 1000
        rng = np.random.RandomState(0)
        A = rand(m, n, random_state=rng)
        b = rng.randn(m)
        res = lsq_linear(A, b)
        assert_allclose(res.optimality, 0, atol=1e-6)

        # The same problem wrapped as an abstract linear operator.
        A = aslinearoperator(A)
        res = lsq_linear(A, b)
        assert_allclose(res.optimality, 0, atol=1e-6)

    @pytest.mark.fail_slow(10)
    def test_sparse_bounds(self):
        m = 5000
        n = 1000
        rng = np.random.RandomState(0)
        A = rand(m, n, random_state=rng)
        b = rng.randn(m)
        lb = rng.randn(n)
        ub = lb + 1
        res = lsq_linear(A, b, (lb, ub))
        assert_allclose(res.optimality, 0.0, atol=1e-6)

        # Explicit lsmr tolerance and iteration limit.
        res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13,
                         lsmr_maxiter=1500)
        assert_allclose(res.optimality, 0.0, atol=1e-6)

        # Automatic tolerance selection.
        res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
        assert_allclose(res.optimality, 0.0, atol=1e-6)

    def test_sparse_ill_conditioned(self):
        # Sparse matrix with condition number of ~4 million
        data = np.array([1., 1., 1., 1. + 1e-6, 1.])
        row = np.array([0, 0, 1, 2, 2])
        col = np.array([0, 2, 1, 0, 2])
        A = coo_matrix((data, (row, col)), shape=(3, 3))

        # Get the exact solution
        exact_sol = lsq_linear(A.toarray(), b, lsq_solver='exact')

        # Default lsmr arguments should not fully converge the solution
        default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr')
        with pytest.raises(AssertionError, match=""):
            assert_allclose(exact_sol.x, default_lsmr_sol.x)

        # By increasing the maximum lsmr iters, it will converge
        conv_lsmr = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=10)
        assert_allclose(exact_sol.x, conv_lsmr.x)
+
|
| 250 |
+
class TestTRF(BaseMixin, SparseMixin):
    # Trust-region reflective handles both dense and sparse problems.
    method = 'trf'
    lsq_solvers = ['exact', 'lsmr']
+
|
| 255 |
+
class TestBVLS(BaseMixin):
    # BVLS is dense-only and supports the exact solver exclusively.
    method = 'bvls'
    lsq_solvers = ['exact']
+
|
| 260 |
+
class TestErrorChecking:
    """Validation of the lsmr-related keyword arguments of lsq_linear."""

    def test_option_lsmr_tol(self):
        # Should work with a positive float, string equal to 'auto', or None
        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1e-2)
        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='auto')
        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=None)

        # Should raise error with negative float, strings
        # other than 'auto', and integers
        err_message = "`lsmr_tol` must be None, 'auto', or positive float."
        for bad_tol in (-0.1, 'foo', 1):
            with pytest.raises(ValueError, match=err_message):
                _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=bad_tol)

    def test_option_lsmr_maxiter(self):
        # Should work with positive integers or None
        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1)
        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None)

        # Should raise error with 0 or negative max iter
        err_message = "`lsmr_maxiter` must be None or positive integer."
        for bad_maxiter in (0, -1):
            with pytest.raises(ValueError, match=err_message):
                _ = lsq_linear(A, b, lsq_solver='lsmr',
                               lsmr_maxiter=bad_maxiter)
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py
ADDED
|
@@ -0,0 +1,1194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for optimization routines from minpack.py.
|
| 3 |
+
"""
|
| 4 |
+
import warnings
|
| 5 |
+
import pytest
|
| 6 |
+
import threading
|
| 7 |
+
|
| 8 |
+
from numpy.testing import (assert_, assert_almost_equal, assert_array_equal,
|
| 9 |
+
assert_array_almost_equal, assert_allclose,
|
| 10 |
+
assert_warns, suppress_warnings)
|
| 11 |
+
from pytest import raises as assert_raises
|
| 12 |
+
import numpy as np
|
| 13 |
+
from numpy import array, float64
|
| 14 |
+
from multiprocessing.pool import ThreadPool
|
| 15 |
+
|
| 16 |
+
from scipy import optimize, linalg
|
| 17 |
+
from scipy.special import lambertw
|
| 18 |
+
from scipy.optimize._minpack_py import leastsq, curve_fit, fixed_point
|
| 19 |
+
from scipy.optimize import OptimizeWarning
|
| 20 |
+
from scipy.optimize._minimize import Bounds
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class ReturnShape:
    """This class exists to create a callable that does not have a '__name__' attribute.

    __init__ takes the argument 'shape', which should be a tuple of ints.
    When an instance is called with a single argument 'x', it returns numpy.ones(shape).
    """

    def __init__(self, shape):
        # Shape of the constant array produced by every call.
        self.shape = shape

    def __call__(self, x):
        # 'x' is deliberately ignored; the output never depends on it.
        return np.ones(self.shape)
+
|
| 37 |
+
def dummy_func(x, shape):
    """Return an array of ones of the given shape; `x` is ignored."""
    return np.ones(shape)
+
|
| 44 |
+
def sequence_parallel(fs):
    """Invoke the zero-argument callables in `fs` concurrently and return
    their results in the original order."""
    with ThreadPool(len(fs)) as pool:
        return pool.map(lambda f: f(), fs)
+
|
| 49 |
+
# Function and Jacobian for tests of solvers for systems of nonlinear
|
| 50 |
+
# equations
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def pressure_network(flow_rates, Qtot, k):
    """Evaluate non-linear equation system representing
    the pressures and flows in a system of n parallel pipes::

        f_i = P_i - P_0, for i = 1..n
        f_0 = sum(Q_i) - Qtot

    where Q_i is the flow rate in pipe i and P_i the pressure in that pipe.
    Pressure is modeled as a P=kQ**2 where k is a valve coefficient and
    Q is the flow rate.

    Parameters
    ----------
    flow_rates : float
        A 1-D array of n flow rates [kg/s].
    Qtot : float
        A scalar, the total input flow rate [kg/s].
    k : float
        A 1-D array of n valve coefficients [1/kg m].

    Returns
    -------
    F : float
        A 1-D array, F[i] == f_i.

    """
    P = k * flow_rates**2
    # Pressure balance of pipes 1..n-1 against pipe 0, plus total-flow
    # conservation as the last residual.
    F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
    return F
+
|
| 84 |
+
def pressure_network_jacobian(flow_rates, Qtot, k):
    """Return the Jacobian of the equation system F(flow_rates)
    computed by `pressure_network` with respect to
    *flow_rates*. See `pressure_network` for the detailed
    description of parameters.

    Returns
    -------
    jac : ndarray
        *n* by *n* matrix ``df_i/dQ_j`` where ``n = len(flow_rates)``
        and *f_i* and *Q_i* are described in the doc for `pressure_network`
    """
    # BUG FIX: the previous version assigned ``pdiff * 0`` into the
    # pressure sub-block (wiping it to zero) and folded the -2*k[0]*Q[0]
    # cross term onto the wrong (diagonal) entries, so the returned matrix
    # was singular and not the Jacobian of `pressure_network` at all.
    n = len(flow_rates)
    jac = np.zeros((n, n))

    # Rows 0..n-2: f_i = k[i+1]*Q[i+1]**2 - k[0]*Q[0]**2, hence
    #   df_i/dQ[i+1] = 2*k[i+1]*Q[i+1]   and   df_i/dQ[0] = -2*k[0]*Q[0].
    jac[:n-1, 1:] = np.diag(2 * k[1:] * flow_rates[1:])
    jac[:n-1, 0] = -2 * k[0] * flow_rates[0]

    # Last row: f_{n-1} = sum(Q) - Qtot, so every partial derivative is 1.
    jac[n-1, :] = np.ones(n)

    return jac
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def pressure_network_fun_and_grad(flow_rates, Qtot, k):
    """Return ``(residuals, jacobian)`` of the pressure network in one call,
    for solvers invoked with ``jac=True``."""
    residuals = pressure_network(flow_rates, Qtot, k)
    jacobian = pressure_network_jacobian(flow_rates, Qtot, k)
    return residuals, jacobian
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class TestFSolve:
    # End-to-end tests of `optimize.fsolve` on the pressure-network system
    # defined above, plus shape-validation, exception-propagation,
    # re-entrancy and thread-safety checks of the MINPACK wrapper.

    def test_pressure_network_no_gradient(self):
        # fsolve without gradient, equal pipes -> equal flows.
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows, info, ier, mesg = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            full_output=True)
        assert_array_almost_equal(final_flows, np.ones(4))
        assert_(ier == 1, mesg)

    def test_pressure_network_with_gradient(self):
        # fsolve with gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            fprime=pressure_network_jacobian)
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_wrong_shape_func_callable(self):
        func = ReturnShape(1)
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.fsolve, func, x0)

    def test_wrong_shape_func_function(self):
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))

    def test_wrong_shape_fprime_callable(self):
        # fsolve must reject a Jacobian whose shape disagrees with x0/func.
        func = ReturnShape(1)
        deriv_func = ReturnShape((2,2))
        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)

    def test_wrong_shape_fprime_function(self):
        # Same check as above, with plain functions instead of callables.
        def func(x):
            return dummy_func(x, (2,))
        def deriv_func(x):
            return dummy_func(x, (3, 3))
        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)

    def test_func_can_raise(self):
        # Exceptions raised inside the objective must propagate to the caller.
        def func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.fsolve(func, x0=[0])

    def test_Dfun_can_raise(self):
        # Exceptions raised inside the user Jacobian must propagate too.
        def func(x):
            return x - np.array([10])

        def deriv_func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.fsolve(func, x0=[0], fprime=deriv_func)

    def test_float32(self):
        # float32 input must still converge (checked with a loose tolerance).
        def func(x):
            return np.array([x[0] - 100, x[1] - 1000], dtype=np.float32) ** 2
        p = optimize.fsolve(func, np.array([1, 1], np.float32))
        assert_allclose(func(p), [0, 0], atol=1e-3)

    def test_reentrant_func(self):
        # The objective may itself call fsolve; the wrapper must be re-entrant.
        def func(*args):
            self.test_pressure_network_no_gradient()
            return pressure_network(*args)

        # fsolve without gradient, equal pipes -> equal flows.
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows, info, ier, mesg = optimize.fsolve(
            func, initial_guess, args=(Qtot, k),
            full_output=True)
        assert_array_almost_equal(final_flows, np.ones(4))
        assert_(ier == 1, mesg)

    def test_reentrant_Dfunc(self):
        # Same re-entrancy check for the user-supplied Jacobian.
        def deriv_func(*args):
            self.test_pressure_network_with_gradient()
            return pressure_network_jacobian(*args)

        # fsolve with gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            fprime=deriv_func)
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_concurrent_no_gradient(self):
        # fsolve must be safe to call from several threads simultaneously.
        v = sequence_parallel([self.test_pressure_network_no_gradient] * 10)
        assert all([result is None for result in v])

    def test_concurrent_with_gradient(self):
        # Concurrent calls with a user Jacobian must also be safe.
        v = sequence_parallel([self.test_pressure_network_with_gradient] * 10)
        assert all([result is None for result in v])
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
class TestRootHybr:
    # Tests of `optimize.root` with method='hybr' (MINPACK hybrd/hybrj)
    # on the pressure-network system.

    def test_pressure_network_no_gradient(self):
        # root/hybr without gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.root(pressure_network, initial_guess,
                                    method='hybr', args=(Qtot, k)).x
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_pressure_network_with_gradient(self):
        # root/hybr with gradient, equal pipes -> equal flows
        # NOTE: the initial guess is deliberately 2-D; root must flatten it.
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([[2., 0., 2., 0.]])
        final_flows = optimize.root(pressure_network, initial_guess,
                                    args=(Qtot, k), method='hybr',
                                    jac=pressure_network_jacobian).x
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_pressure_network_with_gradient_combined(self):
        # root/hybr with gradient and function combined, equal pipes -> equal
        # flows (jac=True means the callable returns (fun, jac) together)
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.root(pressure_network_fun_and_grad,
                                    initial_guess, args=(Qtot, k),
                                    method='hybr', jac=True).x
        assert_array_almost_equal(final_flows, np.ones(4))
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
class TestRootLM:
    # Tests of `optimize.root` with method='lm' (Levenberg-Marquardt).

    def test_pressure_network_no_gradient(self):
        # root/lm without gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.root(pressure_network, initial_guess,
                                    method='lm', args=(Qtot, k)).x
        assert_array_almost_equal(final_flows, np.ones(4))
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
class TestNfev:
    # Verify that the function-evaluation counts reported by the solvers
    # match the number of calls actually made to the objective.

    def setup_method(self):
        # thread-local counter so concurrent test runs don't interfere
        self.nfev = threading.local()

    def zero_f(self, y):
        # Objective with a real root at sqrt(3); counts every invocation.
        if not hasattr(self.nfev, 'c'):
            self.nfev.c = 0
        self.nfev.c += 1
        return y**2-3

    @pytest.mark.parametrize('method', ['hybr', 'lm', 'broyden1',
                                        'broyden2', 'anderson',
                                        'linearmixing', 'diagbroyden',
                                        'excitingmixing', 'krylov',
                                        'df-sane'])
    def test_root_nfev(self, method):
        # The solver's reported nfev must equal the observed call count.
        self.nfev.c = 0
        solution = optimize.root(self.zero_f, 100, method=method)
        assert solution.nfev == self.nfev.c

    def test_fsolve_nfev(self):
        # Same check for fsolve's full_output info dict.
        self.nfev.c = 0
        x, info, ier, mesg = optimize.fsolve(self.zero_f, 100, full_output=True)
        assert info['nfev'] == self.nfev.c
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class TestLeastSq:
    # Tests of `optimize.leastsq` on a noisy quadratic fitting problem,
    # plus shape-validation, exception-propagation and concurrency checks.

    def setup_method(self):
        # Synthesize noisy observations of a*x**2 + b*x + c with a fixed seed.
        x = np.linspace(0, 10, 40)
        a,b,c = 3.1, 42, -304.2
        self.x = x
        self.abc = a,b,c
        y_true = a*x**2 + b*x + c
        np.random.seed(0)
        self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)

    def residuals(self, p, y, x):
        # Residuals of the quadratic model for parameters p = (a, b, c).
        a,b,c = p
        err = y-(a*x**2 + b*x + c)
        return err

    def residuals_jacobian(self, _p, _y, x):
        # Analytic Jacobian of `residuals`; constant in p (model is linear
        # in its parameters).
        return -np.vstack([x**2, x, np.ones_like(x)]).T

    def test_basic(self):
        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x))
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_basic_with_gradient(self):
        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x),
                                  Dfun=self.residuals_jacobian)
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_full_output(self):
        # NOTE: p0 is deliberately 2-D; leastsq must accept and flatten it.
        p0 = array([[0,0,0]])
        full_output = leastsq(self.residuals, p0,
                              args=(self.y_meas, self.x),
                              full_output=True)
        params_fit, cov_x, infodict, mesg, ier = full_output
        assert_(ier in (1,2,3,4), f'solution not found: {mesg}')

    def test_input_untouched(self):
        # leastsq must not modify the caller's x0 array in place.
        p0 = array([0,0,0],dtype=float64)
        p0_copy = array(p0, copy=True)
        full_output = leastsq(self.residuals, p0,
                              args=(self.y_meas, self.x),
                              full_output=True)
        params_fit, cov_x, infodict, mesg, ier = full_output
        assert_(ier in (1,2,3,4), f'solution not found: {mesg}')
        assert_array_equal(p0, p0_copy)

    def test_wrong_shape_func_callable(self):
        func = ReturnShape(1)
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.leastsq, func, x0)

    def test_wrong_shape_func_function(self):
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),))

    def test_wrong_shape_Dfun_callable(self):
        # leastsq must reject a Jacobian whose shape disagrees with func/x0.
        func = ReturnShape(1)
        deriv_func = ReturnShape((2,2))
        assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)

    def test_wrong_shape_Dfun_function(self):
        # Same check as above, with plain functions instead of callables.
        def func(x):
            return dummy_func(x, (2,))
        def deriv_func(x):
            return dummy_func(x, (3, 3))
        assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)

    def test_float32(self):
        # Regression test for gh-1447
        def func(p,x,y):
            q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
            return q - y

        x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286,
                      1.231], dtype=np.float32)
        y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258,
                      0.034,0.0396], dtype=np.float32)
        p0 = np.array([1.0,1.0,1.0,1.0])
        p1, success = optimize.leastsq(func, p0, args=(x,y))

        assert_(success in [1,2,3,4])
        assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum())

    def test_func_can_raise(self):
        # Exceptions raised inside the objective must propagate unchanged.
        def func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.leastsq(func, x0=[0])

    def test_Dfun_can_raise(self):
        # Exceptions raised inside the user Jacobian must propagate too.
        def func(x):
            return x - np.array([10])

        def deriv_func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.leastsq(func, x0=[0], Dfun=deriv_func)

    def test_reentrant_func(self):
        # The objective may itself call leastsq (re-entrancy check).
        def func(*args):
            self.test_basic()
            return self.residuals(*args)

        p0 = array([0,0,0])
        params_fit, ier = leastsq(func, p0,
                                  args=(self.y_meas, self.x))
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_reentrant_Dfun(self):
        # Same re-entrancy check for the user-supplied Jacobian.
        def deriv_func(*args):
            self.test_basic()
            return self.residuals_jacobian(*args)

        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x),
                                  Dfun=deriv_func)
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_concurrent_no_gradient(self):
        # leastsq must be safe to call from several threads simultaneously.
        v = sequence_parallel([self.test_basic] * 10)
        assert all([result is None for result in v])

    def test_concurrent_with_gradient(self):
        # Concurrent calls with a user Jacobian must also be safe.
        v = sequence_parallel([self.test_basic_with_gradient] * 10)
        assert all([result is None for result in v])

    def test_func_input_output_length_check(self):
        # A scalar-returning func with len(x0) == 2 must raise, since
        # leastsq requires M >= N residuals.

        def func(x):
            return 2 * (x[0] - 3) ** 2 + 1

        with assert_raises(TypeError,
                           match='Improper input: func input vector length N='):
            optimize.leastsq(func, x0=[0, 1])
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
class TestCurveFit:
|
| 444 |
+
    def setup_method(self):
        # Small power-law dataset shared by the basic curve_fit tests.
        self.y = array([1.0, 3.2, 9.5, 13.7])
        self.x = array([1.0, 2.0, 3.0, 4.0])
|
| 447 |
+
|
| 448 |
+
    def test_one_argument(self):
        # Fit y = x**a; check popt/pcov values and full_output consistency.
        def func(x,a):
            return x**a
        popt, pcov = curve_fit(func, self.x, self.y)
        assert_(len(popt) == 1)
        assert_(pcov.shape == (1,1))
        assert_almost_equal(popt[0], 1.9149, decimal=4)
        assert_almost_equal(pcov[0,0], 0.0016, decimal=4)

        # Test if we get the same with full_output. Regression test for #1415.
        # Also test if check_finite can be turned off.
        res = curve_fit(func, self.x, self.y,
                        full_output=1, check_finite=False)
        (popt2, pcov2, infodict, errmsg, ier) = res
        assert_array_almost_equal(popt, popt2)
|
| 463 |
+
|
| 464 |
+
    def test_two_argument(self):
        # Fit y = b * x**a; check both parameters and the covariance matrix.
        def func(x, a, b):
            return b*x**a
        popt, pcov = curve_fit(func, self.x, self.y)
        assert_(len(popt) == 2)
        assert_(pcov.shape == (2,2))
        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
        assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
                                  decimal=4)
|
| 473 |
+
|
| 474 |
+
    def test_func_is_classmethod(self):
        # curve_fit must strip the bound `self` when counting parameters.
        class test_self:
            """This class tests if curve_fit passes the correct number of
            arguments when the model function is a class instance method.
            """

            def func(self, x, a, b):
                return b * x**a

        test_self_inst = test_self()
        popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
        assert_(pcov.shape == (2,2))
        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
        assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
                                  decimal=4)
|
| 489 |
+
|
| 490 |
+
    def test_regression_2639(self):
        # This test fails if epsfcn in leastsq is too large.
        x = [574.14200000000005, 574.154, 574.16499999999996,
             574.17700000000002, 574.18799999999999, 574.19899999999996,
             574.21100000000001, 574.22199999999998, 574.23400000000004,
             574.245]
        y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
             1550.0, 949.0, 841.0]
        guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
                 0.0035019999999983615, 859.0]
        good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03,
                1.0068462e-02, 8.57450661e+02]

        # Two-Gaussian-plus-offset model; the fit is ill-conditioned enough
        # to expose a too-coarse finite-difference step.
        def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
            return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
                    + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)
        popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
        assert_allclose(popt, good, rtol=1e-5)
|
| 508 |
+
|
| 509 |
+
    def test_pcov(self):
        # Covariance scaling: with relative sigma, rescaling sigma must not
        # change pcov; with absolute_sigma=True it must scale quadratically.
        xdata = np.array([0, 1, 2, 3, 4, 5])
        ydata = np.array([1, 1, 5, 7, 8, 12])
        sigma = np.array([1, 2, 1, 2, 1, 2])

        def f(x, a, b):
            return a*x + b

        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
                                   method=method)
            perr_scaled = np.sqrt(np.diag(pcov))
            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
                                   method=method)
            perr_scaled = np.sqrt(np.diag(pcov))
            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
                                   absolute_sigma=True, method=method)
            perr = np.sqrt(np.diag(pcov))
            assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)

            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
                                   absolute_sigma=True, method=method)
            perr = np.sqrt(np.diag(pcov))
            assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3)

        # infinite variances

        def f_flat(x, a, b):
            return a*x

        pcov_expected = np.array([np.inf]*4).reshape(2, 2)

        # A degenerate model (b unused) and an underdetermined fit must both
        # yield an all-inf covariance, with a warning (suppressed here).
        with suppress_warnings() as sup:
            sup.filter(OptimizeWarning,
                       "Covariance of the parameters could not be estimated")
            popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma)
            popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])

        assert_(pcov.shape == (2, 2))
        assert_array_equal(pcov, pcov_expected)

        assert_(pcov1.shape == (2, 2))
        assert_array_equal(pcov1, pcov_expected)
|
| 556 |
+
|
| 557 |
+
    def test_array_like(self):
        # Test sequence input. Regression test for gh-3037.
        def f_linear(x, a, b):
            return a*x + b

        x = [1, 2, 3, 4]
        y = [3, 5, 7, 9]
        assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)
|
| 565 |
+
|
| 566 |
+
    @pytest.mark.thread_unsafe
    def test_indeterminate_covariance(self):
        # Test that a warning is returned when pcov is indeterminate
        xdata = np.array([1, 2, 3, 4, 5, 6])
        ydata = np.array([1, 2, 3, 4, 5.5, 6])
        assert_warns(OptimizeWarning, curve_fit,
                     lambda x, a, b: a*x, xdata, ydata)
|
| 573 |
+
|
| 574 |
+
    def test_NaN_handling(self):
        # Test for correct handling of NaNs in input data: gh-3422

        # create input with NaNs
        xdata = np.array([1, np.nan, 3])
        ydata = np.array([1, 2, 3])

        # NaN in either xdata or ydata must raise by default...
        assert_raises(ValueError, curve_fit,
                      lambda x, a, b: a*x + b, xdata, ydata)
        assert_raises(ValueError, curve_fit,
                      lambda x, a, b: a*x + b, ydata, xdata)

        # ...and also when check_finite is passed explicitly.
        assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b,
                      xdata, ydata, **{"check_finite": True})
|
| 588 |
+
|
| 589 |
+
    @staticmethod
    def _check_nan_policy(f, xdata_with_nan, xdata_without_nan,
                          ydata_with_nan, ydata_without_nan, method):
        # Shared driver for the nan_policy tests: exercise 'propagate',
        # 'raise', 'omit' and an invalid policy against the given data.
        kwargs = {'f': f, 'xdata': xdata_with_nan, 'ydata': ydata_with_nan,
                  'method': method, 'check_finite': False}
        # propagate test
        error_msg = ("`nan_policy='propagate'` is not supported "
                     "by this function.")
        with assert_raises(ValueError, match=error_msg):
            curve_fit(**kwargs, nan_policy="propagate", maxfev=2000)

        # raise test
        with assert_raises(ValueError, match="The input contains nan"):
            curve_fit(**kwargs, nan_policy="raise")

        # omit test: result with NaNs omitted must match a fit on the
        # manually cleaned data.
        result_with_nan, _ = curve_fit(**kwargs, nan_policy="omit")
        kwargs['xdata'] = xdata_without_nan
        kwargs['ydata'] = ydata_without_nan
        result_without_nan, _ = curve_fit(**kwargs)
        assert_allclose(result_with_nan, result_without_nan)

        # not valid policy test
        # check for argument names in any order
        error_msg = (r"nan_policy must be one of \{(?:'raise'|'omit'|None)"
                     r"(?:, ?(?:'raise'|'omit'|None))*\}")
        with assert_raises(ValueError, match=error_msg):
            curve_fit(**kwargs, nan_policy="hi")
|
| 617 |
+
|
| 618 |
+
    @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"])
    def test_nan_policy_1d(self, method):
        # nan_policy with 1-D xdata: rows with a NaN in x or y are dropped.
        def f(x, a, b):
            return a*x + b

        xdata_with_nan = np.array([2, 3, np.nan, 4, 4, np.nan])
        ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7])
        xdata_without_nan = np.array([2, 3, 4])
        ydata_without_nan = np.array([1, 2, 3])

        self._check_nan_policy(f, xdata_with_nan, xdata_without_nan,
                               ydata_with_nan, ydata_without_nan, method)
|
| 630 |
+
|
| 631 |
+
    @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"])
    def test_nan_policy_2d(self, method):
        # nan_policy with 2-D xdata: a NaN anywhere in a column drops it.
        def f(x, a, b):
            x1 = x[0, :]
            x2 = x[1, :]
            return a*x1 + b + x2

        xdata_with_nan = np.array([[2, 3, np.nan, 4, 4, np.nan, 5],
                                   [2, 3, np.nan, np.nan, 4, np.nan, 7]])
        ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10])
        xdata_without_nan = np.array([[2, 3, 5], [2, 3, 7]])
        ydata_without_nan = np.array([1, 2, 10])

        self._check_nan_policy(f, xdata_with_nan, xdata_without_nan,
                               ydata_with_nan, ydata_without_nan, method)
|
| 646 |
+
|
| 647 |
+
    @pytest.mark.parametrize('n', [2, 3])
    @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"])
    def test_nan_policy_2_3d(self, n, method):
        # nan_policy with 2-D vs 3-D xdata (the leading singleton axis is
        # squeezed away inside the model for both ranks).
        def f(x, a, b):
            x1 = x[..., 0, :].squeeze()
            x2 = x[..., 1, :].squeeze()
            return a*x1 + b + x2

        xdata_with_nan = np.array([[[2, 3, np.nan, 4, 4, np.nan, 5],
                                    [2, 3, np.nan, np.nan, 4, np.nan, 7]]])
        xdata_with_nan = xdata_with_nan.squeeze() if n == 2 else xdata_with_nan
        ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10])
        xdata_without_nan = np.array([[[2, 3, 5], [2, 3, 7]]])
        ydata_without_nan = np.array([1, 2, 10])

        self._check_nan_policy(f, xdata_with_nan, xdata_without_nan,
                               ydata_with_nan, ydata_without_nan, method)
|
| 664 |
+
|
| 665 |
+
    def test_empty_inputs(self):
        # Test both with and without bounds (regression test for gh-9864)
        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [])
        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [],
                      bounds=(1, 2))
        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], [])
        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [],
                      bounds=(1, 2))
|
| 673 |
+
|
| 674 |
+
    def test_function_zero_params(self):
        # Fit args is zero, so "Unable to determine number of fit parameters."
        assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4])
|
| 677 |
+
|
| 678 |
+
    def test_None_x(self):  # Added in GH10196
        # xdata=None is allowed when the model ignores its x argument.
        popt, pcov = curve_fit(lambda _, a: a * np.arange(10),
                               None, 2 * np.arange(10))
        assert_allclose(popt, [2.])
|
| 682 |
+
|
| 683 |
+
    def test_method_argument(self):
        # All supported backends (and the default None) must converge;
        # an unknown method name must raise.
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        for method in ['trf', 'dogbox', 'lm', None]:
            popt, pcov = curve_fit(f, xdata, ydata, method=method)
            assert_allclose(popt, [2., 2.])

        assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')
|
| 695 |
+
|
| 696 |
+
    def test_full_output(self):
        # full_output=True must return the extended 5-tuple for every
        # backend; the MINPACK-specific keys only appear for method='lm'.
        def f(x, a, b):
            return a * np.exp(-b * x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        for method in ['trf', 'dogbox', 'lm', None]:
            popt, pcov, infodict, errmsg, ier = curve_fit(
                f, xdata, ydata, method=method, full_output=True)
            assert_allclose(popt, [2., 2.])
            assert "nfev" in infodict
            assert "fvec" in infodict
            if method == 'lm' or method is None:
                assert "fjac" in infodict
                assert "ipvt" in infodict
                assert "qtf" in infodict
            assert isinstance(errmsg, str)
            assert ier in (1, 2, 3, 4)
|
| 715 |
+
|
| 716 |
+
    def test_bounds(self):
        # Bounds as a (lb, ub) tuple and as a Bounds instance must agree,
        # and method='lm' must reject bounds.
        def f(x, a, b):
            return a * np.exp(-b*x)

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # The minimum w/out bounds is at [2., 2.],
        # and with bounds it's at [1.5, smth].
        lb = [1., 0]
        ub = [1.5, 3.]

        # Test that both variants of the bounds yield the same result
        bounds = (lb, ub)
        bounds_class = Bounds(lb, ub)
        for method in [None, 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
                                   method=method)
            assert_allclose(popt[0], 1.5)

            popt_class, pcov_class = curve_fit(f, xdata, ydata,
                                               bounds=bounds_class,
                                               method=method)
            assert_allclose(popt_class, popt)

        # With bounds, the starting estimate is feasible.
        popt, pcov = curve_fit(f, xdata, ydata, method='trf',
                               bounds=([0., 0], [0.6, np.inf]))
        assert_allclose(popt[0], 0.6)

        # method='lm' doesn't support bounds.
        assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
                      method='lm')
|
| 749 |
+
|
| 750 |
+
    def test_bounds_p0(self):
        # This test is for issue #5719. The problem was that an initial guess
        # was ignored when 'trf' or 'dogbox' methods were invoked.
        def f(x, a):
            return np.sin(x + a)

        xdata = np.linspace(-2*np.pi, 2*np.pi, 40)
        ydata = np.sin(xdata)
        bounds = (-3 * np.pi, 3 * np.pi)
        for method in ['trf', 'dogbox']:
            popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi)
            popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi,
                                  bounds=bounds, method=method)

            # If the initial guess is ignored, then popt_2 would be close 0.
            assert_allclose(popt_1, popt_2)
|
| 766 |
+
|
| 767 |
+
    def test_jac(self):
        # Test that Jacobian callable is handled correctly and
        # weighted if sigma is provided.
        def f(x, a, b):
            return a * np.exp(-b*x)

        def jac(x, a, b):
            # Analytic Jacobian of f with respect to (a, b).
            e = np.exp(-b*x)
            return np.vstack((e, -a * x * e)).T

        xdata = np.linspace(0, 1, 11)
        ydata = f(xdata, 2., 2.)

        # Test numerical options for least_squares backend.
        for method in ['trf', 'dogbox']:
            for scheme in ['2-point', '3-point', 'cs']:
                popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
                                       method=method)
                assert_allclose(popt, [2, 2])

        # Test the analytic option.
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
            assert_allclose(popt, [2, 2])

        # Now add an outlier and provide sigma.
        ydata[5] = 100
        sigma = np.ones(xdata.shape[0])
        sigma[5] = 200
        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method,
                                   jac=jac)
            # Still the optimization process is influenced somehow,
            # have to set rtol=1e-3.
            assert_allclose(popt, [2, 2], rtol=1e-3)
|
| 802 |
+
|
| 803 |
+
    def test_maxfev_and_bounds(self):
        # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq)
        # but with bounds, the parameter is `max_nfev` (via least_squares)
        x = np.arange(0, 10)
        y = 2*x
        popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100)
        popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100)

        assert_allclose(popt1, 2, atol=1e-14)
        assert_allclose(popt2, 2, atol=1e-14)
|
| 813 |
+
|
| 814 |
+
    @pytest.mark.parametrize("sigma_dim", [0, 1, 2])
    def test_curvefit_omitnan(self, sigma_dim):
        # nan_policy="omit" with scalar, 1-D and 2-D (covariance) sigma must
        # match a reference fit on manually NaN-stripped data.
        def exponential(x, a, b):
            return b * np.exp(a * x)

        rng = np.random.default_rng(578285731148908)
        N = 100
        x = np.linspace(1, 10, N)
        y = exponential(x, 0.2, 0.5)

        if (sigma_dim == 0):
            sigma = 0.05
            y += rng.normal(0, sigma, N)

        elif (sigma_dim == 1):
            sigma = x * 0.05
            y += rng.normal(0, sigma, N)

        elif (sigma_dim == 2):
            # The covariance matrix must be symmetric positive-semidefinite
            a = rng.normal(0, 2, (N, N))
            sigma = a @ a.T
            y += rng.multivariate_normal(np.zeros_like(x), sigma)
        else:
            assert False, "The sigma must be a scalar, 1D array or 2D array."

        p0 = [0.1, 1.0]

        # Choose indices to place NaNs.
        i_x = rng.integers(N, size=5)
        i_y = rng.integers(N, size=5)

        # Add NaNs and compute result using `curve_fit`
        x[i_x] = np.nan
        y[i_y] = np.nan
        res_opt, res_cov = curve_fit(exponential, x, y, p0=p0, sigma=sigma,
                                     nan_policy="omit")

        # Manually remove elements that should be eliminated, and
        # calculate reference using `curve_fit`
        i_delete = np.unique(np.concatenate((i_x, i_y)))
        x = np.delete(x, i_delete, axis=0)
        y = np.delete(y, i_delete, axis=0)

        sigma = np.asarray(sigma)
        if sigma.ndim == 1:
            sigma = np.delete(sigma, i_delete)
        elif sigma.ndim == 2:
            # Drop both the rows and the columns of omitted observations.
            sigma = np.delete(sigma, i_delete, axis=0)
            sigma = np.delete(sigma, i_delete, axis=1)
        ref_opt, ref_cov = curve_fit(exponential, x, y, p0=p0, sigma=sigma)

        assert_allclose(res_opt, ref_opt, atol=1e-14)
        assert_allclose(res_cov, ref_cov, atol=1e-14)
|
| 868 |
+
|
| 869 |
+
def test_curvefit_simplecovariance(self):
|
| 870 |
+
|
| 871 |
+
def func(x, a, b):
|
| 872 |
+
return a * np.exp(-b*x)
|
| 873 |
+
|
| 874 |
+
def jac(x, a, b):
|
| 875 |
+
e = np.exp(-b*x)
|
| 876 |
+
return np.vstack((e, -a * x * e)).T
|
| 877 |
+
|
| 878 |
+
np.random.seed(0)
|
| 879 |
+
xdata = np.linspace(0, 4, 50)
|
| 880 |
+
y = func(xdata, 2.5, 1.3)
|
| 881 |
+
ydata = y + 0.2 * np.random.normal(size=len(xdata))
|
| 882 |
+
|
| 883 |
+
sigma = np.zeros(len(xdata)) + 0.2
|
| 884 |
+
covar = np.diag(sigma**2)
|
| 885 |
+
|
| 886 |
+
for jac1, jac2 in [(jac, jac), (None, None)]:
|
| 887 |
+
for absolute_sigma in [False, True]:
|
| 888 |
+
popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
|
| 889 |
+
jac=jac1, absolute_sigma=absolute_sigma)
|
| 890 |
+
popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar,
|
| 891 |
+
jac=jac2, absolute_sigma=absolute_sigma)
|
| 892 |
+
|
| 893 |
+
assert_allclose(popt1, popt2, atol=1e-14)
|
| 894 |
+
assert_allclose(pcov1, pcov2, atol=1e-14)
|
| 895 |
+
|
| 896 |
+
def test_curvefit_covariance(self):
|
| 897 |
+
|
| 898 |
+
def funcp(x, a, b):
|
| 899 |
+
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
|
| 900 |
+
[1./np.sqrt(2), 1./np.sqrt(2), 0],
|
| 901 |
+
[0, 0, 1.0]])
|
| 902 |
+
return rotn.dot(a * np.exp(-b*x))
|
| 903 |
+
|
| 904 |
+
def jacp(x, a, b):
|
| 905 |
+
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
|
| 906 |
+
[1./np.sqrt(2), 1./np.sqrt(2), 0],
|
| 907 |
+
[0, 0, 1.0]])
|
| 908 |
+
e = np.exp(-b*x)
|
| 909 |
+
return rotn.dot(np.vstack((e, -a * x * e)).T)
|
| 910 |
+
|
| 911 |
+
def func(x, a, b):
|
| 912 |
+
return a * np.exp(-b*x)
|
| 913 |
+
|
| 914 |
+
def jac(x, a, b):
|
| 915 |
+
e = np.exp(-b*x)
|
| 916 |
+
return np.vstack((e, -a * x * e)).T
|
| 917 |
+
|
| 918 |
+
rng = np.random.RandomState(0)
|
| 919 |
+
xdata = np.arange(1, 4)
|
| 920 |
+
y = func(xdata, 2.5, 1.0)
|
| 921 |
+
ydata = y + 0.2 * rng.normal(size=len(xdata))
|
| 922 |
+
sigma = np.zeros(len(xdata)) + 0.2
|
| 923 |
+
covar = np.diag(sigma**2)
|
| 924 |
+
# Get a rotation matrix, and obtain ydatap = R ydata
|
| 925 |
+
# Chisq = ydata^T C^{-1} ydata
|
| 926 |
+
# = ydata^T R^T R C^{-1} R^T R ydata
|
| 927 |
+
# = ydatap^T Cp^{-1} ydatap
|
| 928 |
+
# Cp^{-1} = R C^{-1} R^T
|
| 929 |
+
# Cp = R C R^T, since R^-1 = R^T
|
| 930 |
+
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
|
| 931 |
+
[1./np.sqrt(2), 1./np.sqrt(2), 0],
|
| 932 |
+
[0, 0, 1.0]])
|
| 933 |
+
ydatap = rotn.dot(ydata)
|
| 934 |
+
covarp = rotn.dot(covar).dot(rotn.T)
|
| 935 |
+
|
| 936 |
+
for jac1, jac2 in [(jac, jacp), (None, None)]:
|
| 937 |
+
for absolute_sigma in [False, True]:
|
| 938 |
+
popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
|
| 939 |
+
jac=jac1, absolute_sigma=absolute_sigma)
|
| 940 |
+
popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp,
|
| 941 |
+
jac=jac2, absolute_sigma=absolute_sigma)
|
| 942 |
+
|
| 943 |
+
assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14)
|
| 944 |
+
assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14)
|
| 945 |
+
|
| 946 |
+
@pytest.mark.parametrize("absolute_sigma", [False, True])
|
| 947 |
+
def test_curvefit_scalar_sigma(self, absolute_sigma):
|
| 948 |
+
def func(x, a, b):
|
| 949 |
+
return a * x + b
|
| 950 |
+
|
| 951 |
+
x, y = self.x, self.y
|
| 952 |
+
_, pcov1 = curve_fit(func, x, y, sigma=2, absolute_sigma=absolute_sigma)
|
| 953 |
+
# Explicitly building the sigma 1D array
|
| 954 |
+
_, pcov2 = curve_fit(
|
| 955 |
+
func, x, y, sigma=np.full_like(y, 2), absolute_sigma=absolute_sigma
|
| 956 |
+
)
|
| 957 |
+
assert np.all(pcov1 == pcov2)
|
| 958 |
+
|
| 959 |
+
def test_dtypes(self):
|
| 960 |
+
# regression test for gh-9581: curve_fit fails if x and y dtypes differ
|
| 961 |
+
x = np.arange(-3, 5)
|
| 962 |
+
y = 1.5*x + 3.0 + 0.5*np.sin(x)
|
| 963 |
+
|
| 964 |
+
def func(x, a, b):
|
| 965 |
+
return a*x + b
|
| 966 |
+
|
| 967 |
+
for method in ['lm', 'trf', 'dogbox']:
|
| 968 |
+
for dtx in [np.float32, np.float64]:
|
| 969 |
+
for dty in [np.float32, np.float64]:
|
| 970 |
+
x = x.astype(dtx)
|
| 971 |
+
y = y.astype(dty)
|
| 972 |
+
|
| 973 |
+
with warnings.catch_warnings():
|
| 974 |
+
warnings.simplefilter("error", OptimizeWarning)
|
| 975 |
+
p, cov = curve_fit(func, x, y, method=method)
|
| 976 |
+
|
| 977 |
+
assert np.isfinite(cov).all()
|
| 978 |
+
assert not np.allclose(p, 1) # curve_fit's initial value
|
| 979 |
+
|
| 980 |
+
def test_dtypes2(self):
|
| 981 |
+
# regression test for gh-7117: curve_fit fails if
|
| 982 |
+
# both inputs are float32
|
| 983 |
+
def hyperbola(x, s_1, s_2, o_x, o_y, c):
|
| 984 |
+
b_2 = (s_1 + s_2) / 2
|
| 985 |
+
b_1 = (s_2 - s_1) / 2
|
| 986 |
+
return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4)
|
| 987 |
+
|
| 988 |
+
min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0])
|
| 989 |
+
max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0])
|
| 990 |
+
guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5])
|
| 991 |
+
|
| 992 |
+
params = [-2, .4, -1, -5, 9.5]
|
| 993 |
+
xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32])
|
| 994 |
+
ydata = hyperbola(xdata, *params)
|
| 995 |
+
|
| 996 |
+
# run optimization twice, with xdata being float32 and float64
|
| 997 |
+
popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
|
| 998 |
+
bounds=(min_fit, max_fit))
|
| 999 |
+
|
| 1000 |
+
xdata = xdata.astype(np.float32)
|
| 1001 |
+
ydata = hyperbola(xdata, *params)
|
| 1002 |
+
|
| 1003 |
+
popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
|
| 1004 |
+
bounds=(min_fit, max_fit))
|
| 1005 |
+
|
| 1006 |
+
assert_allclose(popt_32, popt_64, atol=2e-5)
|
| 1007 |
+
|
| 1008 |
+
def test_broadcast_y(self):
|
| 1009 |
+
xdata = np.arange(10)
|
| 1010 |
+
target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata))
|
| 1011 |
+
def fit_func(x, a, b):
|
| 1012 |
+
return a * x ** 2 + b * x - target
|
| 1013 |
+
for method in ['lm', 'trf', 'dogbox']:
|
| 1014 |
+
popt0, pcov0 = curve_fit(fit_func,
|
| 1015 |
+
xdata=xdata,
|
| 1016 |
+
ydata=np.zeros_like(xdata),
|
| 1017 |
+
method=method)
|
| 1018 |
+
popt1, pcov1 = curve_fit(fit_func,
|
| 1019 |
+
xdata=xdata,
|
| 1020 |
+
ydata=0,
|
| 1021 |
+
method=method)
|
| 1022 |
+
assert_allclose(pcov0, pcov1)
|
| 1023 |
+
|
| 1024 |
+
def test_args_in_kwargs(self):
|
| 1025 |
+
# Ensure that `args` cannot be passed as keyword argument to `curve_fit`
|
| 1026 |
+
|
| 1027 |
+
def func(x, a, b):
|
| 1028 |
+
return a * x + b
|
| 1029 |
+
|
| 1030 |
+
with assert_raises(ValueError):
|
| 1031 |
+
curve_fit(func,
|
| 1032 |
+
xdata=[1, 2, 3, 4],
|
| 1033 |
+
ydata=[5, 9, 13, 17],
|
| 1034 |
+
p0=[1],
|
| 1035 |
+
args=(1,))
|
| 1036 |
+
|
| 1037 |
+
def test_data_point_number_validation(self):
|
| 1038 |
+
def func(x, a, b, c, d, e):
|
| 1039 |
+
return a * np.exp(-b * x) + c + d + e
|
| 1040 |
+
|
| 1041 |
+
with assert_raises(TypeError, match="The number of func parameters="):
|
| 1042 |
+
curve_fit(func,
|
| 1043 |
+
xdata=[1, 2, 3, 4],
|
| 1044 |
+
ydata=[5, 9, 13, 17])
|
| 1045 |
+
|
| 1046 |
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
|
| 1047 |
+
def test_gh4555(self):
|
| 1048 |
+
# gh-4555 reported that covariance matrices returned by `leastsq`
|
| 1049 |
+
# can have negative diagonal elements and eigenvalues. (In fact,
|
| 1050 |
+
# they can also be asymmetric.) This shows up in the output of
|
| 1051 |
+
# `scipy.optimize.curve_fit`. Check that it has been resolved.giit
|
| 1052 |
+
def f(x, a, b, c, d, e):
|
| 1053 |
+
return a*np.log(x + 1 + b) + c*np.log(x + 1 + d) + e
|
| 1054 |
+
|
| 1055 |
+
rng = np.random.default_rng(408113519974467917)
|
| 1056 |
+
n = 100
|
| 1057 |
+
x = np.arange(n)
|
| 1058 |
+
y = np.linspace(2, 7, n) + rng.random(n)
|
| 1059 |
+
p, cov = optimize.curve_fit(f, x, y, maxfev=100000)
|
| 1060 |
+
assert np.all(np.diag(cov) > 0)
|
| 1061 |
+
eigs = linalg.eigh(cov)[0] # separate line for debugging
|
| 1062 |
+
# some platforms see a small negative eigevenvalue
|
| 1063 |
+
assert np.all(eigs > -1e-2)
|
| 1064 |
+
assert_allclose(cov, cov.T)
|
| 1065 |
+
|
| 1066 |
+
def test_gh4555b(self):
|
| 1067 |
+
# check that PR gh-17247 did not significantly change covariance matrix
|
| 1068 |
+
# for simple cases
|
| 1069 |
+
rng = np.random.default_rng(408113519974467917)
|
| 1070 |
+
|
| 1071 |
+
def func(x, a, b, c):
|
| 1072 |
+
return a * np.exp(-b * x) + c
|
| 1073 |
+
|
| 1074 |
+
xdata = np.linspace(0, 4, 50)
|
| 1075 |
+
y = func(xdata, 2.5, 1.3, 0.5)
|
| 1076 |
+
y_noise = 0.2 * rng.normal(size=xdata.size)
|
| 1077 |
+
ydata = y + y_noise
|
| 1078 |
+
_, res = curve_fit(func, xdata, ydata)
|
| 1079 |
+
# reference from commit 1d80a2f254380d2b45733258ca42eb6b55c8755b
|
| 1080 |
+
ref = [[+0.0158972536486215, 0.0069207183284242, -0.0007474400714749],
|
| 1081 |
+
[+0.0069207183284242, 0.0205057958128679, +0.0053997711275403],
|
| 1082 |
+
[-0.0007474400714749, 0.0053997711275403, +0.0027833930320877]]
|
| 1083 |
+
# Linux_Python_38_32bit_full fails with default tolerance
|
| 1084 |
+
assert_allclose(res, ref, 2e-7)
|
| 1085 |
+
|
| 1086 |
+
def test_gh13670(self):
|
| 1087 |
+
# gh-13670 reported that `curve_fit` executes callables
|
| 1088 |
+
# with the same values of the parameters at the beginning of
|
| 1089 |
+
# optimization. Check that this has been resolved.
|
| 1090 |
+
|
| 1091 |
+
rng = np.random.default_rng(8250058582555444926)
|
| 1092 |
+
x = np.linspace(0, 3, 101)
|
| 1093 |
+
y = 2 * x + 1 + rng.normal(size=101) * 0.5
|
| 1094 |
+
|
| 1095 |
+
def line(x, *p):
|
| 1096 |
+
assert not np.all(line.last_p == p)
|
| 1097 |
+
line.last_p = p
|
| 1098 |
+
return x * p[0] + p[1]
|
| 1099 |
+
|
| 1100 |
+
def jac(x, *p):
|
| 1101 |
+
assert not np.all(jac.last_p == p)
|
| 1102 |
+
jac.last_p = p
|
| 1103 |
+
return np.array([x, np.ones_like(x)]).T
|
| 1104 |
+
|
| 1105 |
+
line.last_p = None
|
| 1106 |
+
jac.last_p = None
|
| 1107 |
+
p0 = np.array([1.0, 5.0])
|
| 1108 |
+
curve_fit(line, x, y, p0, method='lm', jac=jac)
|
| 1109 |
+
|
| 1110 |
+
@pytest.mark.parametrize('method', ['trf', 'dogbox'])
|
| 1111 |
+
def test_gh20155_error_mentions_x0(self, method):
|
| 1112 |
+
# `curve_fit` produced an error message that referred to an undocumented
|
| 1113 |
+
# variable `x0`, which was really `p0`. Check that this is resolved.
|
| 1114 |
+
def func(x,a):
|
| 1115 |
+
return x**a
|
| 1116 |
+
message = "Initial guess is outside of provided bounds"
|
| 1117 |
+
with pytest.raises(ValueError, match=message):
|
| 1118 |
+
curve_fit(func, self.x, self.y, p0=[1], bounds=(1000, 1001),
|
| 1119 |
+
method=method)
|
| 1120 |
+
|
| 1121 |
+
|
| 1122 |
+
class TestFixedPoint:
|
| 1123 |
+
|
| 1124 |
+
def test_scalar_trivial(self):
|
| 1125 |
+
# f(x) = 2x; fixed point should be x=0
|
| 1126 |
+
def func(x):
|
| 1127 |
+
return 2.0*x
|
| 1128 |
+
x0 = 1.0
|
| 1129 |
+
x = fixed_point(func, x0)
|
| 1130 |
+
assert_almost_equal(x, 0.0)
|
| 1131 |
+
|
| 1132 |
+
def test_scalar_basic1(self):
|
| 1133 |
+
# f(x) = x**2; x0=1.05; fixed point should be x=1
|
| 1134 |
+
def func(x):
|
| 1135 |
+
return x**2
|
| 1136 |
+
x0 = 1.05
|
| 1137 |
+
x = fixed_point(func, x0)
|
| 1138 |
+
assert_almost_equal(x, 1.0)
|
| 1139 |
+
|
| 1140 |
+
def test_scalar_basic2(self):
|
| 1141 |
+
# f(x) = x**0.5; x0=1.05; fixed point should be x=1
|
| 1142 |
+
def func(x):
|
| 1143 |
+
return x**0.5
|
| 1144 |
+
x0 = 1.05
|
| 1145 |
+
x = fixed_point(func, x0)
|
| 1146 |
+
assert_almost_equal(x, 1.0)
|
| 1147 |
+
|
| 1148 |
+
def test_array_trivial(self):
|
| 1149 |
+
def func(x):
|
| 1150 |
+
return 2.0*x
|
| 1151 |
+
x0 = [0.3, 0.15]
|
| 1152 |
+
with np.errstate(all='ignore'):
|
| 1153 |
+
x = fixed_point(func, x0)
|
| 1154 |
+
assert_almost_equal(x, [0.0, 0.0])
|
| 1155 |
+
|
| 1156 |
+
def test_array_basic1(self):
|
| 1157 |
+
# f(x) = c * x**2; fixed point should be x=1/c
|
| 1158 |
+
def func(x, c):
|
| 1159 |
+
return c * x**2
|
| 1160 |
+
c = array([0.75, 1.0, 1.25])
|
| 1161 |
+
x0 = [1.1, 1.15, 0.9]
|
| 1162 |
+
with np.errstate(all='ignore'):
|
| 1163 |
+
x = fixed_point(func, x0, args=(c,))
|
| 1164 |
+
assert_almost_equal(x, 1.0/c)
|
| 1165 |
+
|
| 1166 |
+
def test_array_basic2(self):
|
| 1167 |
+
# f(x) = c * x**0.5; fixed point should be x=c**2
|
| 1168 |
+
def func(x, c):
|
| 1169 |
+
return c * x**0.5
|
| 1170 |
+
c = array([0.75, 1.0, 1.25])
|
| 1171 |
+
x0 = [0.8, 1.1, 1.1]
|
| 1172 |
+
x = fixed_point(func, x0, args=(c,))
|
| 1173 |
+
assert_almost_equal(x, c**2)
|
| 1174 |
+
|
| 1175 |
+
def test_lambertw(self):
|
| 1176 |
+
# python-list/2010-December/594592.html
|
| 1177 |
+
xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
|
| 1178 |
+
args=(), xtol=1e-12, maxiter=500)
|
| 1179 |
+
assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
|
| 1180 |
+
assert_allclose(xxroot, lambertw(1)/2)
|
| 1181 |
+
|
| 1182 |
+
def test_no_acceleration(self):
|
| 1183 |
+
# GitHub issue 5460
|
| 1184 |
+
ks = 2
|
| 1185 |
+
kl = 6
|
| 1186 |
+
m = 1.3
|
| 1187 |
+
n0 = 1.001
|
| 1188 |
+
i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1))
|
| 1189 |
+
|
| 1190 |
+
def func(n):
|
| 1191 |
+
return np.log(kl/ks/n) / np.log(i0*n/(n - 1)) + 1
|
| 1192 |
+
|
| 1193 |
+
n = fixed_point(func, n0, method='iteration')
|
| 1194 |
+
assert_allclose(n, m)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Regression tests for optimize.
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.testing import assert_almost_equal
|
| 6 |
+
from pytest import raises as assert_raises
|
| 7 |
+
|
| 8 |
+
import scipy.optimize
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestRegression:
|
| 12 |
+
|
| 13 |
+
def test_newton_x0_is_0(self):
|
| 14 |
+
# Regression test for gh-1601
|
| 15 |
+
tgt = 1
|
| 16 |
+
res = scipy.optimize.newton(lambda x: x - 1, 0)
|
| 17 |
+
assert_almost_equal(res, tgt)
|
| 18 |
+
|
| 19 |
+
def test_newton_integers(self):
|
| 20 |
+
# Regression test for gh-1741
|
| 21 |
+
root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,
|
| 22 |
+
fprime=lambda x: 2*x)
|
| 23 |
+
assert_almost_equal(root, 1.0)
|
| 24 |
+
|
| 25 |
+
def test_lmdif_errmsg(self):
|
| 26 |
+
# This shouldn't cause a crash on Python 3
|
| 27 |
+
class SomeError(Exception):
|
| 28 |
+
pass
|
| 29 |
+
counter = [0]
|
| 30 |
+
|
| 31 |
+
def func(x):
|
| 32 |
+
counter[0] += 1
|
| 33 |
+
if counter[0] < 3:
|
| 34 |
+
return x**2 - np.array([9, 10, 11])
|
| 35 |
+
else:
|
| 36 |
+
raise SomeError()
|
| 37 |
+
assert_raises(SomeError,
|
| 38 |
+
scipy.optimize.leastsq,
|
| 39 |
+
func, [1, 2, 3])
|
| 40 |
+
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py
ADDED
|
@@ -0,0 +1,613 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit test for SLSQP optimization.
|
| 3 |
+
"""
|
| 4 |
+
from numpy.testing import (assert_, assert_array_almost_equal,
|
| 5 |
+
assert_allclose, assert_equal)
|
| 6 |
+
from pytest import raises as assert_raises
|
| 7 |
+
import pytest
|
| 8 |
+
import numpy as np
|
| 9 |
+
import scipy
|
| 10 |
+
|
| 11 |
+
from scipy.optimize import fmin_slsqp, minimize, Bounds, NonlinearConstraint
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class MyCallBack:
|
| 15 |
+
"""pass a custom callback function
|
| 16 |
+
|
| 17 |
+
This makes sure it's being used.
|
| 18 |
+
"""
|
| 19 |
+
def __init__(self):
|
| 20 |
+
self.been_called = False
|
| 21 |
+
self.ncalls = 0
|
| 22 |
+
|
| 23 |
+
def __call__(self, x):
|
| 24 |
+
self.been_called = True
|
| 25 |
+
self.ncalls += 1
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class TestSLSQP:
|
| 29 |
+
"""
|
| 30 |
+
Test SLSQP algorithm using Example 14.4 from Numerical Methods for
|
| 31 |
+
Engineers by Steven Chapra and Raymond Canale.
|
| 32 |
+
This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2,
|
| 33 |
+
which has a maximum at x=2, y=1.
|
| 34 |
+
"""
|
| 35 |
+
def setup_method(self):
|
| 36 |
+
self.opts = {'disp': False}
|
| 37 |
+
|
| 38 |
+
def fun(self, d, sign=1.0):
|
| 39 |
+
"""
|
| 40 |
+
Arguments:
|
| 41 |
+
d - A list of two elements, where d[0] represents x and d[1] represents y
|
| 42 |
+
in the following equation.
|
| 43 |
+
sign - A multiplier for f. Since we want to optimize it, and the SciPy
|
| 44 |
+
optimizers can only minimize functions, we need to multiply it by
|
| 45 |
+
-1 to achieve the desired solution
|
| 46 |
+
Returns:
|
| 47 |
+
2*x*y + 2*x - x**2 - 2*y**2
|
| 48 |
+
|
| 49 |
+
"""
|
| 50 |
+
x = d[0]
|
| 51 |
+
y = d[1]
|
| 52 |
+
return sign*(2*x*y + 2*x - x**2 - 2*y**2)
|
| 53 |
+
|
| 54 |
+
def jac(self, d, sign=1.0):
|
| 55 |
+
"""
|
| 56 |
+
This is the derivative of fun, returning a NumPy array
|
| 57 |
+
representing df/dx and df/dy.
|
| 58 |
+
|
| 59 |
+
"""
|
| 60 |
+
x = d[0]
|
| 61 |
+
y = d[1]
|
| 62 |
+
dfdx = sign*(-2*x + 2*y + 2)
|
| 63 |
+
dfdy = sign*(2*x - 4*y)
|
| 64 |
+
return np.array([dfdx, dfdy], float)
|
| 65 |
+
|
| 66 |
+
def fun_and_jac(self, d, sign=1.0):
|
| 67 |
+
return self.fun(d, sign), self.jac(d, sign)
|
| 68 |
+
|
| 69 |
+
def f_eqcon(self, x, sign=1.0):
|
| 70 |
+
""" Equality constraint """
|
| 71 |
+
return np.array([x[0] - x[1]])
|
| 72 |
+
|
| 73 |
+
def fprime_eqcon(self, x, sign=1.0):
|
| 74 |
+
""" Equality constraint, derivative """
|
| 75 |
+
return np.array([[1, -1]])
|
| 76 |
+
|
| 77 |
+
def f_eqcon_scalar(self, x, sign=1.0):
|
| 78 |
+
""" Scalar equality constraint """
|
| 79 |
+
return self.f_eqcon(x, sign)[0]
|
| 80 |
+
|
| 81 |
+
def fprime_eqcon_scalar(self, x, sign=1.0):
|
| 82 |
+
""" Scalar equality constraint, derivative """
|
| 83 |
+
return self.fprime_eqcon(x, sign)[0].tolist()
|
| 84 |
+
|
| 85 |
+
def f_ieqcon(self, x, sign=1.0):
|
| 86 |
+
""" Inequality constraint """
|
| 87 |
+
return np.array([x[0] - x[1] - 1.0])
|
| 88 |
+
|
| 89 |
+
def fprime_ieqcon(self, x, sign=1.0):
|
| 90 |
+
""" Inequality constraint, derivative """
|
| 91 |
+
return np.array([[1, -1]])
|
| 92 |
+
|
| 93 |
+
def f_ieqcon2(self, x):
|
| 94 |
+
""" Vector inequality constraint """
|
| 95 |
+
return np.asarray(x)
|
| 96 |
+
|
| 97 |
+
def fprime_ieqcon2(self, x):
|
| 98 |
+
""" Vector inequality constraint, derivative """
|
| 99 |
+
return np.identity(x.shape[0])
|
| 100 |
+
|
| 101 |
+
# minimize
|
| 102 |
+
def test_minimize_unbounded_approximated(self):
|
| 103 |
+
# Minimize, method='SLSQP': unbounded, approximated jacobian.
|
| 104 |
+
jacs = [None, False, '2-point', '3-point']
|
| 105 |
+
for jac in jacs:
|
| 106 |
+
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
|
| 107 |
+
jac=jac, method='SLSQP',
|
| 108 |
+
options=self.opts)
|
| 109 |
+
assert_(res['success'], res['message'])
|
| 110 |
+
assert_allclose(res.x, [2, 1])
|
| 111 |
+
|
| 112 |
+
def test_minimize_unbounded_given(self):
|
| 113 |
+
# Minimize, method='SLSQP': unbounded, given Jacobian.
|
| 114 |
+
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
|
| 115 |
+
jac=self.jac, method='SLSQP', options=self.opts)
|
| 116 |
+
assert_(res['success'], res['message'])
|
| 117 |
+
assert_allclose(res.x, [2, 1])
|
| 118 |
+
|
| 119 |
+
def test_minimize_bounded_approximated(self):
|
| 120 |
+
# Minimize, method='SLSQP': bounded, approximated jacobian.
|
| 121 |
+
jacs = [None, False, '2-point', '3-point']
|
| 122 |
+
for jac in jacs:
|
| 123 |
+
with np.errstate(invalid='ignore'):
|
| 124 |
+
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
|
| 125 |
+
jac=jac,
|
| 126 |
+
bounds=((2.5, None), (None, 0.5)),
|
| 127 |
+
method='SLSQP', options=self.opts)
|
| 128 |
+
assert_(res['success'], res['message'])
|
| 129 |
+
assert_allclose(res.x, [2.5, 0.5])
|
| 130 |
+
assert_(2.5 <= res.x[0])
|
| 131 |
+
assert_(res.x[1] <= 0.5)
|
| 132 |
+
|
| 133 |
+
def test_minimize_unbounded_combined(self):
|
| 134 |
+
# Minimize, method='SLSQP': unbounded, combined function and Jacobian.
|
| 135 |
+
res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ),
|
| 136 |
+
jac=True, method='SLSQP', options=self.opts)
|
| 137 |
+
assert_(res['success'], res['message'])
|
| 138 |
+
assert_allclose(res.x, [2, 1])
|
| 139 |
+
|
| 140 |
+
def test_minimize_equality_approximated(self):
|
| 141 |
+
# Minimize with method='SLSQP': equality constraint, approx. jacobian.
|
| 142 |
+
jacs = [None, False, '2-point', '3-point']
|
| 143 |
+
for jac in jacs:
|
| 144 |
+
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
|
| 145 |
+
jac=jac,
|
| 146 |
+
constraints={'type': 'eq',
|
| 147 |
+
'fun': self.f_eqcon,
|
| 148 |
+
'args': (-1.0, )},
|
| 149 |
+
method='SLSQP', options=self.opts)
|
| 150 |
+
assert_(res['success'], res['message'])
|
| 151 |
+
assert_allclose(res.x, [1, 1])
|
| 152 |
+
|
| 153 |
+
def test_minimize_equality_given(self):
|
| 154 |
+
# Minimize with method='SLSQP': equality constraint, given Jacobian.
|
| 155 |
+
res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
|
| 156 |
+
method='SLSQP', args=(-1.0,),
|
| 157 |
+
constraints={'type': 'eq', 'fun':self.f_eqcon,
|
| 158 |
+
'args': (-1.0, )},
|
| 159 |
+
options=self.opts)
|
| 160 |
+
assert_(res['success'], res['message'])
|
| 161 |
+
assert_allclose(res.x, [1, 1])
|
| 162 |
+
|
| 163 |
+
def test_minimize_equality_given2(self):
|
| 164 |
+
# Minimize with method='SLSQP': equality constraint, given Jacobian
|
| 165 |
+
# for fun and const.
|
| 166 |
+
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
|
| 167 |
+
jac=self.jac, args=(-1.0,),
|
| 168 |
+
constraints={'type': 'eq',
|
| 169 |
+
'fun': self.f_eqcon,
|
| 170 |
+
'args': (-1.0, ),
|
| 171 |
+
'jac': self.fprime_eqcon},
|
| 172 |
+
options=self.opts)
|
| 173 |
+
assert_(res['success'], res['message'])
|
| 174 |
+
assert_allclose(res.x, [1, 1])
|
| 175 |
+
|
| 176 |
+
def test_minimize_equality_given_cons_scalar(self):
|
| 177 |
+
# Minimize with method='SLSQP': scalar equality constraint, given
|
| 178 |
+
# Jacobian for fun and const.
|
| 179 |
+
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
|
| 180 |
+
jac=self.jac, args=(-1.0,),
|
| 181 |
+
constraints={'type': 'eq',
|
| 182 |
+
'fun': self.f_eqcon_scalar,
|
| 183 |
+
'args': (-1.0, ),
|
| 184 |
+
'jac': self.fprime_eqcon_scalar},
|
| 185 |
+
options=self.opts)
|
| 186 |
+
assert_(res['success'], res['message'])
|
| 187 |
+
assert_allclose(res.x, [1, 1])
|
| 188 |
+
|
| 189 |
+
def test_minimize_inequality_given(self):
|
| 190 |
+
# Minimize with method='SLSQP': inequality constraint, given Jacobian.
|
| 191 |
+
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
|
| 192 |
+
jac=self.jac, args=(-1.0, ),
|
| 193 |
+
constraints={'type': 'ineq',
|
| 194 |
+
'fun': self.f_ieqcon,
|
| 195 |
+
'args': (-1.0, )},
|
| 196 |
+
options=self.opts)
|
| 197 |
+
assert_(res['success'], res['message'])
|
| 198 |
+
assert_allclose(res.x, [2, 1], atol=1e-3)
|
| 199 |
+
|
| 200 |
+
def test_minimize_inequality_given_vector_constraints(self):
|
| 201 |
+
# Minimize with method='SLSQP': vector inequality constraint, given
|
| 202 |
+
# Jacobian.
|
| 203 |
+
res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
|
| 204 |
+
method='SLSQP', args=(-1.0,),
|
| 205 |
+
constraints={'type': 'ineq',
|
| 206 |
+
'fun': self.f_ieqcon2,
|
| 207 |
+
'jac': self.fprime_ieqcon2},
|
| 208 |
+
options=self.opts)
|
| 209 |
+
assert_(res['success'], res['message'])
|
| 210 |
+
assert_allclose(res.x, [2, 1])
|
| 211 |
+
|
| 212 |
+
def test_minimize_bounded_constraint(self):
|
| 213 |
+
# when the constraint makes the solver go up against a parameter
|
| 214 |
+
# bound make sure that the numerical differentiation of the
|
| 215 |
+
# jacobian doesn't try to exceed that bound using a finite difference.
|
| 216 |
+
# gh11403
|
| 217 |
+
def c(x):
|
| 218 |
+
assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x
|
| 219 |
+
return x[0] ** 0.5 + x[1]
|
| 220 |
+
|
| 221 |
+
def f(x):
|
| 222 |
+
assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x
|
| 223 |
+
return -x[0] ** 2 + x[1] ** 2
|
| 224 |
+
|
| 225 |
+
cns = [NonlinearConstraint(c, 0, 1.5)]
|
| 226 |
+
x0 = np.asarray([0.9, 0.5])
|
| 227 |
+
bnd = Bounds([0., 0.], [1.0, 1.0])
|
| 228 |
+
minimize(f, x0, method='SLSQP', bounds=bnd, constraints=cns)
|
| 229 |
+
|
| 230 |
+
def test_minimize_bound_equality_given2(self):
|
| 231 |
+
# Minimize with method='SLSQP': bounds, eq. const., given jac. for
|
| 232 |
+
# fun. and const.
|
| 233 |
+
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
|
| 234 |
+
jac=self.jac, args=(-1.0, ),
|
| 235 |
+
bounds=[(-0.8, 1.), (-1, 0.8)],
|
| 236 |
+
constraints={'type': 'eq',
|
| 237 |
+
'fun': self.f_eqcon,
|
| 238 |
+
'args': (-1.0, ),
|
| 239 |
+
'jac': self.fprime_eqcon},
|
| 240 |
+
options=self.opts)
|
| 241 |
+
assert_(res['success'], res['message'])
|
| 242 |
+
assert_allclose(res.x, [0.8, 0.8], atol=1e-3)
|
| 243 |
+
assert_(-0.8 <= res.x[0] <= 1)
|
| 244 |
+
assert_(-1 <= res.x[1] <= 0.8)
|
| 245 |
+
|
| 246 |
+
# fmin_slsqp
|
| 247 |
+
def test_unbounded_approximated(self):
|
| 248 |
+
# SLSQP: unbounded, approximated Jacobian.
|
| 249 |
+
res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
|
| 250 |
+
iprint = 0, full_output = 1)
|
| 251 |
+
x, fx, its, imode, smode = res
|
| 252 |
+
assert_(imode == 0, imode)
|
| 253 |
+
assert_array_almost_equal(x, [2, 1])
|
| 254 |
+
|
| 255 |
+
def test_unbounded_given(self):
|
| 256 |
+
# SLSQP: unbounded, given Jacobian.
|
| 257 |
+
res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
|
| 258 |
+
fprime = self.jac, iprint = 0,
|
| 259 |
+
full_output = 1)
|
| 260 |
+
x, fx, its, imode, smode = res
|
| 261 |
+
assert_(imode == 0, imode)
|
| 262 |
+
assert_array_almost_equal(x, [2, 1])
|
| 263 |
+
|
| 264 |
+
def test_equality_approximated(self):
|
| 265 |
+
# SLSQP: equality constraint, approximated Jacobian.
|
| 266 |
+
res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,),
|
| 267 |
+
eqcons = [self.f_eqcon],
|
| 268 |
+
iprint = 0, full_output = 1)
|
| 269 |
+
x, fx, its, imode, smode = res
|
| 270 |
+
assert_(imode == 0, imode)
|
| 271 |
+
assert_array_almost_equal(x, [1, 1])
|
| 272 |
+
|
| 273 |
+
def test_equality_given(self):
|
| 274 |
+
# SLSQP: equality constraint, given Jacobian.
|
| 275 |
+
res = fmin_slsqp(self.fun, [-1.0, 1.0],
|
| 276 |
+
fprime=self.jac, args=(-1.0,),
|
| 277 |
+
eqcons = [self.f_eqcon], iprint = 0,
|
| 278 |
+
full_output = 1)
|
| 279 |
+
x, fx, its, imode, smode = res
|
| 280 |
+
assert_(imode == 0, imode)
|
| 281 |
+
assert_array_almost_equal(x, [1, 1])
|
| 282 |
+
|
| 283 |
+
def test_equality_given2(self):
|
| 284 |
+
# SLSQP: equality constraint, given Jacobian for fun and const.
|
| 285 |
+
res = fmin_slsqp(self.fun, [-1.0, 1.0],
|
| 286 |
+
fprime=self.jac, args=(-1.0,),
|
| 287 |
+
f_eqcons = self.f_eqcon,
|
| 288 |
+
fprime_eqcons = self.fprime_eqcon,
|
| 289 |
+
iprint = 0,
|
| 290 |
+
full_output = 1)
|
| 291 |
+
x, fx, its, imode, smode = res
|
| 292 |
+
assert_(imode == 0, imode)
|
| 293 |
+
assert_array_almost_equal(x, [1, 1])
|
| 294 |
+
|
| 295 |
+
def test_inequality_given(self):
|
| 296 |
+
# SLSQP: inequality constraint, given Jacobian.
|
| 297 |
+
res = fmin_slsqp(self.fun, [-1.0, 1.0],
|
| 298 |
+
fprime=self.jac, args=(-1.0, ),
|
| 299 |
+
ieqcons = [self.f_ieqcon],
|
| 300 |
+
iprint = 0, full_output = 1)
|
| 301 |
+
x, fx, its, imode, smode = res
|
| 302 |
+
assert_(imode == 0, imode)
|
| 303 |
+
assert_array_almost_equal(x, [2, 1], decimal=3)
|
| 304 |
+
|
| 305 |
+
def test_bound_equality_given2(self):
|
| 306 |
+
# SLSQP: bounds, eq. const., given jac. for fun. and const.
|
| 307 |
+
res = fmin_slsqp(self.fun, [-1.0, 1.0],
|
| 308 |
+
fprime=self.jac, args=(-1.0, ),
|
| 309 |
+
bounds = [(-0.8, 1.), (-1, 0.8)],
|
| 310 |
+
f_eqcons = self.f_eqcon,
|
| 311 |
+
fprime_eqcons = self.fprime_eqcon,
|
| 312 |
+
iprint = 0, full_output = 1)
|
| 313 |
+
x, fx, its, imode, smode = res
|
| 314 |
+
assert_(imode == 0, imode)
|
| 315 |
+
assert_array_almost_equal(x, [0.8, 0.8], decimal=3)
|
| 316 |
+
assert_(-0.8 <= x[0] <= 1)
|
| 317 |
+
assert_(-1 <= x[1] <= 0.8)
|
| 318 |
+
|
| 319 |
+
def test_scalar_constraints(self):
|
| 320 |
+
# Regression test for gh-2182
|
| 321 |
+
x = fmin_slsqp(lambda z: z**2, [3.],
|
| 322 |
+
ieqcons=[lambda z: z[0] - 1],
|
| 323 |
+
iprint=0)
|
| 324 |
+
assert_array_almost_equal(x, [1.])
|
| 325 |
+
|
| 326 |
+
x = fmin_slsqp(lambda z: z**2, [3.],
|
| 327 |
+
f_ieqcons=lambda z: [z[0] - 1],
|
| 328 |
+
iprint=0)
|
| 329 |
+
assert_array_almost_equal(x, [1.])
|
| 330 |
+
|
| 331 |
+
def test_integer_bounds(self):
|
| 332 |
+
# This should not raise an exception
|
| 333 |
+
fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0)
|
| 334 |
+
|
| 335 |
+
def test_array_bounds(self):
|
| 336 |
+
# NumPy used to treat n-dimensional 1-element arrays as scalars
|
| 337 |
+
# in some cases. The handling of `bounds` by `fmin_slsqp` still
|
| 338 |
+
# supports this behavior.
|
| 339 |
+
bounds = [(-np.inf, np.inf), (np.array([2]), np.array([3]))]
|
| 340 |
+
x = fmin_slsqp(lambda z: np.sum(z**2 - 1), [2.5, 2.5], bounds=bounds,
|
| 341 |
+
iprint=0)
|
| 342 |
+
assert_array_almost_equal(x, [0, 2])
|
| 343 |
+
|
| 344 |
+
def test_obj_must_return_scalar(self):
|
| 345 |
+
# Regression test for Github Issue #5433
|
| 346 |
+
# If objective function does not return a scalar, raises ValueError
|
| 347 |
+
with assert_raises(ValueError):
|
| 348 |
+
fmin_slsqp(lambda x: [0, 1], [1, 2, 3])
|
| 349 |
+
|
| 350 |
+
def test_obj_returns_scalar_in_list(self):
|
| 351 |
+
# Test for Github Issue #5433 and PR #6691
|
| 352 |
+
# Objective function should be able to return length-1 Python list
|
| 353 |
+
# containing the scalar
|
| 354 |
+
fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0)
|
| 355 |
+
|
| 356 |
+
def test_callback(self):
|
| 357 |
+
# Minimize, method='SLSQP': unbounded, approximated jacobian. Check for callback
|
| 358 |
+
callback = MyCallBack()
|
| 359 |
+
res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
|
| 360 |
+
method='SLSQP', callback=callback, options=self.opts)
|
| 361 |
+
assert_(res['success'], res['message'])
|
| 362 |
+
assert_(callback.been_called)
|
| 363 |
+
assert_equal(callback.ncalls, res['nit'])
|
| 364 |
+
|
| 365 |
+
def test_inconsistent_linearization(self):
|
| 366 |
+
# SLSQP must be able to solve this problem, even if the
|
| 367 |
+
# linearized problem at the starting point is infeasible.
|
| 368 |
+
|
| 369 |
+
# Linearized constraints are
|
| 370 |
+
#
|
| 371 |
+
# 2*x0[0]*x[0] >= 1
|
| 372 |
+
#
|
| 373 |
+
# At x0 = [0, 1], the second constraint is clearly infeasible.
|
| 374 |
+
# This triggers a call with n2==1 in the LSQ subroutine.
|
| 375 |
+
x = [0, 1]
|
| 376 |
+
def f1(x):
|
| 377 |
+
return x[0] + x[1] - 2
|
| 378 |
+
def f2(x):
|
| 379 |
+
return x[0] ** 2 - 1
|
| 380 |
+
sol = minimize(
|
| 381 |
+
lambda x: x[0]**2 + x[1]**2,
|
| 382 |
+
x,
|
| 383 |
+
constraints=({'type':'eq','fun': f1},
|
| 384 |
+
{'type':'ineq','fun': f2}),
|
| 385 |
+
bounds=((0,None), (0,None)),
|
| 386 |
+
method='SLSQP')
|
| 387 |
+
x = sol.x
|
| 388 |
+
|
| 389 |
+
assert_allclose(f1(x), 0, atol=1e-8)
|
| 390 |
+
assert_(f2(x) >= -1e-8)
|
| 391 |
+
assert_(sol.success, sol)
|
| 392 |
+
|
| 393 |
+
def test_regression_5743(self):
|
| 394 |
+
# SLSQP must not indicate success for this problem,
|
| 395 |
+
# which is infeasible.
|
| 396 |
+
x = [1, 2]
|
| 397 |
+
sol = minimize(
|
| 398 |
+
lambda x: x[0]**2 + x[1]**2,
|
| 399 |
+
x,
|
| 400 |
+
constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1},
|
| 401 |
+
{'type':'ineq','fun': lambda x: x[0]-2}),
|
| 402 |
+
bounds=((0,None), (0,None)),
|
| 403 |
+
method='SLSQP')
|
| 404 |
+
assert_(not sol.success, sol)
|
| 405 |
+
|
| 406 |
+
def test_gh_6676(self):
|
| 407 |
+
def func(x):
|
| 408 |
+
return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2
|
| 409 |
+
|
| 410 |
+
sol = minimize(func, [0, 0, 0], method='SLSQP')
|
| 411 |
+
assert_(sol.jac.shape == (3,))
|
| 412 |
+
|
| 413 |
+
def test_invalid_bounds(self):
|
| 414 |
+
# Raise correct error when lower bound is greater than upper bound.
|
| 415 |
+
# See Github issue 6875.
|
| 416 |
+
bounds_list = [
|
| 417 |
+
((1, 2), (2, 1)),
|
| 418 |
+
((2, 1), (1, 2)),
|
| 419 |
+
((2, 1), (2, 1)),
|
| 420 |
+
((np.inf, 0), (np.inf, 0)),
|
| 421 |
+
((1, -np.inf), (0, 1)),
|
| 422 |
+
]
|
| 423 |
+
for bounds in bounds_list:
|
| 424 |
+
with assert_raises(ValueError):
|
| 425 |
+
minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP')
|
| 426 |
+
|
| 427 |
+
def test_bounds_clipping(self):
|
| 428 |
+
#
|
| 429 |
+
# SLSQP returns bogus results for initial guess out of bounds, gh-6859
|
| 430 |
+
#
|
| 431 |
+
def f(x):
|
| 432 |
+
return (x[0] - 1)**2
|
| 433 |
+
|
| 434 |
+
sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)])
|
| 435 |
+
assert_(sol.success)
|
| 436 |
+
assert_allclose(sol.x, 0, atol=1e-10)
|
| 437 |
+
|
| 438 |
+
sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)])
|
| 439 |
+
assert_(sol.success)
|
| 440 |
+
assert_allclose(sol.x, 2, atol=1e-10)
|
| 441 |
+
|
| 442 |
+
sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)])
|
| 443 |
+
assert_(sol.success)
|
| 444 |
+
assert_allclose(sol.x, 0, atol=1e-10)
|
| 445 |
+
|
| 446 |
+
sol = minimize(f, [10], method='slsqp', bounds=[(2, None)])
|
| 447 |
+
assert_(sol.success)
|
| 448 |
+
assert_allclose(sol.x, 2, atol=1e-10)
|
| 449 |
+
|
| 450 |
+
sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)])
|
| 451 |
+
assert_(sol.success)
|
| 452 |
+
assert_allclose(sol.x, 0, atol=1e-10)
|
| 453 |
+
|
| 454 |
+
sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)])
|
| 455 |
+
assert_(sol.success)
|
| 456 |
+
assert_allclose(sol.x, 0, atol=1e-10)
|
| 457 |
+
|
| 458 |
+
def test_infeasible_initial(self):
|
| 459 |
+
# Check SLSQP behavior with infeasible initial point
|
| 460 |
+
def f(x):
|
| 461 |
+
x, = x
|
| 462 |
+
return x*x - 2*x + 1
|
| 463 |
+
|
| 464 |
+
cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}]
|
| 465 |
+
cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}]
|
| 466 |
+
cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x},
|
| 467 |
+
{'type': 'ineq', 'fun': lambda x: x + 1}]
|
| 468 |
+
|
| 469 |
+
sol = minimize(f, [10], method='slsqp', constraints=cons_u)
|
| 470 |
+
assert_(sol.success)
|
| 471 |
+
assert_allclose(sol.x, 0, atol=1e-10)
|
| 472 |
+
|
| 473 |
+
sol = minimize(f, [-10], method='slsqp', constraints=cons_l)
|
| 474 |
+
assert_(sol.success)
|
| 475 |
+
assert_allclose(sol.x, 2, atol=1e-10)
|
| 476 |
+
|
| 477 |
+
sol = minimize(f, [-10], method='slsqp', constraints=cons_u)
|
| 478 |
+
assert_(sol.success)
|
| 479 |
+
assert_allclose(sol.x, 0, atol=1e-10)
|
| 480 |
+
|
| 481 |
+
sol = minimize(f, [10], method='slsqp', constraints=cons_l)
|
| 482 |
+
assert_(sol.success)
|
| 483 |
+
assert_allclose(sol.x, 2, atol=1e-10)
|
| 484 |
+
|
| 485 |
+
sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul)
|
| 486 |
+
assert_(sol.success)
|
| 487 |
+
assert_allclose(sol.x, 0, atol=1e-10)
|
| 488 |
+
|
| 489 |
+
sol = minimize(f, [10], method='slsqp', constraints=cons_ul)
|
| 490 |
+
assert_(sol.success)
|
| 491 |
+
assert_allclose(sol.x, 0, atol=1e-10)
|
| 492 |
+
|
| 493 |
+
@pytest.mark.xfail(scipy.show_config(mode='dicts')['Compilers']['fortran']['name']
|
| 494 |
+
== "intel-llvm",
|
| 495 |
+
reason="Runtime warning due to floating point issues, not logic")
|
| 496 |
+
def test_inconsistent_inequalities(self):
|
| 497 |
+
# gh-7618
|
| 498 |
+
|
| 499 |
+
def cost(x):
|
| 500 |
+
return -1 * x[0] + 4 * x[1]
|
| 501 |
+
|
| 502 |
+
def ineqcons1(x):
|
| 503 |
+
return x[1] - x[0] - 1
|
| 504 |
+
|
| 505 |
+
def ineqcons2(x):
|
| 506 |
+
return x[0] - x[1]
|
| 507 |
+
|
| 508 |
+
# The inequalities are inconsistent, so no solution can exist:
|
| 509 |
+
#
|
| 510 |
+
# x1 >= x0 + 1
|
| 511 |
+
# x0 >= x1
|
| 512 |
+
|
| 513 |
+
x0 = (1,5)
|
| 514 |
+
bounds = ((-5, 5), (-5, 5))
|
| 515 |
+
cons = (dict(type='ineq', fun=ineqcons1), dict(type='ineq', fun=ineqcons2))
|
| 516 |
+
res = minimize(cost, x0, method='SLSQP', bounds=bounds, constraints=cons)
|
| 517 |
+
|
| 518 |
+
assert_(not res.success)
|
| 519 |
+
|
| 520 |
+
def test_new_bounds_type(self):
|
| 521 |
+
def f(x):
|
| 522 |
+
return x[0] ** 2 + x[1] ** 2
|
| 523 |
+
bounds = Bounds([1, 0], [np.inf, np.inf])
|
| 524 |
+
sol = minimize(f, [0, 0], method='slsqp', bounds=bounds)
|
| 525 |
+
assert_(sol.success)
|
| 526 |
+
assert_allclose(sol.x, [1, 0])
|
| 527 |
+
|
| 528 |
+
def test_nested_minimization(self):
|
| 529 |
+
|
| 530 |
+
class NestedProblem:
|
| 531 |
+
|
| 532 |
+
def __init__(self):
|
| 533 |
+
self.F_outer_count = 0
|
| 534 |
+
|
| 535 |
+
def F_outer(self, x):
|
| 536 |
+
self.F_outer_count += 1
|
| 537 |
+
if self.F_outer_count > 1000:
|
| 538 |
+
raise Exception("Nested minimization failed to terminate.")
|
| 539 |
+
inner_res = minimize(self.F_inner, (3, 4), method="SLSQP")
|
| 540 |
+
assert_(inner_res.success)
|
| 541 |
+
assert_allclose(inner_res.x, [1, 1])
|
| 542 |
+
return x[0]**2 + x[1]**2 + x[2]**2
|
| 543 |
+
|
| 544 |
+
def F_inner(self, x):
|
| 545 |
+
return (x[0] - 1)**2 + (x[1] - 1)**2
|
| 546 |
+
|
| 547 |
+
def solve(self):
|
| 548 |
+
outer_res = minimize(self.F_outer, (5, 5, 5), method="SLSQP")
|
| 549 |
+
assert_(outer_res.success)
|
| 550 |
+
assert_allclose(outer_res.x, [0, 0, 0])
|
| 551 |
+
|
| 552 |
+
problem = NestedProblem()
|
| 553 |
+
problem.solve()
|
| 554 |
+
|
| 555 |
+
def test_gh1758(self):
|
| 556 |
+
# the test suggested in gh1758
|
| 557 |
+
# https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/
|
| 558 |
+
# implement two equality constraints, in R^2.
|
| 559 |
+
def fun(x):
|
| 560 |
+
return np.sqrt(x[1])
|
| 561 |
+
|
| 562 |
+
def f_eqcon(x):
|
| 563 |
+
""" Equality constraint """
|
| 564 |
+
return x[1] - (2 * x[0]) ** 3
|
| 565 |
+
|
| 566 |
+
def f_eqcon2(x):
|
| 567 |
+
""" Equality constraint """
|
| 568 |
+
return x[1] - (-x[0] + 1) ** 3
|
| 569 |
+
|
| 570 |
+
c1 = {'type': 'eq', 'fun': f_eqcon}
|
| 571 |
+
c2 = {'type': 'eq', 'fun': f_eqcon2}
|
| 572 |
+
|
| 573 |
+
res = minimize(fun, [8, 0.25], method='SLSQP',
|
| 574 |
+
constraints=[c1, c2], bounds=[(-0.5, 1), (0, 8)])
|
| 575 |
+
|
| 576 |
+
np.testing.assert_allclose(res.fun, 0.5443310539518)
|
| 577 |
+
np.testing.assert_allclose(res.x, [0.33333333, 0.2962963])
|
| 578 |
+
assert res.success
|
| 579 |
+
|
| 580 |
+
def test_gh9640(self):
|
| 581 |
+
np.random.seed(10)
|
| 582 |
+
cons = ({'type': 'ineq', 'fun': lambda x: -x[0] - x[1] - 3},
|
| 583 |
+
{'type': 'ineq', 'fun': lambda x: x[1] + x[2] - 2})
|
| 584 |
+
bnds = ((-2, 2), (-2, 2), (-2, 2))
|
| 585 |
+
|
| 586 |
+
def target(x):
|
| 587 |
+
return 1
|
| 588 |
+
x0 = [-1.8869783504471584, -0.640096352696244, -0.8174212253407696]
|
| 589 |
+
res = minimize(target, x0, method='SLSQP', bounds=bnds, constraints=cons,
|
| 590 |
+
options={'disp':False, 'maxiter':10000})
|
| 591 |
+
|
| 592 |
+
# The problem is infeasible, so it cannot succeed
|
| 593 |
+
assert not res.success
|
| 594 |
+
|
| 595 |
+
@pytest.mark.thread_unsafe
|
| 596 |
+
def test_parameters_stay_within_bounds(self):
|
| 597 |
+
# gh11403. For some problems the SLSQP Fortran code suggests a step
|
| 598 |
+
# outside one of the lower/upper bounds. When this happens
|
| 599 |
+
# approx_derivative complains because it's being asked to evaluate
|
| 600 |
+
# a gradient outside its domain.
|
| 601 |
+
np.random.seed(1)
|
| 602 |
+
bounds = Bounds(np.array([0.1]), np.array([1.0]))
|
| 603 |
+
n_inputs = len(bounds.lb)
|
| 604 |
+
x0 = np.array(bounds.lb + (bounds.ub - bounds.lb) *
|
| 605 |
+
np.random.random(n_inputs))
|
| 606 |
+
|
| 607 |
+
def f(x):
|
| 608 |
+
assert (x >= bounds.lb).all()
|
| 609 |
+
return np.linalg.norm(x)
|
| 610 |
+
|
| 611 |
+
with pytest.warns(RuntimeWarning, match='x were outside bounds'):
|
| 612 |
+
res = minimize(f, x0, method='SLSQP', bounds=bounds)
|
| 613 |
+
assert res.success
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for trust-region optimization routines.
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
import pytest
|
| 6 |
+
import numpy as np
|
| 7 |
+
from numpy.testing import assert_, assert_equal, assert_allclose
|
| 8 |
+
from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess,
|
| 9 |
+
rosen_hess_prod)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Accumulator:
|
| 13 |
+
""" This is for testing callbacks."""
|
| 14 |
+
def __init__(self):
|
| 15 |
+
self.count = 0
|
| 16 |
+
self.accum = None
|
| 17 |
+
|
| 18 |
+
def __call__(self, x):
|
| 19 |
+
self.count += 1
|
| 20 |
+
if self.accum is None:
|
| 21 |
+
self.accum = np.array(x)
|
| 22 |
+
else:
|
| 23 |
+
self.accum += x
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class TestTrustRegionSolvers:
|
| 27 |
+
|
| 28 |
+
def setup_method(self):
|
| 29 |
+
self.x_opt = [1.0, 1.0]
|
| 30 |
+
self.easy_guess = [2.0, 2.0]
|
| 31 |
+
self.hard_guess = [-1.2, 1.0]
|
| 32 |
+
|
| 33 |
+
def test_dogleg_accuracy(self):
|
| 34 |
+
# test the accuracy and the return_all option
|
| 35 |
+
x0 = self.hard_guess
|
| 36 |
+
r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8,
|
| 37 |
+
method='dogleg', options={'return_all': True},)
|
| 38 |
+
assert_allclose(x0, r['allvecs'][0])
|
| 39 |
+
assert_allclose(r['x'], r['allvecs'][-1])
|
| 40 |
+
assert_allclose(r['x'], self.x_opt)
|
| 41 |
+
|
| 42 |
+
def test_dogleg_callback(self):
|
| 43 |
+
# test the callback mechanism and the maxiter and return_all options
|
| 44 |
+
accumulator = Accumulator()
|
| 45 |
+
maxiter = 5
|
| 46 |
+
r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess,
|
| 47 |
+
callback=accumulator, method='dogleg',
|
| 48 |
+
options={'return_all': True, 'maxiter': maxiter},)
|
| 49 |
+
assert_equal(accumulator.count, maxiter)
|
| 50 |
+
assert_equal(len(r['allvecs']), maxiter+1)
|
| 51 |
+
assert_allclose(r['x'], r['allvecs'][-1])
|
| 52 |
+
assert_allclose(sum(r['allvecs'][1:]), accumulator.accum)
|
| 53 |
+
|
| 54 |
+
@pytest.mark.thread_unsafe
|
| 55 |
+
def test_dogleg_user_warning(self):
|
| 56 |
+
with pytest.warns(RuntimeWarning,
|
| 57 |
+
match=r'Maximum number of iterations'):
|
| 58 |
+
minimize(rosen, self.hard_guess, jac=rosen_der,
|
| 59 |
+
hess=rosen_hess, method='dogleg',
|
| 60 |
+
options={'disp': True, 'maxiter': 1}, )
|
| 61 |
+
|
| 62 |
+
def test_solver_concordance(self):
|
| 63 |
+
# Assert that dogleg uses fewer iterations than ncg on the Rosenbrock
|
| 64 |
+
# test function, although this does not necessarily mean
|
| 65 |
+
# that dogleg is faster or better than ncg even for this function
|
| 66 |
+
# and especially not for other test functions.
|
| 67 |
+
f = rosen
|
| 68 |
+
g = rosen_der
|
| 69 |
+
h = rosen_hess
|
| 70 |
+
for x0 in (self.easy_guess, self.hard_guess):
|
| 71 |
+
r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
| 72 |
+
method='dogleg', options={'return_all': True})
|
| 73 |
+
r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
| 74 |
+
method='trust-ncg',
|
| 75 |
+
options={'return_all': True})
|
| 76 |
+
r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
| 77 |
+
method='trust-krylov',
|
| 78 |
+
options={'return_all': True})
|
| 79 |
+
r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
| 80 |
+
method='newton-cg', options={'return_all': True})
|
| 81 |
+
r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8,
|
| 82 |
+
method='trust-exact',
|
| 83 |
+
options={'return_all': True})
|
| 84 |
+
assert_allclose(self.x_opt, r_dogleg['x'])
|
| 85 |
+
assert_allclose(self.x_opt, r_trust_ncg['x'])
|
| 86 |
+
assert_allclose(self.x_opt, r_trust_krylov['x'])
|
| 87 |
+
assert_allclose(self.x_opt, r_ncg['x'])
|
| 88 |
+
assert_allclose(self.x_opt, r_iterative['x'])
|
| 89 |
+
assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))
|
| 90 |
+
|
| 91 |
+
def test_trust_ncg_hessp(self):
|
| 92 |
+
for x0 in (self.easy_guess, self.hard_guess, self.x_opt):
|
| 93 |
+
r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod,
|
| 94 |
+
tol=1e-8, method='trust-ncg')
|
| 95 |
+
assert_allclose(self.x_opt, r['x'])
|
| 96 |
+
|
| 97 |
+
def test_trust_ncg_start_in_optimum(self):
|
| 98 |
+
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
|
| 99 |
+
tol=1e-8, method='trust-ncg')
|
| 100 |
+
assert_allclose(self.x_opt, r['x'])
|
| 101 |
+
|
| 102 |
+
def test_trust_krylov_start_in_optimum(self):
|
| 103 |
+
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
|
| 104 |
+
tol=1e-8, method='trust-krylov')
|
| 105 |
+
assert_allclose(self.x_opt, r['x'])
|
| 106 |
+
|
| 107 |
+
def test_trust_exact_start_in_optimum(self):
|
| 108 |
+
r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
|
| 109 |
+
tol=1e-8, method='trust-exact')
|
| 110 |
+
assert_allclose(self.x_opt, r['x'])
|
mantis_evalkit/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py
ADDED
|
@@ -0,0 +1,965 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from functools import lru_cache
|
| 4 |
+
|
| 5 |
+
from numpy.testing import (assert_warns, assert_,
|
| 6 |
+
assert_allclose,
|
| 7 |
+
assert_equal,
|
| 8 |
+
assert_array_equal,
|
| 9 |
+
suppress_warnings)
|
| 10 |
+
import numpy as np
|
| 11 |
+
from numpy import finfo, power, nan, isclose, sqrt, exp, sin, cos
|
| 12 |
+
|
| 13 |
+
from scipy import optimize
|
| 14 |
+
from scipy.optimize import (_zeros_py as zeros, newton, root_scalar,
|
| 15 |
+
OptimizeResult)
|
| 16 |
+
|
| 17 |
+
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
|
| 18 |
+
|
| 19 |
+
# Import testing parameters
|
| 20 |
+
from scipy.optimize._tstutils import get_tests, functions as tstutils_functions
|
| 21 |
+
|
| 22 |
+
TOL = 4*np.finfo(float).eps # tolerance
|
| 23 |
+
|
| 24 |
+
_FLOAT_EPS = finfo(float).eps
|
| 25 |
+
|
| 26 |
+
bracket_methods = [zeros.bisect, zeros.ridder, zeros.brentq, zeros.brenth,
|
| 27 |
+
zeros.toms748]
|
| 28 |
+
gradient_methods = [zeros.newton]
|
| 29 |
+
all_methods = bracket_methods + gradient_methods
|
| 30 |
+
|
| 31 |
+
# A few test functions used frequently:
|
| 32 |
+
# # A simple quadratic, (x-1)^2 - 1
|
| 33 |
+
def f1(x):
|
| 34 |
+
return x ** 2 - 2 * x - 1
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def f1_1(x):
|
| 38 |
+
return 2 * x - 2
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def f1_2(x):
|
| 42 |
+
return 2.0 + 0 * x
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def f1_and_p_and_pp(x):
|
| 46 |
+
return f1(x), f1_1(x), f1_2(x)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Simple transcendental function
|
| 50 |
+
def f2(x):
|
| 51 |
+
return exp(x) - cos(x)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def f2_1(x):
|
| 55 |
+
return exp(x) + sin(x)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def f2_2(x):
|
| 59 |
+
return exp(x) + cos(x)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
# lru cached function
|
| 63 |
+
@lru_cache
|
| 64 |
+
def f_lrucached(x):
|
| 65 |
+
return x
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class TestScalarRootFinders:
|
| 69 |
+
# Basic tests for all scalar root finders
|
| 70 |
+
|
| 71 |
+
xtol = 4 * np.finfo(float).eps
|
| 72 |
+
rtol = 4 * np.finfo(float).eps
|
| 73 |
+
|
| 74 |
+
def _run_one_test(self, tc, method, sig_args_keys=None,
|
| 75 |
+
sig_kwargs_keys=None, **kwargs):
|
| 76 |
+
method_args = []
|
| 77 |
+
for k in sig_args_keys or []:
|
| 78 |
+
if k not in tc:
|
| 79 |
+
# If a,b not present use x0, x1. Similarly for f and func
|
| 80 |
+
k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k)
|
| 81 |
+
method_args.append(tc[k])
|
| 82 |
+
|
| 83 |
+
method_kwargs = dict(**kwargs)
|
| 84 |
+
method_kwargs.update({'full_output': True, 'disp': False})
|
| 85 |
+
for k in sig_kwargs_keys or []:
|
| 86 |
+
method_kwargs[k] = tc[k]
|
| 87 |
+
|
| 88 |
+
root = tc.get('root')
|
| 89 |
+
func_args = tc.get('args', ())
|
| 90 |
+
|
| 91 |
+
try:
|
| 92 |
+
r, rr = method(*method_args, args=func_args, **method_kwargs)
|
| 93 |
+
return root, rr, tc
|
| 94 |
+
except Exception:
|
| 95 |
+
return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR, method), tc
|
| 96 |
+
|
| 97 |
+
def run_tests(self, tests, method, name, known_fail=None, **kwargs):
|
| 98 |
+
r"""Run test-cases using the specified method and the supplied signature.
|
| 99 |
+
|
| 100 |
+
Extract the arguments for the method call from the test case
|
| 101 |
+
dictionary using the supplied keys for the method's signature."""
|
| 102 |
+
# The methods have one of two base signatures:
|
| 103 |
+
# (f, a, b, **kwargs) # newton
|
| 104 |
+
# (func, x0, **kwargs) # bisect/brentq/...
|
| 105 |
+
|
| 106 |
+
# FullArgSpec with args, varargs, varkw, defaults, ...
|
| 107 |
+
sig = _getfullargspec(method)
|
| 108 |
+
assert_(not sig.kwonlyargs)
|
| 109 |
+
nDefaults = len(sig.defaults)
|
| 110 |
+
nRequired = len(sig.args) - nDefaults
|
| 111 |
+
sig_args_keys = sig.args[:nRequired]
|
| 112 |
+
sig_kwargs_keys = []
|
| 113 |
+
if name in ['secant', 'newton', 'halley']:
|
| 114 |
+
if name in ['newton', 'halley']:
|
| 115 |
+
sig_kwargs_keys.append('fprime')
|
| 116 |
+
if name in ['halley']:
|
| 117 |
+
sig_kwargs_keys.append('fprime2')
|
| 118 |
+
kwargs['tol'] = self.xtol
|
| 119 |
+
else:
|
| 120 |
+
kwargs['xtol'] = self.xtol
|
| 121 |
+
kwargs['rtol'] = self.rtol
|
| 122 |
+
|
| 123 |
+
results = [list(self._run_one_test(
|
| 124 |
+
tc, method, sig_args_keys=sig_args_keys,
|
| 125 |
+
sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests]
|
| 126 |
+
# results= [[true root, full output, tc], ...]
|
| 127 |
+
|
| 128 |
+
known_fail = known_fail or []
|
| 129 |
+
notcvgd = [elt for elt in results if not elt[1].converged]
|
| 130 |
+
notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail]
|
| 131 |
+
notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd]
|
| 132 |
+
assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []])
|
| 133 |
+
|
| 134 |
+
# The usable xtol and rtol depend on the test
|
| 135 |
+
tols = {'xtol': self.xtol, 'rtol': self.rtol}
|
| 136 |
+
tols.update(**kwargs)
|
| 137 |
+
rtol = tols['rtol']
|
| 138 |
+
atol = tols.get('tol', tols['xtol'])
|
| 139 |
+
|
| 140 |
+
cvgd = [elt for elt in results if elt[1].converged]
|
| 141 |
+
approx = [elt[1].root for elt in cvgd]
|
| 142 |
+
correct = [elt[0] for elt in cvgd]
|
| 143 |
+
# See if the root matches the reference value
|
| 144 |
+
notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if
|
| 145 |
+
not isclose(a, c, rtol=rtol, atol=atol)
|
| 146 |
+
and elt[-1]['ID'] not in known_fail]
|
| 147 |
+
# If not, evaluate the function and see if is 0 at the purported root
|
| 148 |
+
fvs = [tc['f'](aroot, *tc.get('args', tuple()))
|
| 149 |
+
for aroot, c, fullout, tc in notclose]
|
| 150 |
+
notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0]
|
| 151 |
+
assert_equal([notclose, len(notclose)], [[], 0])
|
| 152 |
+
method_from_result = [result[1].method for result in results]
|
| 153 |
+
expected_method = [name for _ in results]
|
| 154 |
+
assert_equal(method_from_result, expected_method)
|
| 155 |
+
|
| 156 |
+
def run_collection(self, collection, method, name, smoothness=None,
|
| 157 |
+
known_fail=None, **kwargs):
|
| 158 |
+
r"""Run a collection of tests using the specified method.
|
| 159 |
+
|
| 160 |
+
The name is used to determine some optional arguments."""
|
| 161 |
+
tests = get_tests(collection, smoothness=smoothness)
|
| 162 |
+
self.run_tests(tests, method, name, known_fail=known_fail, **kwargs)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class TestBracketMethods(TestScalarRootFinders):
|
| 166 |
+
@pytest.mark.parametrize('method', bracket_methods)
|
| 167 |
+
@pytest.mark.parametrize('function', tstutils_functions)
|
| 168 |
+
def test_basic_root_scalar(self, method, function):
|
| 169 |
+
# Tests bracketing root finders called via `root_scalar` on a small
|
| 170 |
+
# set of simple problems, each of which has a root at `x=1`. Checks for
|
| 171 |
+
# converged status and that the root was found.
|
| 172 |
+
a, b = .5, sqrt(3)
|
| 173 |
+
|
| 174 |
+
r = root_scalar(function, method=method.__name__, bracket=[a, b], x0=a,
|
| 175 |
+
xtol=self.xtol, rtol=self.rtol)
|
| 176 |
+
assert r.converged
|
| 177 |
+
assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol)
|
| 178 |
+
assert r.method == method.__name__
|
| 179 |
+
|
| 180 |
+
@pytest.mark.parametrize('method', bracket_methods)
|
| 181 |
+
@pytest.mark.parametrize('function', tstutils_functions)
|
| 182 |
+
def test_basic_individual(self, method, function):
|
| 183 |
+
# Tests individual bracketing root finders on a small set of simple
|
| 184 |
+
# problems, each of which has a root at `x=1`. Checks for converged
|
| 185 |
+
# status and that the root was found.
|
| 186 |
+
a, b = .5, sqrt(3)
|
| 187 |
+
root, r = method(function, a, b, xtol=self.xtol, rtol=self.rtol,
|
| 188 |
+
full_output=True)
|
| 189 |
+
|
| 190 |
+
assert r.converged
|
| 191 |
+
assert_allclose(root, 1.0, atol=self.xtol, rtol=self.rtol)
|
| 192 |
+
|
| 193 |
+
@pytest.mark.parametrize('method', bracket_methods)
|
| 194 |
+
@pytest.mark.parametrize('function', tstutils_functions)
|
| 195 |
+
def test_bracket_is_array(self, method, function):
|
| 196 |
+
# Test bracketing root finders called via `root_scalar` on a small set
|
| 197 |
+
# of simple problems, each of which has a root at `x=1`. Check that
|
| 198 |
+
# passing `bracket` as a `ndarray` is accepted and leads to finding the
|
| 199 |
+
# correct root.
|
| 200 |
+
a, b = .5, sqrt(3)
|
| 201 |
+
r = root_scalar(function, method=method.__name__,
|
| 202 |
+
bracket=np.array([a, b]), x0=a, xtol=self.xtol,
|
| 203 |
+
rtol=self.rtol)
|
| 204 |
+
assert r.converged
|
| 205 |
+
assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol)
|
| 206 |
+
assert r.method == method.__name__
|
| 207 |
+
|
| 208 |
+
@pytest.mark.parametrize('method', bracket_methods)
|
| 209 |
+
def test_aps_collection(self, method):
|
| 210 |
+
self.run_collection('aps', method, method.__name__, smoothness=1)
|
| 211 |
+
|
| 212 |
+
@pytest.mark.parametrize('method', [zeros.bisect, zeros.ridder,
|
| 213 |
+
zeros.toms748])
|
| 214 |
+
def test_chandrupatla_collection(self, method):
|
| 215 |
+
known_fail = {'fun7.4'} if method == zeros.ridder else {}
|
| 216 |
+
self.run_collection('chandrupatla', method, method.__name__,
|
| 217 |
+
known_fail=known_fail)
|
| 218 |
+
|
| 219 |
+
@pytest.mark.parametrize('method', bracket_methods)
|
| 220 |
+
def test_lru_cached_individual(self, method):
|
| 221 |
+
# check that https://github.com/scipy/scipy/issues/10846 is fixed
|
| 222 |
+
# (`root_scalar` failed when passed a function that was `@lru_cache`d)
|
| 223 |
+
a, b = -1, 1
|
| 224 |
+
root, r = method(f_lrucached, a, b, full_output=True)
|
| 225 |
+
assert r.converged
|
| 226 |
+
assert_allclose(root, 0)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
class TestNewton(TestScalarRootFinders):
|
| 230 |
+
def test_newton_collections(self):
|
| 231 |
+
known_fail = ['aps.13.00']
|
| 232 |
+
known_fail += ['aps.12.05', 'aps.12.17'] # fails under Windows Py27
|
| 233 |
+
for collection in ['aps', 'complex']:
|
| 234 |
+
self.run_collection(collection, zeros.newton, 'newton',
|
| 235 |
+
smoothness=2, known_fail=known_fail)
|
| 236 |
+
|
| 237 |
+
def test_halley_collections(self):
|
| 238 |
+
known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09',
|
| 239 |
+
'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13',
|
| 240 |
+
'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17',
|
| 241 |
+
'aps.12.18', 'aps.13.00']
|
| 242 |
+
for collection in ['aps', 'complex']:
|
| 243 |
+
self.run_collection(collection, zeros.newton, 'halley',
|
| 244 |
+
smoothness=2, known_fail=known_fail)
|
| 245 |
+
|
| 246 |
+
def test_newton(self):
|
| 247 |
+
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
|
| 248 |
+
x = zeros.newton(f, 3, tol=1e-6)
|
| 249 |
+
assert_allclose(f(x), 0, atol=1e-6)
|
| 250 |
+
x = zeros.newton(f, 3, x1=5, tol=1e-6) # secant, x0 and x1
|
| 251 |
+
assert_allclose(f(x), 0, atol=1e-6)
|
| 252 |
+
x = zeros.newton(f, 3, fprime=f_1, tol=1e-6) # newton
|
| 253 |
+
assert_allclose(f(x), 0, atol=1e-6)
|
| 254 |
+
x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6) # halley
|
| 255 |
+
assert_allclose(f(x), 0, atol=1e-6)
|
| 256 |
+
|
| 257 |
+
def test_newton_by_name(self):
|
| 258 |
+
r"""Invoke newton through root_scalar()"""
|
| 259 |
+
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
|
| 260 |
+
r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6)
|
| 261 |
+
assert_allclose(f(r.root), 0, atol=1e-6)
|
| 262 |
+
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
|
| 263 |
+
r = root_scalar(f, method='newton', x0=3, xtol=1e-6) # without f'
|
| 264 |
+
assert_allclose(f(r.root), 0, atol=1e-6)
|
| 265 |
+
|
| 266 |
+
def test_secant_by_name(self):
|
| 267 |
+
r"""Invoke secant through root_scalar()"""
|
| 268 |
+
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
|
| 269 |
+
r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6)
|
| 270 |
+
assert_allclose(f(r.root), 0, atol=1e-6)
|
| 271 |
+
r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6)
|
| 272 |
+
assert_allclose(f(r.root), 0, atol=1e-6)
|
| 273 |
+
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
|
| 274 |
+
r = root_scalar(f, method='secant', x0=3, xtol=1e-6) # without x1
|
| 275 |
+
assert_allclose(f(r.root), 0, atol=1e-6)
|
| 276 |
+
|
| 277 |
+
def test_halley_by_name(self):
|
| 278 |
+
r"""Invoke halley through root_scalar()"""
|
| 279 |
+
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
|
| 280 |
+
r = root_scalar(f, method='halley', x0=3,
|
| 281 |
+
fprime=f_1, fprime2=f_2, xtol=1e-6)
|
| 282 |
+
assert_allclose(f(r.root), 0, atol=1e-6)
|
| 283 |
+
|
| 284 |
+
def test_root_scalar_fail(self):
|
| 285 |
+
message = 'fprime2 must be specified for halley'
|
| 286 |
+
with pytest.raises(ValueError, match=message):
|
| 287 |
+
root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6) # no fprime2
|
| 288 |
+
message = 'fprime must be specified for halley'
|
| 289 |
+
with pytest.raises(ValueError, match=message):
|
| 290 |
+
root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6) # no fprime
|
| 291 |
+
|
| 292 |
+
def test_array_newton(self):
|
| 293 |
+
"""test newton with array"""
|
| 294 |
+
|
| 295 |
+
def f1(x, *a):
|
| 296 |
+
b = a[0] + x * a[3]
|
| 297 |
+
return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x
|
| 298 |
+
|
| 299 |
+
def f1_1(x, *a):
|
| 300 |
+
b = a[3] / a[5]
|
| 301 |
+
return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1
|
| 302 |
+
|
| 303 |
+
def f1_2(x, *a):
|
| 304 |
+
b = a[3] / a[5]
|
| 305 |
+
return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2
|
| 306 |
+
|
| 307 |
+
a0 = np.array([
|
| 308 |
+
5.32725221, 5.48673747, 5.49539973,
|
| 309 |
+
5.36387202, 4.80237316, 1.43764452,
|
| 310 |
+
5.23063958, 5.46094772, 5.50512718,
|
| 311 |
+
5.42046290
|
| 312 |
+
])
|
| 313 |
+
a1 = (np.sin(range(10)) + 1.0) * 7.0
|
| 314 |
+
args = (a0, a1, 1e-09, 0.004, 10, 0.27456)
|
| 315 |
+
x0 = [7.0] * 10
|
| 316 |
+
x = zeros.newton(f1, x0, f1_1, args)
|
| 317 |
+
x_expected = (
|
| 318 |
+
6.17264965, 11.7702805, 12.2219954,
|
| 319 |
+
7.11017681, 1.18151293, 0.143707955,
|
| 320 |
+
4.31928228, 10.5419107, 12.7552490,
|
| 321 |
+
8.91225749
|
| 322 |
+
)
|
| 323 |
+
assert_allclose(x, x_expected)
|
| 324 |
+
# test halley's
|
| 325 |
+
x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2)
|
| 326 |
+
assert_allclose(x, x_expected)
|
| 327 |
+
# test secant
|
| 328 |
+
x = zeros.newton(f1, x0, args=args)
|
| 329 |
+
assert_allclose(x, x_expected)
|
| 330 |
+
|
| 331 |
+
def test_array_newton_complex(self):
|
| 332 |
+
def f(x):
|
| 333 |
+
return x + 1+1j
|
| 334 |
+
|
| 335 |
+
def fprime(x):
|
| 336 |
+
return 1.0
|
| 337 |
+
|
| 338 |
+
t = np.full(4, 1j)
|
| 339 |
+
x = zeros.newton(f, t, fprime=fprime)
|
| 340 |
+
assert_allclose(f(x), 0.)
|
| 341 |
+
|
| 342 |
+
# should work even if x0 is not complex
|
| 343 |
+
t = np.ones(4)
|
| 344 |
+
x = zeros.newton(f, t, fprime=fprime)
|
| 345 |
+
assert_allclose(f(x), 0.)
|
| 346 |
+
|
| 347 |
+
x = zeros.newton(f, t)
|
| 348 |
+
assert_allclose(f(x), 0.)
|
| 349 |
+
|
| 350 |
+
def test_array_secant_active_zero_der(self):
|
| 351 |
+
"""test secant doesn't continue to iterate zero derivatives"""
|
| 352 |
+
x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5],
|
| 353 |
+
args=[np.array([17, 25])])
|
| 354 |
+
assert_allclose(x, (4.123105625617661, 5.0))
|
| 355 |
+
|
| 356 |
+
def test_array_newton_integers(self):
|
| 357 |
+
# test secant with float
|
| 358 |
+
x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2,
|
| 359 |
+
args=([15.0, 17.0],))
|
| 360 |
+
assert_allclose(x, (3.872983346207417, 4.123105625617661))
|
| 361 |
+
# test integer becomes float
|
| 362 |
+
x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],))
|
| 363 |
+
assert_allclose(x, (3.872983346207417, 4.123105625617661))
|
| 364 |
+
|
| 365 |
+
@pytest.mark.thread_unsafe
|
| 366 |
+
def test_array_newton_zero_der_failures(self):
|
| 367 |
+
# test derivative zero warning
|
| 368 |
+
assert_warns(RuntimeWarning, zeros.newton,
|
| 369 |
+
lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y)
|
| 370 |
+
# test failures and zero_der
|
| 371 |
+
with pytest.warns(RuntimeWarning):
|
| 372 |
+
results = zeros.newton(lambda y: y**2 - 2, [0., 0.],
|
| 373 |
+
lambda y: 2*y, full_output=True)
|
| 374 |
+
assert_allclose(results.root, 0)
|
| 375 |
+
assert results.zero_der.all()
|
| 376 |
+
assert not results.converged.any()
|
| 377 |
+
|
| 378 |
+
def test_newton_combined(self):
|
| 379 |
+
def f1(x):
|
| 380 |
+
return x ** 2 - 2 * x - 1
|
| 381 |
+
def f1_1(x):
|
| 382 |
+
return 2 * x - 2
|
| 383 |
+
def f1_2(x):
|
| 384 |
+
return 2.0 + 0 * x
|
| 385 |
+
|
| 386 |
+
def f1_and_p_and_pp(x):
|
| 387 |
+
return x**2 - 2*x-1, 2*x-2, 2.0
|
| 388 |
+
|
| 389 |
+
sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1)
|
| 390 |
+
sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True)
|
| 391 |
+
assert_allclose(sol0.root, sol.root, atol=1e-8)
|
| 392 |
+
assert_equal(2*sol.function_calls, sol0.function_calls)
|
| 393 |
+
|
| 394 |
+
sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2)
|
| 395 |
+
sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True)
|
| 396 |
+
assert_allclose(sol0.root, sol.root, atol=1e-8)
|
| 397 |
+
assert_equal(3*sol.function_calls, sol0.function_calls)
|
| 398 |
+
|
| 399 |
+
def test_newton_full_output(self, capsys):
|
| 400 |
+
# Test the full_output capability, both when converging and not.
|
| 401 |
+
# Use simple polynomials, to avoid hitting platform dependencies
|
| 402 |
+
# (e.g., exp & trig) in number of iterations
|
| 403 |
+
|
| 404 |
+
x0 = 3
|
| 405 |
+
expected_counts = [(6, 7), (5, 10), (3, 9)]
|
| 406 |
+
|
| 407 |
+
for derivs in range(3):
|
| 408 |
+
kwargs = {'tol': 1e-6, 'full_output': True, }
|
| 409 |
+
for k, v in [['fprime', f1_1], ['fprime2', f1_2]][:derivs]:
|
| 410 |
+
kwargs[k] = v
|
| 411 |
+
|
| 412 |
+
x, r = zeros.newton(f1, x0, disp=False, **kwargs)
|
| 413 |
+
assert_(r.converged)
|
| 414 |
+
assert_equal(x, r.root)
|
| 415 |
+
assert_equal((r.iterations, r.function_calls), expected_counts[derivs])
|
| 416 |
+
if derivs == 0:
|
| 417 |
+
assert r.function_calls <= r.iterations + 1
|
| 418 |
+
else:
|
| 419 |
+
assert_equal(r.function_calls, (derivs + 1) * r.iterations)
|
| 420 |
+
|
| 421 |
+
# Now repeat, allowing one fewer iteration to force convergence failure
|
| 422 |
+
iters = r.iterations - 1
|
| 423 |
+
x, r = zeros.newton(f1, x0, maxiter=iters, disp=False, **kwargs)
|
| 424 |
+
assert_(not r.converged)
|
| 425 |
+
assert_equal(x, r.root)
|
| 426 |
+
assert_equal(r.iterations, iters)
|
| 427 |
+
|
| 428 |
+
if derivs == 1:
|
| 429 |
+
# Check that the correct Exception is raised and
|
| 430 |
+
# validate the start of the message.
|
| 431 |
+
msg = 'Failed to converge after %d iterations, value is .*' % (iters)
|
| 432 |
+
with pytest.raises(RuntimeError, match=msg):
|
| 433 |
+
x, r = zeros.newton(f1, x0, maxiter=iters, disp=True, **kwargs)
|
| 434 |
+
|
| 435 |
+
@pytest.mark.thread_unsafe
|
| 436 |
+
def test_deriv_zero_warning(self):
|
| 437 |
+
def func(x):
|
| 438 |
+
return x ** 2 - 2.0
|
| 439 |
+
def dfunc(x):
|
| 440 |
+
return 2 * x
|
| 441 |
+
assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False)
|
| 442 |
+
with pytest.raises(RuntimeError, match='Derivative was zero'):
|
| 443 |
+
zeros.newton(func, 0.0, dfunc)
|
| 444 |
+
|
| 445 |
+
def test_newton_does_not_modify_x0(self):
|
| 446 |
+
# https://github.com/scipy/scipy/issues/9964
|
| 447 |
+
x0 = np.array([0.1, 3])
|
| 448 |
+
x0_copy = x0.copy() # Copy to test for equality.
|
| 449 |
+
newton(np.sin, x0, np.cos)
|
| 450 |
+
assert_array_equal(x0, x0_copy)
|
| 451 |
+
|
| 452 |
+
def test_gh17570_defaults(self):
|
| 453 |
+
# Previously, when fprime was not specified, root_scalar would default
|
| 454 |
+
# to secant. When x1 was not specified, secant failed.
|
| 455 |
+
# Check that without fprime, the default is secant if x1 is specified
|
| 456 |
+
# and newton otherwise.
|
| 457 |
+
# Also confirm that `x` is always a scalar (gh-21148)
|
| 458 |
+
def f(x):
|
| 459 |
+
assert np.isscalar(x)
|
| 460 |
+
return f1(x)
|
| 461 |
+
|
| 462 |
+
res_newton_default = root_scalar(f, method='newton', x0=3, xtol=1e-6)
|
| 463 |
+
res_secant_default = root_scalar(f, method='secant', x0=3, x1=2,
|
| 464 |
+
xtol=1e-6)
|
| 465 |
+
# `newton` uses the secant method when `x1` and `x2` are specified
|
| 466 |
+
res_secant = newton(f, x0=3, x1=2, tol=1e-6, full_output=True)[1]
|
| 467 |
+
|
| 468 |
+
# all three found a root
|
| 469 |
+
assert_allclose(f(res_newton_default.root), 0, atol=1e-6)
|
| 470 |
+
assert res_newton_default.root.shape == tuple()
|
| 471 |
+
assert_allclose(f(res_secant_default.root), 0, atol=1e-6)
|
| 472 |
+
assert res_secant_default.root.shape == tuple()
|
| 473 |
+
assert_allclose(f(res_secant.root), 0, atol=1e-6)
|
| 474 |
+
assert res_secant.root.shape == tuple()
|
| 475 |
+
|
| 476 |
+
# Defaults are correct
|
| 477 |
+
assert (res_secant_default.root
|
| 478 |
+
== res_secant.root
|
| 479 |
+
!= res_newton_default.iterations)
|
| 480 |
+
assert (res_secant_default.iterations
|
| 481 |
+
== res_secant_default.function_calls - 1 # true for secant
|
| 482 |
+
== res_secant.iterations
|
| 483 |
+
!= res_newton_default.iterations
|
| 484 |
+
== res_newton_default.function_calls/2) # newton 2-point diff
|
| 485 |
+
|
| 486 |
+
@pytest.mark.parametrize('kwargs', [dict(), {'method': 'newton'}])
|
| 487 |
+
def test_args_gh19090(self, kwargs):
|
| 488 |
+
def f(x, a, b):
|
| 489 |
+
assert a == 3
|
| 490 |
+
assert b == 1
|
| 491 |
+
return (x ** a - b)
|
| 492 |
+
|
| 493 |
+
res = optimize.root_scalar(f, x0=3, args=(3, 1), **kwargs)
|
| 494 |
+
assert res.converged
|
| 495 |
+
assert_allclose(res.root, 1)
|
| 496 |
+
|
| 497 |
+
@pytest.mark.parametrize('method', ['secant', 'newton'])
|
| 498 |
+
def test_int_x0_gh19280(self, method):
|
| 499 |
+
# Originally, `newton` ensured that only floats were passed to the
|
| 500 |
+
# callable. This was inadvertently changed by gh-17669. Check that
|
| 501 |
+
# it has been changed back.
|
| 502 |
+
def f(x):
|
| 503 |
+
# an integer raised to a negative integer power would fail
|
| 504 |
+
return x**-2 - 2
|
| 505 |
+
|
| 506 |
+
res = optimize.root_scalar(f, x0=1, method=method)
|
| 507 |
+
assert res.converged
|
| 508 |
+
assert_allclose(abs(res.root), 2**-0.5)
|
| 509 |
+
assert res.root.dtype == np.dtype(np.float64)
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def test_gh_5555():
|
| 513 |
+
root = 0.1
|
| 514 |
+
|
| 515 |
+
def f(x):
|
| 516 |
+
return x - root
|
| 517 |
+
|
| 518 |
+
methods = [zeros.bisect, zeros.ridder]
|
| 519 |
+
xtol = rtol = TOL
|
| 520 |
+
for method in methods:
|
| 521 |
+
res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol)
|
| 522 |
+
assert_allclose(root, res, atol=xtol, rtol=rtol,
|
| 523 |
+
err_msg=f'method {method.__name__}')
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
def test_gh_5557():
|
| 527 |
+
# Show that without the changes in 5557 brentq and brenth might
|
| 528 |
+
# only achieve a tolerance of 2*(xtol + rtol*|res|).
|
| 529 |
+
|
| 530 |
+
# f linearly interpolates (0, -0.1), (0.5, -0.1), and (1,
|
| 531 |
+
# 0.4). The important parts are that |f(0)| < |f(1)| (so that
|
| 532 |
+
# brent takes 0 as the initial guess), |f(0)| < atol (so that
|
| 533 |
+
# brent accepts 0 as the root), and that the exact root of f lies
|
| 534 |
+
# more than atol away from 0 (so that brent doesn't achieve the
|
| 535 |
+
# desired tolerance).
|
| 536 |
+
def f(x):
|
| 537 |
+
if x < 0.5:
|
| 538 |
+
return -0.1
|
| 539 |
+
else:
|
| 540 |
+
return x - 0.6
|
| 541 |
+
|
| 542 |
+
atol = 0.51
|
| 543 |
+
rtol = 4 * _FLOAT_EPS
|
| 544 |
+
methods = [zeros.brentq, zeros.brenth]
|
| 545 |
+
for method in methods:
|
| 546 |
+
res = method(f, 0, 1, xtol=atol, rtol=rtol)
|
| 547 |
+
assert_allclose(0.6, res, atol=atol, rtol=rtol)
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def test_brent_underflow_in_root_bracketing():
|
| 551 |
+
# Testing if an interval [a,b] brackets a zero of a function
|
| 552 |
+
# by checking f(a)*f(b) < 0 is not reliable when the product
|
| 553 |
+
# underflows/overflows. (reported in issue# 13737)
|
| 554 |
+
|
| 555 |
+
underflow_scenario = (-450.0, -350.0, -400.0)
|
| 556 |
+
overflow_scenario = (350.0, 450.0, 400.0)
|
| 557 |
+
|
| 558 |
+
for a, b, root in [underflow_scenario, overflow_scenario]:
|
| 559 |
+
c = np.exp(root)
|
| 560 |
+
for method in [zeros.brenth, zeros.brentq]:
|
| 561 |
+
res = method(lambda x: np.exp(x)-c, a, b)
|
| 562 |
+
assert_allclose(root, res)
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
class TestRootResults:
|
| 566 |
+
r = zeros.RootResults(root=1.0, iterations=44, function_calls=46, flag=0,
|
| 567 |
+
method="newton")
|
| 568 |
+
|
| 569 |
+
def test_repr(self):
|
| 570 |
+
expected_repr = (" converged: True\n flag: converged"
|
| 571 |
+
"\n function_calls: 46\n iterations: 44\n"
|
| 572 |
+
" root: 1.0\n method: newton")
|
| 573 |
+
assert_equal(repr(self.r), expected_repr)
|
| 574 |
+
|
| 575 |
+
def test_type(self):
|
| 576 |
+
assert isinstance(self.r, OptimizeResult)
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
def test_complex_halley():
|
| 580 |
+
"""Test Halley's works with complex roots"""
|
| 581 |
+
def f(x, *a):
|
| 582 |
+
return a[0] * x**2 + a[1] * x + a[2]
|
| 583 |
+
|
| 584 |
+
def f_1(x, *a):
|
| 585 |
+
return 2 * a[0] * x + a[1]
|
| 586 |
+
|
| 587 |
+
def f_2(x, *a):
|
| 588 |
+
retval = 2 * a[0]
|
| 589 |
+
try:
|
| 590 |
+
size = len(x)
|
| 591 |
+
except TypeError:
|
| 592 |
+
return retval
|
| 593 |
+
else:
|
| 594 |
+
return [retval] * size
|
| 595 |
+
|
| 596 |
+
z = complex(1.0, 2.0)
|
| 597 |
+
coeffs = (2.0, 3.0, 4.0)
|
| 598 |
+
y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
|
| 599 |
+
# (-0.75000000000000078+1.1989578808281789j)
|
| 600 |
+
assert_allclose(f(y, *coeffs), 0, atol=1e-6)
|
| 601 |
+
z = [z] * 10
|
| 602 |
+
coeffs = (2.0, 3.0, 4.0)
|
| 603 |
+
y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
|
| 604 |
+
assert_allclose(f(y, *coeffs), 0, atol=1e-6)
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
@pytest.mark.thread_unsafe
|
| 608 |
+
def test_zero_der_nz_dp(capsys):
|
| 609 |
+
"""Test secant method with a non-zero dp, but an infinite newton step"""
|
| 610 |
+
# pick a symmetrical functions and choose a point on the side that with dx
|
| 611 |
+
# makes a secant that is a flat line with zero slope, EG: f = (x - 100)**2,
|
| 612 |
+
# which has a root at x = 100 and is symmetrical around the line x = 100
|
| 613 |
+
# we have to pick a really big number so that it is consistently true
|
| 614 |
+
# now find a point on each side so that the secant has a zero slope
|
| 615 |
+
dx = np.finfo(float).eps ** 0.33
|
| 616 |
+
# 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100
|
| 617 |
+
# -> 200 = p0 * (2 + dx) + dx
|
| 618 |
+
p0 = (200.0 - dx) / (2.0 + dx)
|
| 619 |
+
with suppress_warnings() as sup:
|
| 620 |
+
sup.filter(RuntimeWarning, "RMS of")
|
| 621 |
+
x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10)
|
| 622 |
+
assert_allclose(x, [100] * 10)
|
| 623 |
+
# test scalar cases too
|
| 624 |
+
p0 = (2.0 - 1e-4) / (2.0 + 1e-4)
|
| 625 |
+
with suppress_warnings() as sup:
|
| 626 |
+
sup.filter(RuntimeWarning, "Tolerance of")
|
| 627 |
+
x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False)
|
| 628 |
+
assert_allclose(x, 1)
|
| 629 |
+
with pytest.raises(RuntimeError, match='Tolerance of'):
|
| 630 |
+
x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True)
|
| 631 |
+
p0 = (-2.0 + 1e-4) / (2.0 + 1e-4)
|
| 632 |
+
with suppress_warnings() as sup:
|
| 633 |
+
sup.filter(RuntimeWarning, "Tolerance of")
|
| 634 |
+
x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False)
|
| 635 |
+
assert_allclose(x, -1)
|
| 636 |
+
with pytest.raises(RuntimeError, match='Tolerance of'):
|
| 637 |
+
x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True)
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
@pytest.mark.thread_unsafe
|
| 641 |
+
def test_array_newton_failures():
|
| 642 |
+
"""Test that array newton fails as expected"""
|
| 643 |
+
# p = 0.68 # [MPa]
|
| 644 |
+
# dp = -0.068 * 1e6 # [Pa]
|
| 645 |
+
# T = 323 # [K]
|
| 646 |
+
diameter = 0.10 # [m]
|
| 647 |
+
# L = 100 # [m]
|
| 648 |
+
roughness = 0.00015 # [m]
|
| 649 |
+
rho = 988.1 # [kg/m**3]
|
| 650 |
+
mu = 5.4790e-04 # [Pa*s]
|
| 651 |
+
u = 2.488 # [m/s]
|
| 652 |
+
reynolds_number = rho * u * diameter / mu # Reynolds number
|
| 653 |
+
|
| 654 |
+
def colebrook_eqn(darcy_friction, re, dia):
|
| 655 |
+
return (1 / np.sqrt(darcy_friction) +
|
| 656 |
+
2 * np.log10(roughness / 3.7 / dia +
|
| 657 |
+
2.51 / re / np.sqrt(darcy_friction)))
|
| 658 |
+
|
| 659 |
+
# only some failures
|
| 660 |
+
with pytest.warns(RuntimeWarning):
|
| 661 |
+
result = zeros.newton(
|
| 662 |
+
colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2,
|
| 663 |
+
args=[reynolds_number, diameter], full_output=True
|
| 664 |
+
)
|
| 665 |
+
assert not result.converged.all()
|
| 666 |
+
# they all fail
|
| 667 |
+
with pytest.raises(RuntimeError):
|
| 668 |
+
result = zeros.newton(
|
| 669 |
+
colebrook_eqn, x0=[0.01] * 2, maxiter=2,
|
| 670 |
+
args=[reynolds_number, diameter], full_output=True
|
| 671 |
+
)
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
# this test should **not** raise a RuntimeWarning
|
| 675 |
+
def test_gh8904_zeroder_at_root_fails():
|
| 676 |
+
"""Test that Newton or Halley don't warn if zero derivative at root"""
|
| 677 |
+
|
| 678 |
+
# a function that has a zero derivative at it's root
|
| 679 |
+
def f_zeroder_root(x):
|
| 680 |
+
return x**3 - x**2
|
| 681 |
+
|
| 682 |
+
# should work with secant
|
| 683 |
+
r = zeros.newton(f_zeroder_root, x0=0)
|
| 684 |
+
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
|
| 685 |
+
# test again with array
|
| 686 |
+
r = zeros.newton(f_zeroder_root, x0=[0]*10)
|
| 687 |
+
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
|
| 688 |
+
|
| 689 |
+
# 1st derivative
|
| 690 |
+
def fder(x):
|
| 691 |
+
return 3 * x**2 - 2 * x
|
| 692 |
+
|
| 693 |
+
# 2nd derivative
|
| 694 |
+
def fder2(x):
|
| 695 |
+
return 6*x - 2
|
| 696 |
+
|
| 697 |
+
# should work with newton and halley
|
| 698 |
+
r = zeros.newton(f_zeroder_root, x0=0, fprime=fder)
|
| 699 |
+
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
|
| 700 |
+
r = zeros.newton(f_zeroder_root, x0=0, fprime=fder,
|
| 701 |
+
fprime2=fder2)
|
| 702 |
+
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
|
| 703 |
+
# test again with array
|
| 704 |
+
r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder)
|
| 705 |
+
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
|
| 706 |
+
r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder,
|
| 707 |
+
fprime2=fder2)
|
| 708 |
+
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
|
| 709 |
+
|
| 710 |
+
# also test that if a root is found we do not raise RuntimeWarning even if
|
| 711 |
+
# the derivative is zero, EG: at x = 0.5, then fval = -0.125 and
|
| 712 |
+
# fder = -0.25 so the next guess is 0.5 - (-0.125/-0.5) = 0 which is the
|
| 713 |
+
# root, but if the solver continued with that guess, then it will calculate
|
| 714 |
+
# a zero derivative, so it should return the root w/o RuntimeWarning
|
| 715 |
+
r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder)
|
| 716 |
+
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
|
| 717 |
+
# test again with array
|
| 718 |
+
r = zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder)
|
| 719 |
+
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
|
| 720 |
+
# doesn't apply to halley
|
| 721 |
+
|
| 722 |
+
|
| 723 |
+
def test_gh_8881():
|
| 724 |
+
r"""Test that Halley's method realizes that the 2nd order adjustment
|
| 725 |
+
is too big and drops off to the 1st order adjustment."""
|
| 726 |
+
n = 9
|
| 727 |
+
|
| 728 |
+
def f(x):
|
| 729 |
+
return power(x, 1.0/n) - power(n, 1.0/n)
|
| 730 |
+
|
| 731 |
+
def fp(x):
|
| 732 |
+
return power(x, (1.0-n)/n)/n
|
| 733 |
+
|
| 734 |
+
def fpp(x):
|
| 735 |
+
return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n
|
| 736 |
+
|
| 737 |
+
x0 = 0.1
|
| 738 |
+
# The root is at x=9.
|
| 739 |
+
# The function has positive slope, x0 < root.
|
| 740 |
+
# Newton succeeds in 8 iterations
|
| 741 |
+
rt, r = newton(f, x0, fprime=fp, full_output=True)
|
| 742 |
+
assert r.converged
|
| 743 |
+
# Before the Issue 8881/PR 8882, halley would send x in the wrong direction.
|
| 744 |
+
# Check that it now succeeds.
|
| 745 |
+
rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
|
| 746 |
+
assert r.converged
|
| 747 |
+
|
| 748 |
+
|
| 749 |
+
def test_gh_9608_preserve_array_shape():
|
| 750 |
+
"""
|
| 751 |
+
Test that shape is preserved for array inputs even if fprime or fprime2 is
|
| 752 |
+
scalar
|
| 753 |
+
"""
|
| 754 |
+
def f(x):
|
| 755 |
+
return x**2
|
| 756 |
+
|
| 757 |
+
def fp(x):
|
| 758 |
+
return 2 * x
|
| 759 |
+
|
| 760 |
+
def fpp(x):
|
| 761 |
+
return 2
|
| 762 |
+
|
| 763 |
+
x0 = np.array([-2], dtype=np.float32)
|
| 764 |
+
rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
|
| 765 |
+
assert r.converged
|
| 766 |
+
|
| 767 |
+
x0_array = np.array([-2, -3], dtype=np.float32)
|
| 768 |
+
# This next invocation should fail
|
| 769 |
+
with pytest.raises(IndexError):
|
| 770 |
+
result = zeros.newton(
|
| 771 |
+
f, x0_array, fprime=fp, fprime2=fpp, full_output=True
|
| 772 |
+
)
|
| 773 |
+
|
| 774 |
+
def fpp_array(x):
|
| 775 |
+
return np.full(np.shape(x), 2, dtype=np.float32)
|
| 776 |
+
|
| 777 |
+
result = zeros.newton(
|
| 778 |
+
f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True
|
| 779 |
+
)
|
| 780 |
+
assert result.converged.all()
|
| 781 |
+
|
| 782 |
+
|
| 783 |
+
@pytest.mark.parametrize(
|
| 784 |
+
"maximum_iterations,flag_expected",
|
| 785 |
+
[(10, zeros.CONVERR), (100, zeros.CONVERGED)])
|
| 786 |
+
def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected):
|
| 787 |
+
"""
|
| 788 |
+
Test that if the maximum iterations is exceeded that the flag is not
|
| 789 |
+
converged.
|
| 790 |
+
"""
|
| 791 |
+
result = zeros.brentq(
|
| 792 |
+
lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5,
|
| 793 |
+
-30, 30, (), 1e-6, 1e-6, maximum_iterations,
|
| 794 |
+
full_output=True, disp=False)
|
| 795 |
+
assert result[1].flag == flag_expected
|
| 796 |
+
if flag_expected == zeros.CONVERR:
|
| 797 |
+
# didn't converge because exceeded maximum iterations
|
| 798 |
+
assert result[1].iterations == maximum_iterations
|
| 799 |
+
elif flag_expected == zeros.CONVERGED:
|
| 800 |
+
# converged before maximum iterations
|
| 801 |
+
assert result[1].iterations < maximum_iterations
|
| 802 |
+
|
| 803 |
+
|
| 804 |
+
@pytest.mark.thread_unsafe
|
| 805 |
+
def test_gh9551_raise_error_if_disp_true():
|
| 806 |
+
"""Test that if disp is true then zero derivative raises RuntimeError"""
|
| 807 |
+
|
| 808 |
+
def f(x):
|
| 809 |
+
return x*x + 1
|
| 810 |
+
|
| 811 |
+
def f_p(x):
|
| 812 |
+
return 2*x
|
| 813 |
+
|
| 814 |
+
assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False)
|
| 815 |
+
with pytest.raises(
|
| 816 |
+
RuntimeError,
|
| 817 |
+
match=r'^Derivative was zero\. Failed to converge after \d+ iterations, '
|
| 818 |
+
r'value is [+-]?\d*\.\d+\.$'):
|
| 819 |
+
zeros.newton(f, 1.0, f_p)
|
| 820 |
+
root = zeros.newton(f, complex(10.0, 10.0), f_p)
|
| 821 |
+
assert_allclose(root, complex(0.0, 1.0))
|
| 822 |
+
|
| 823 |
+
|
| 824 |
+
@pytest.mark.parametrize('solver_name',
|
| 825 |
+
['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
|
| 826 |
+
def test_gh3089_8394(solver_name):
|
| 827 |
+
# gh-3089 and gh-8394 reported that bracketing solvers returned incorrect
|
| 828 |
+
# results when they encountered NaNs. Check that this is resolved.
|
| 829 |
+
def f(x):
|
| 830 |
+
return np.nan
|
| 831 |
+
|
| 832 |
+
solver = getattr(zeros, solver_name)
|
| 833 |
+
with pytest.raises(ValueError, match="The function value at x..."):
|
| 834 |
+
solver(f, 0, 1)
|
| 835 |
+
|
| 836 |
+
|
| 837 |
+
@pytest.mark.parametrize('method',
|
| 838 |
+
['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
|
| 839 |
+
def test_gh18171(method):
|
| 840 |
+
# gh-3089 and gh-8394 reported that bracketing solvers returned incorrect
|
| 841 |
+
# results when they encountered NaNs. Check that `root_scalar` returns
|
| 842 |
+
# normally but indicates that convergence was unsuccessful. See gh-18171.
|
| 843 |
+
def f(x):
|
| 844 |
+
f._count += 1
|
| 845 |
+
return np.nan
|
| 846 |
+
f._count = 0
|
| 847 |
+
|
| 848 |
+
res = root_scalar(f, bracket=(0, 1), method=method)
|
| 849 |
+
assert res.converged is False
|
| 850 |
+
assert res.flag.startswith("The function value at x")
|
| 851 |
+
assert res.function_calls == f._count
|
| 852 |
+
assert str(res.root) in res.flag
|
| 853 |
+
|
| 854 |
+
|
| 855 |
+
@pytest.mark.parametrize('solver_name',
|
| 856 |
+
['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
|
| 857 |
+
@pytest.mark.parametrize('rs_interface', [True, False])
|
| 858 |
+
def test_function_calls(solver_name, rs_interface):
|
| 859 |
+
# There do not appear to be checks that the bracketing solvers report the
|
| 860 |
+
# correct number of function evaluations. Check that this is the case.
|
| 861 |
+
solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b)))
|
| 862 |
+
if rs_interface else getattr(zeros, solver_name))
|
| 863 |
+
|
| 864 |
+
def f(x):
|
| 865 |
+
f.calls += 1
|
| 866 |
+
return x**2 - 1
|
| 867 |
+
f.calls = 0
|
| 868 |
+
|
| 869 |
+
res = solver(f, 0, 10, full_output=True)
|
| 870 |
+
|
| 871 |
+
if rs_interface:
|
| 872 |
+
assert res.function_calls == f.calls
|
| 873 |
+
else:
|
| 874 |
+
assert res[1].function_calls == f.calls
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
@pytest.mark.thread_unsafe
|
| 878 |
+
def test_gh_14486_converged_false():
|
| 879 |
+
"""Test that zero slope with secant method results in a converged=False"""
|
| 880 |
+
def lhs(x):
|
| 881 |
+
return x * np.exp(-x*x) - 0.07
|
| 882 |
+
|
| 883 |
+
with pytest.warns(RuntimeWarning, match='Tolerance of'):
|
| 884 |
+
res = root_scalar(lhs, method='secant', x0=-0.15, x1=1.0)
|
| 885 |
+
assert not res.converged
|
| 886 |
+
assert res.flag == 'convergence error'
|
| 887 |
+
|
| 888 |
+
with pytest.warns(RuntimeWarning, match='Tolerance of'):
|
| 889 |
+
res = newton(lhs, x0=-0.15, x1=1.0, disp=False, full_output=True)[1]
|
| 890 |
+
assert not res.converged
|
| 891 |
+
assert res.flag == 'convergence error'
|
| 892 |
+
|
| 893 |
+
|
| 894 |
+
@pytest.mark.parametrize('solver_name',
|
| 895 |
+
['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
|
| 896 |
+
@pytest.mark.parametrize('rs_interface', [True, False])
|
| 897 |
+
def test_gh5584(solver_name, rs_interface):
|
| 898 |
+
# gh-5584 reported that an underflow can cause sign checks in the algorithm
|
| 899 |
+
# to fail. Check that this is resolved.
|
| 900 |
+
solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b)))
|
| 901 |
+
if rs_interface else getattr(zeros, solver_name))
|
| 902 |
+
|
| 903 |
+
def f(x):
|
| 904 |
+
return 1e-200*x
|
| 905 |
+
|
| 906 |
+
# Report failure when signs are the same
|
| 907 |
+
with pytest.raises(ValueError, match='...must have different signs'):
|
| 908 |
+
solver(f, -0.5, -0.4, full_output=True)
|
| 909 |
+
|
| 910 |
+
# Solve successfully when signs are different
|
| 911 |
+
res = solver(f, -0.5, 0.4, full_output=True)
|
| 912 |
+
res = res if rs_interface else res[1]
|
| 913 |
+
assert res.converged
|
| 914 |
+
assert_allclose(res.root, 0, atol=1e-8)
|
| 915 |
+
|
| 916 |
+
# Solve successfully when one side is negative zero
|
| 917 |
+
res = solver(f, -0.5, float('-0.0'), full_output=True)
|
| 918 |
+
res = res if rs_interface else res[1]
|
| 919 |
+
assert res.converged
|
| 920 |
+
assert_allclose(res.root, 0, atol=1e-8)
|
| 921 |
+
|
| 922 |
+
|
| 923 |
+
def test_gh13407():
|
| 924 |
+
# gh-13407 reported that the message produced by `scipy.optimize.toms748`
|
| 925 |
+
# when `rtol < eps` is incorrect, and also that toms748 is unusual in
|
| 926 |
+
# accepting `rtol` as low as eps while other solvers raise at 4*eps. Check
|
| 927 |
+
# that the error message has been corrected and that `rtol=eps` can produce
|
| 928 |
+
# a lower function value than `rtol=4*eps`.
|
| 929 |
+
def f(x):
|
| 930 |
+
return x**3 - 2*x - 5
|
| 931 |
+
|
| 932 |
+
xtol = 1e-300
|
| 933 |
+
eps = np.finfo(float).eps
|
| 934 |
+
x1 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=1*eps)
|
| 935 |
+
f1 = f(x1)
|
| 936 |
+
x4 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=4*eps)
|
| 937 |
+
f4 = f(x4)
|
| 938 |
+
assert f1 < f4
|
| 939 |
+
|
| 940 |
+
# using old-style syntax to get exactly the same message
|
| 941 |
+
message = fr"rtol too small \({eps/2:g} < {eps:g}\)"
|
| 942 |
+
with pytest.raises(ValueError, match=message):
|
| 943 |
+
zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=eps/2)
|
| 944 |
+
|
| 945 |
+
|
| 946 |
+
def test_newton_complex_gh10103():
|
| 947 |
+
# gh-10103 reported a problem when `newton` is pass a Python complex x0,
|
| 948 |
+
# no `fprime` (secant method), and no `x1` (`x1` must be constructed).
|
| 949 |
+
# Check that this is resolved.
|
| 950 |
+
def f(z):
|
| 951 |
+
return z - 1
|
| 952 |
+
res = newton(f, 1+1j)
|
| 953 |
+
assert_allclose(res, 1, atol=1e-12)
|
| 954 |
+
|
| 955 |
+
res = root_scalar(f, x0=1+1j, x1=2+1.5j, method='secant')
|
| 956 |
+
assert_allclose(res.root, 1, atol=1e-12)
|
| 957 |
+
|
| 958 |
+
|
| 959 |
+
@pytest.mark.parametrize('method', all_methods)
|
| 960 |
+
def test_maxiter_int_check_gh10236(method):
|
| 961 |
+
# gh-10236 reported that the error message when `maxiter` is not an integer
|
| 962 |
+
# was difficult to interpret. Check that this was resolved (by gh-10907).
|
| 963 |
+
message = "'float' object cannot be interpreted as an integer"
|
| 964 |
+
with pytest.raises(TypeError, match=message):
|
| 965 |
+
method(f1, 0.0, 1.0, maxiter=72.45)
|
mantis_evalkit/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_basic.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:10c5111dce658184d3e985c693e3e0dc39dda5a4f88b46bcb0548d8d5918a418
|
| 3 |
+
size 165951
|
moondream/lib/python3.10/site-packages/numpy/random/_common.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:31a0a5cd1d83014366047940ae5a6c5466ebe0d6515d2e5e2311258f748d1706
|
| 3 |
+
size 276192
|
moondream/lib/python3.10/site-packages/numpy/random/_mt19937.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d30bd4b7d986fc29800bd588c70d3223143932ddc802bdc46d72197fbc096c11
|
| 3 |
+
size 120224
|
moondream/lib/python3.10/site-packages/numpy/random/_philox.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:278070939f2017a7ed1ae91833add65251e6f70e72b9d37964eb30bc383f6b05
|
| 3 |
+
size 107384
|
moondream/lib/python3.10/site-packages/numpy/random/bit_generator.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d4ee3d720f9eee0541548507dc7fe2f323ea481cdc33e37906fe78bc2e981a0d
|
| 3 |
+
size 246536
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Adelaide
ADDED
|
Binary file (2.21 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/LHI
ADDED
|
Binary file (1.85 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lindeman
ADDED
|
Binary file (475 Bytes). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Queensland
ADDED
|
Binary file (419 Bytes). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Sydney
ADDED
|
Binary file (2.19 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Yancowinna
ADDED
|
Binary file (2.23 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Belfast
ADDED
|
Binary file (3.66 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Berlin
ADDED
|
Binary file (2.3 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Bratislava
ADDED
|
Binary file (2.3 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Brussels
ADDED
|
Binary file (2.93 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Busingen
ADDED
|
Binary file (1.91 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Copenhagen
ADDED
|
Binary file (2.3 kB). View file
|
|
|
moondream/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Dublin
ADDED
|
Binary file (3.49 kB). View file
|
|
|