Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so +3 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/extending.pyx +43 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/meson.build +25 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py +226 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py +166 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py +278 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py +318 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_extending.py +24 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py +314 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py +385 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py +1121 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py +431 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py +345 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py +354 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py +171 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py +108 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py +69 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py +144 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py +13 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py +216 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py +219 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py +4 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -360,3 +360,4 @@ llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_6
|
|
| 360 |
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 361 |
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 362 |
parrot/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/annotated_fn_args.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 360 |
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 361 |
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 362 |
parrot/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/annotated_fn_args.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 363 |
+
llava_next/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c45eca1a2737717c4e47975adcd7b7c1c1d02e98dba7d5103eea7e67787a6fea
|
| 3 |
+
size 364392
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc
ADDED
|
Binary file (8.03 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc
ADDED
|
Binary file (6.98 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_bracket.cpython-310.pyc
ADDED
|
Binary file (22.9 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc
ADDED
|
Binary file (7.23 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc
ADDED
|
Binary file (2.33 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_differentiate.cpython-310.pyc
ADDED
|
Binary file (19.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_isotonic_regression.cpython-310.pyc
ADDED
|
Binary file (5.71 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc
ADDED
|
Binary file (28.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc
ADDED
|
Binary file (80.4 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc
ADDED
|
Binary file (8.28 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc
ADDED
|
Binary file (17.9 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc
ADDED
|
Binary file (9.46 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc
ADDED
|
Binary file (6.07 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/extending.pyx
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
#cython: language_level=3
|
| 3 |
+
#cython: boundscheck=False
|
| 4 |
+
#cython: wraparound=False
|
| 5 |
+
"""
|
| 6 |
+
Taken from docstring for scipy.optimize.cython_optimize module.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from scipy.optimize.cython_optimize cimport brentq
|
| 10 |
+
|
| 11 |
+
# import math from Cython
|
| 12 |
+
from libc cimport math
|
| 13 |
+
|
| 14 |
+
myargs = {'C0': 1.0, 'C1': 0.7} # a dictionary of extra arguments
|
| 15 |
+
XLO, XHI = 0.5, 1.0 # lower and upper search boundaries
|
| 16 |
+
XTOL, RTOL, MITR = 1e-3, 1e-3, 10 # other solver parameters
|
| 17 |
+
|
| 18 |
+
# user-defined struct for extra parameters
|
| 19 |
+
ctypedef struct test_params:
|
| 20 |
+
double C0
|
| 21 |
+
double C1
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# user-defined callback
|
| 25 |
+
cdef double f(double x, void *args) noexcept:
|
| 26 |
+
cdef test_params *myargs = <test_params *> args
|
| 27 |
+
return myargs.C0 - math.exp(-(x - myargs.C1))
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Cython wrapper function
|
| 31 |
+
cdef double brentq_wrapper_example(dict args, double xa, double xb,
|
| 32 |
+
double xtol, double rtol, int mitr):
|
| 33 |
+
# Cython automatically casts dictionary to struct
|
| 34 |
+
cdef test_params myargs = args
|
| 35 |
+
return brentq(
|
| 36 |
+
f, xa, xb, <test_params *> &myargs, xtol, rtol, mitr, NULL)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Python function
|
| 40 |
+
def brentq_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, rtol=RTOL,
|
| 41 |
+
mitr=MITR):
|
| 42 |
+
'''Calls Cython wrapper from Python.'''
|
| 43 |
+
return brentq_wrapper_example(args, xa, xb, xtol, rtol, mitr)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/_cython_examples/meson.build
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
project('random-build-examples', 'c', 'cpp', 'cython')
|
| 2 |
+
|
| 3 |
+
fs = import('fs')
|
| 4 |
+
|
| 5 |
+
py3 = import('python').find_installation(pure: false)
|
| 6 |
+
|
| 7 |
+
cy = meson.get_compiler('cython')
|
| 8 |
+
|
| 9 |
+
if not cy.version().version_compare('>=3.0.8')
|
| 10 |
+
error('tests requires Cython >= 3.0.8')
|
| 11 |
+
endif
|
| 12 |
+
|
| 13 |
+
py3.extension_module(
|
| 14 |
+
'extending',
|
| 15 |
+
'extending.pyx',
|
| 16 |
+
install: false,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
extending_cpp = fs.copyfile('extending.pyx', 'extending_cpp.pyx')
|
| 20 |
+
py3.extension_module(
|
| 21 |
+
'extending_cpp',
|
| 22 |
+
extending_cpp,
|
| 23 |
+
install: false,
|
| 24 |
+
override_options : ['cython_language=cpp']
|
| 25 |
+
)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__spectral.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy import exp
|
| 5 |
+
from numpy.testing import assert_, assert_equal
|
| 6 |
+
|
| 7 |
+
from scipy.optimize import root
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def test_performance():
|
| 11 |
+
# Compare performance results to those listed in
|
| 12 |
+
# [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
|
| 13 |
+
# and
|
| 14 |
+
# [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
|
| 15 |
+
# and those produced by dfsane.f from M. Raydan's website.
|
| 16 |
+
#
|
| 17 |
+
# Where the results disagree, the largest limits are taken.
|
| 18 |
+
|
| 19 |
+
e_a = 1e-5
|
| 20 |
+
e_r = 1e-4
|
| 21 |
+
|
| 22 |
+
table_1 = [
|
| 23 |
+
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
|
| 24 |
+
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
|
| 25 |
+
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
|
| 26 |
+
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
|
| 27 |
+
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188) removed:
|
| 28 |
+
# too sensitive to rounding errors
|
| 29 |
+
# Results from dfsane.f; papers list nit=3, nfev=3
|
| 30 |
+
dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6),
|
| 31 |
+
# Must have n%3==0, typo in papers?
|
| 32 |
+
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29),
|
| 33 |
+
# Must have n%3==0, typo in papers?
|
| 34 |
+
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29),
|
| 35 |
+
# Results from dfsane.f; papers list nit=nfev=6?
|
| 36 |
+
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18),
|
| 37 |
+
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
|
| 38 |
+
# Results from dfsane.f; papers list nit=2, nfev=12
|
| 39 |
+
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5),
|
| 40 |
+
]
|
| 41 |
+
|
| 42 |
+
# Check also scaling invariance
|
| 43 |
+
for xscale, yscale, line_search in itertools.product(
|
| 44 |
+
[1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10], ['cruz', 'cheng']
|
| 45 |
+
):
|
| 46 |
+
for problem in table_1:
|
| 47 |
+
n = problem['n']
|
| 48 |
+
def func(x, n):
|
| 49 |
+
return yscale * problem['F'](x / xscale, n)
|
| 50 |
+
args = (n,)
|
| 51 |
+
x0 = problem['x0'](n) * xscale
|
| 52 |
+
|
| 53 |
+
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
|
| 54 |
+
|
| 55 |
+
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
|
| 56 |
+
sigma_0 = xscale/yscale
|
| 57 |
+
|
| 58 |
+
with np.errstate(over='ignore'):
|
| 59 |
+
sol = root(func, x0, args=args,
|
| 60 |
+
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
|
| 61 |
+
sigma_0=sigma_0, sigma_eps=sigma_eps,
|
| 62 |
+
line_search=line_search),
|
| 63 |
+
method='DF-SANE')
|
| 64 |
+
|
| 65 |
+
err_msg = repr(
|
| 66 |
+
[xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
|
| 67 |
+
fatol, sol.success, sol.nit, sol.nfev]
|
| 68 |
+
)
|
| 69 |
+
assert sol.success, err_msg
|
| 70 |
+
# nfev+1: dfsane.f doesn't count first eval
|
| 71 |
+
assert sol.nfev <= problem['nfev'] + 1, err_msg
|
| 72 |
+
assert sol.nit <= problem['nit'], err_msg
|
| 73 |
+
assert np.linalg.norm(func(sol.x, n)) <= fatol, err_msg
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def test_complex():
|
| 77 |
+
def func(z):
|
| 78 |
+
return z**2 - 1 + 2j
|
| 79 |
+
x0 = 2.0j
|
| 80 |
+
|
| 81 |
+
ftol = 1e-4
|
| 82 |
+
sol = root(func, x0, tol=ftol, method='DF-SANE')
|
| 83 |
+
|
| 84 |
+
assert_(sol.success)
|
| 85 |
+
|
| 86 |
+
f0 = np.linalg.norm(func(x0))
|
| 87 |
+
fx = np.linalg.norm(func(sol.x))
|
| 88 |
+
assert_(fx <= ftol*f0)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def test_linear_definite():
|
| 92 |
+
# The DF-SANE paper proves convergence for "strongly isolated"
|
| 93 |
+
# solutions.
|
| 94 |
+
#
|
| 95 |
+
# For linear systems F(x) = A x - b = 0, with A positive or
|
| 96 |
+
# negative definite, the solution is strongly isolated.
|
| 97 |
+
|
| 98 |
+
def check_solvability(A, b, line_search='cruz'):
|
| 99 |
+
def func(x):
|
| 100 |
+
return A.dot(x) - b
|
| 101 |
+
xp = np.linalg.solve(A, b)
|
| 102 |
+
eps = np.linalg.norm(func(xp)) * 1e3
|
| 103 |
+
sol = root(
|
| 104 |
+
func, b,
|
| 105 |
+
options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
|
| 106 |
+
method='DF-SANE',
|
| 107 |
+
)
|
| 108 |
+
assert_(sol.success)
|
| 109 |
+
assert_(np.linalg.norm(func(sol.x)) <= eps)
|
| 110 |
+
|
| 111 |
+
n = 90
|
| 112 |
+
|
| 113 |
+
# Test linear pos.def. system
|
| 114 |
+
np.random.seed(1234)
|
| 115 |
+
A = np.arange(n*n).reshape(n, n)
|
| 116 |
+
A = A + n*n * np.diag(1 + np.arange(n))
|
| 117 |
+
assert_(np.linalg.eigvals(A).min() > 0)
|
| 118 |
+
b = np.arange(n) * 1.0
|
| 119 |
+
check_solvability(A, b, 'cruz')
|
| 120 |
+
check_solvability(A, b, 'cheng')
|
| 121 |
+
|
| 122 |
+
# Test linear neg.def. system
|
| 123 |
+
check_solvability(-A, b, 'cruz')
|
| 124 |
+
check_solvability(-A, b, 'cheng')
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def test_shape():
|
| 128 |
+
def f(x, arg):
|
| 129 |
+
return x - arg
|
| 130 |
+
|
| 131 |
+
for dt in [float, complex]:
|
| 132 |
+
x = np.zeros([2,2])
|
| 133 |
+
arg = np.ones([2,2], dtype=dt)
|
| 134 |
+
|
| 135 |
+
sol = root(f, x, args=(arg,), method='DF-SANE')
|
| 136 |
+
assert_(sol.success)
|
| 137 |
+
assert_equal(sol.x.shape, x.shape)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# Some of the test functions and initial guesses listed in
|
| 141 |
+
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
|
| 142 |
+
|
| 143 |
+
def F_1(x, n):
|
| 144 |
+
g = np.zeros([n])
|
| 145 |
+
i = np.arange(2, n+1)
|
| 146 |
+
g[0] = exp(x[0] - 1) - 1
|
| 147 |
+
g[1:] = i*(exp(x[1:] - 1) - x[1:])
|
| 148 |
+
return g
|
| 149 |
+
|
| 150 |
+
def x0_1(n):
|
| 151 |
+
x0 = np.empty([n])
|
| 152 |
+
x0.fill(n/(n-1))
|
| 153 |
+
return x0
|
| 154 |
+
|
| 155 |
+
def F_2(x, n):
|
| 156 |
+
g = np.zeros([n])
|
| 157 |
+
i = np.arange(2, n+1)
|
| 158 |
+
g[0] = exp(x[0]) - 1
|
| 159 |
+
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
|
| 160 |
+
return g
|
| 161 |
+
|
| 162 |
+
def x0_2(n):
|
| 163 |
+
x0 = np.empty([n])
|
| 164 |
+
x0.fill(1/n**2)
|
| 165 |
+
return x0
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def F_4(x, n): # skip name check
|
| 169 |
+
assert_equal(n % 3, 0)
|
| 170 |
+
g = np.zeros([n])
|
| 171 |
+
# Note: the first line is typoed in some of the references;
|
| 172 |
+
# correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
|
| 173 |
+
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
|
| 174 |
+
g[1::3] = (0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3]
|
| 175 |
+
- x[2::3] + 0.2 * x[2::3]**3 + 2.16)
|
| 176 |
+
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
|
| 177 |
+
return g
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def x0_4(n): # skip name check
|
| 181 |
+
assert_equal(n % 3, 0)
|
| 182 |
+
x0 = np.array([-1, 1/2, -1] * (n//3))
|
| 183 |
+
return x0
|
| 184 |
+
|
| 185 |
+
def F_6(x, n):
|
| 186 |
+
c = 0.9
|
| 187 |
+
mu = (np.arange(1, n+1) - 0.5)/n
|
| 188 |
+
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
|
| 189 |
+
|
| 190 |
+
def x0_6(n):
|
| 191 |
+
return np.ones([n])
|
| 192 |
+
|
| 193 |
+
def F_7(x, n):
|
| 194 |
+
assert_equal(n % 3, 0)
|
| 195 |
+
|
| 196 |
+
def phi(t):
|
| 197 |
+
v = 0.5*t - 2
|
| 198 |
+
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
|
| 199 |
+
v[t >= 2] = (0.5*t + 2)[t >= 2]
|
| 200 |
+
return v
|
| 201 |
+
g = np.zeros([n])
|
| 202 |
+
g[::3] = 1e4 * x[1::3]**2 - 1
|
| 203 |
+
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
|
| 204 |
+
g[2::3] = phi(x[2::3])
|
| 205 |
+
return g
|
| 206 |
+
|
| 207 |
+
def x0_7(n):
|
| 208 |
+
assert_equal(n % 3, 0)
|
| 209 |
+
return np.array([1e-3, 18, 1] * (n//3))
|
| 210 |
+
|
| 211 |
+
def F_9(x, n):
|
| 212 |
+
g = np.zeros([n])
|
| 213 |
+
i = np.arange(2, n)
|
| 214 |
+
g[0] = x[0]**3/3 + x[1]**2/2
|
| 215 |
+
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
|
| 216 |
+
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
|
| 217 |
+
return g
|
| 218 |
+
|
| 219 |
+
def x0_9(n):
|
| 220 |
+
return np.ones([n])
|
| 221 |
+
|
| 222 |
+
def F_10(x, n):
|
| 223 |
+
return np.log(1 + x) - x/n
|
| 224 |
+
|
| 225 |
+
def x0_10(n):
|
| 226 |
+
return np.ones([n])
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyla.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import assert_allclose, assert_, assert_array_equal
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from scipy.optimize import fmin_cobyla, minimize, Bounds
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TestCobyla:
|
| 11 |
+
def setup_method(self):
|
| 12 |
+
self.x0 = [4.95, 0.66]
|
| 13 |
+
self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
|
| 14 |
+
self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
|
| 15 |
+
'maxiter': 100}
|
| 16 |
+
|
| 17 |
+
def fun(self, x):
|
| 18 |
+
return x[0]**2 + abs(x[1])**3
|
| 19 |
+
|
| 20 |
+
def con1(self, x):
|
| 21 |
+
return x[0]**2 + x[1]**2 - 25
|
| 22 |
+
|
| 23 |
+
def con2(self, x):
|
| 24 |
+
return -self.con1(x)
|
| 25 |
+
|
| 26 |
+
@pytest.mark.xslow(True, reason='not slow, but noisy so only run rarely')
|
| 27 |
+
def test_simple(self, capfd):
|
| 28 |
+
# use disp=True as smoke test for gh-8118
|
| 29 |
+
x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
|
| 30 |
+
rhoend=1e-5, maxfun=100, disp=True)
|
| 31 |
+
assert_allclose(x, self.solution, atol=1e-4)
|
| 32 |
+
|
| 33 |
+
def test_minimize_simple(self):
|
| 34 |
+
class Callback:
|
| 35 |
+
def __init__(self):
|
| 36 |
+
self.n_calls = 0
|
| 37 |
+
self.last_x = None
|
| 38 |
+
|
| 39 |
+
def __call__(self, x):
|
| 40 |
+
self.n_calls += 1
|
| 41 |
+
self.last_x = x
|
| 42 |
+
|
| 43 |
+
callback = Callback()
|
| 44 |
+
|
| 45 |
+
# Minimize with method='COBYLA'
|
| 46 |
+
cons = ({'type': 'ineq', 'fun': self.con1},
|
| 47 |
+
{'type': 'ineq', 'fun': self.con2})
|
| 48 |
+
sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
|
| 49 |
+
callback=callback, options=self.opts)
|
| 50 |
+
assert_allclose(sol.x, self.solution, atol=1e-4)
|
| 51 |
+
assert_(sol.success, sol.message)
|
| 52 |
+
assert_(sol.maxcv < 1e-5, sol)
|
| 53 |
+
assert_(sol.nfev < 70, sol)
|
| 54 |
+
assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
|
| 55 |
+
assert_(sol.nfev == callback.n_calls,
|
| 56 |
+
"Callback is not called exactly once for every function eval.")
|
| 57 |
+
assert_array_equal(
|
| 58 |
+
sol.x,
|
| 59 |
+
callback.last_x,
|
| 60 |
+
"Last design vector sent to the callback is not equal to returned value.",
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
def test_minimize_constraint_violation(self):
|
| 64 |
+
np.random.seed(1234)
|
| 65 |
+
pb = np.random.rand(10, 10)
|
| 66 |
+
spread = np.random.rand(10)
|
| 67 |
+
|
| 68 |
+
def p(w):
|
| 69 |
+
return pb.dot(w)
|
| 70 |
+
|
| 71 |
+
def f(w):
|
| 72 |
+
return -(w * spread).sum()
|
| 73 |
+
|
| 74 |
+
def c1(w):
|
| 75 |
+
return 500 - abs(p(w)).sum()
|
| 76 |
+
|
| 77 |
+
def c2(w):
|
| 78 |
+
return 5 - abs(p(w).sum())
|
| 79 |
+
|
| 80 |
+
def c3(w):
|
| 81 |
+
return 5 - abs(p(w)).max()
|
| 82 |
+
|
| 83 |
+
cons = ({'type': 'ineq', 'fun': c1},
|
| 84 |
+
{'type': 'ineq', 'fun': c2},
|
| 85 |
+
{'type': 'ineq', 'fun': c3})
|
| 86 |
+
w0 = np.zeros((10,))
|
| 87 |
+
sol = minimize(f, w0, method='cobyla', constraints=cons,
|
| 88 |
+
options={'catol': 1e-6})
|
| 89 |
+
assert_(sol.maxcv > 1e-6)
|
| 90 |
+
assert_(not sol.success)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def test_vector_constraints():
|
| 94 |
+
# test that fmin_cobyla and minimize can take a combination
|
| 95 |
+
# of constraints, some returning a number and others an array
|
| 96 |
+
def fun(x):
|
| 97 |
+
return (x[0] - 1)**2 + (x[1] - 2.5)**2
|
| 98 |
+
|
| 99 |
+
def fmin(x):
|
| 100 |
+
return fun(x) - 1
|
| 101 |
+
|
| 102 |
+
def cons1(x):
|
| 103 |
+
a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
|
| 104 |
+
return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] +
|
| 105 |
+
a[i, 2] for i in range(len(a))])
|
| 106 |
+
|
| 107 |
+
def cons2(x):
|
| 108 |
+
return x # identity, acts as bounds x > 0
|
| 109 |
+
|
| 110 |
+
x0 = np.array([2, 0])
|
| 111 |
+
cons_list = [fun, cons1, cons2]
|
| 112 |
+
|
| 113 |
+
xsol = [1.4, 1.7]
|
| 114 |
+
fsol = 0.8
|
| 115 |
+
|
| 116 |
+
# testing fmin_cobyla
|
| 117 |
+
sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5)
|
| 118 |
+
assert_allclose(sol, xsol, atol=1e-4)
|
| 119 |
+
|
| 120 |
+
sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5)
|
| 121 |
+
assert_allclose(fun(sol), 1, atol=1e-4)
|
| 122 |
+
|
| 123 |
+
# testing minimize
|
| 124 |
+
constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
|
| 125 |
+
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
|
| 126 |
+
assert_allclose(sol.x, xsol, atol=1e-4)
|
| 127 |
+
assert_(sol.success, sol.message)
|
| 128 |
+
assert_allclose(sol.fun, fsol, atol=1e-4)
|
| 129 |
+
|
| 130 |
+
constraints = {'type': 'ineq', 'fun': fmin}
|
| 131 |
+
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
|
| 132 |
+
assert_allclose(sol.fun, 1, atol=1e-4)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class TestBounds:
|
| 136 |
+
# Test cobyla support for bounds (only when used via `minimize`)
|
| 137 |
+
# Invalid bounds is tested in
|
| 138 |
+
# test_optimize.TestOptimizeSimple.test_minimize_invalid_bounds
|
| 139 |
+
|
| 140 |
+
def test_basic(self):
|
| 141 |
+
def f(x):
|
| 142 |
+
return np.sum(x**2)
|
| 143 |
+
|
| 144 |
+
lb = [-1, None, 1, None, -0.5]
|
| 145 |
+
ub = [-0.5, -0.5, None, None, -0.5]
|
| 146 |
+
bounds = [(a, b) for a, b in zip(lb, ub)]
|
| 147 |
+
# these are converted to Bounds internally
|
| 148 |
+
|
| 149 |
+
res = minimize(f, x0=[1, 2, 3, 4, 5], method='cobyla', bounds=bounds)
|
| 150 |
+
ref = [-0.5, -0.5, 1, 0, -0.5]
|
| 151 |
+
assert res.success
|
| 152 |
+
assert_allclose(res.x, ref, atol=1e-3)
|
| 153 |
+
|
| 154 |
+
def test_unbounded(self):
|
| 155 |
+
def f(x):
|
| 156 |
+
return np.sum(x**2)
|
| 157 |
+
|
| 158 |
+
bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf])
|
| 159 |
+
res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds)
|
| 160 |
+
assert res.success
|
| 161 |
+
assert_allclose(res.x, 0, atol=1e-3)
|
| 162 |
+
|
| 163 |
+
bounds = Bounds([1, -np.inf], [np.inf, np.inf])
|
| 164 |
+
res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds)
|
| 165 |
+
assert res.success
|
| 166 |
+
assert_allclose(res.x, [1, 0], atol=1e-3)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_constraint_conversion.py
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit test for constraint conversion
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from numpy.testing import (assert_array_almost_equal,
|
| 7 |
+
assert_allclose, assert_warns, suppress_warnings)
|
| 8 |
+
import pytest
|
| 9 |
+
from scipy.optimize import (NonlinearConstraint, LinearConstraint,
|
| 10 |
+
OptimizeWarning, minimize, BFGS)
|
| 11 |
+
from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock,
|
| 12 |
+
IneqRosenbrock, EqIneqRosenbrock,
|
| 13 |
+
BoundedRosenbrock, Elec)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class TestOldToNew:
    """Conversion of old-style (dict) constraints for ``trust-constr``.

    Each test minimizes a small convex quadratic subject to constraints
    supplied in the legacy dictionary format (or a mix of dict and
    new-style constraint objects) and checks that the known optimum is
    recovered.
    """
    x0 = (2, 0)
    bnds = ((0, None), (0, None))
    method = "trust-constr"

    def test_constraint_dictionary_1(self):
        # Three linear inequalities given as old-style dicts.
        def objective(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2

        constraints = (
            {'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
            {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
            {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2},
        )

        with suppress_warnings() as sup:
            # trust-constr may warn when the quasi-Newton update degenerates.
            sup.filter(UserWarning, "delta_grad == 0.0")
            result = minimize(objective, self.x0, method=self.method,
                              bounds=self.bnds, constraints=constraints)
        assert_allclose(result.x, [1.4, 1.7], rtol=1e-4)
        assert_allclose(result.fun, 0.8, rtol=1e-4)

    def test_constraint_dictionary_2(self):
        # A single equality constraint with extra args and an explicit
        # Jacobian, exercising the 'args'/'jac' dict keys.
        def objective(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2

        constraint = {'type': 'eq',
                      'fun': lambda x, p1, p2: p1*x[0] - p2*x[1],
                      'args': (1, 1.1),
                      'jac': lambda x, p1, p2: np.array([[p1, -p2]])}
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            result = minimize(objective, self.x0, method=self.method,
                              bounds=self.bnds, constraints=constraint)
        assert_allclose(result.x, [1.7918552, 1.62895927])
        assert_allclose(result.fun, 1.3857466063348418)

    def test_constraint_dictionary_3(self):
        # Mixture of an old-style dict and a new-style NonlinearConstraint
        # in the same constraints sequence.
        def objective(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2

        constraints = [
            {'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
            NonlinearConstraint(lambda x: x[0] - x[1], 0, 0),
        ]

        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            result = minimize(objective, self.x0, method=self.method,
                              bounds=self.bnds, constraints=constraints)
        assert_allclose(result.x, [1.75, 1.75], rtol=1e-4)
        assert_allclose(result.fun, 1.125, rtol=1e-4)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class TestNewToOld:
    """Conversion of new-style constraint objects to the old dict format.

    The same problem is solved with several minimizers; the objective
    values reached must agree with the trust-constr reference within a
    loose relative tolerance, which shows the constraint conversion was
    faithful for every backend.
    """

    def test_multiple_constraint_objects(self):
        """Mixed sequences of old- and new-style constraints, all methods."""
        def fun(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
        x0 = [2, 0, 1]
        coni = []  # only inequality constraints (can use cobyla)
        methods = ["slsqp", "cobyla", "cobyqa", "trust-constr"]

        # mixed old and new
        coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
                     NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])

        coni.append([LinearConstraint([1, -2, 0], -2, np.inf),
                     NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])

        coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf),
                     NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])

        for con in coni:
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            # All backends must agree with the trust-constr reference.
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4)
            assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4)
            assert_allclose(funs['cobyqa'], funs['trust-constr'], rtol=1e-4)

    @pytest.mark.fail_slow(10)
    def test_individual_constraint_objects(self):
        """Single constraint objects with many bound representations.

        ``cone`` collects constraints containing equalities (COBYLA cannot
        handle those); ``coni`` collects pure inequalities.
        """
        def fun(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
        x0 = [2, 0, 1]

        cone = []  # with equality constraints (can't use cobyla)
        coni = []  # only inequality constraints (can use cobyla)
        methods = ["slsqp", "cobyla", "cobyqa", "trust-constr"]

        # nonstandard data types for constraint equality bounds
        cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1))
        cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21]))
        cone.append(NonlinearConstraint(lambda x: x[0] - x[1],
                                        1.21, np.array([1.21])))

        # multiple equalities
        cone.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            1.21, 1.21))  # two same equalities
        cone.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            [1.21, 1.4], [1.21, 1.4]))  # two different equalities
        cone.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            [1.21, 1.21], 1.21))  # equality specified two ways
        cone.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            [1.21, -np.inf], [1.21, np.inf]))  # equality + unbounded

        # nonstandard data types for constraint inequality bounds
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf))
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf))
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1],
                                        1.21, np.array([np.inf])))
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3))
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1],
                                        np.array(-np.inf), -3))

        # multiple inequalities/equalities
        coni.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            1.21, np.inf))  # two same inequalities
        cone.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            [1.21, -np.inf], [1.21, 1.4]))  # mixed equality/inequality
        coni.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            [1.1, .8], [1.2, 1.4]))  # bounded above and below
        coni.append(NonlinearConstraint(
            lambda x: [x[0] - x[1], x[1] - x[2]],
            [-1.2, -1.4], [-1.1, -.8]))  # - bounded above and below

        # quick check of LinearConstraint class (very little new code to test)
        cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21))
        cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21))
        cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]],
                                     [1.21, -np.inf], [1.21, 1.4]))

        # Inequality-only constraints: every method, including COBYLA.
        for con in coni:
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
            assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3)
            assert_allclose(funs['cobyqa'], funs['trust-constr'], rtol=1e-3)

        # Equality-containing constraints: COBYLA is excluded.
        for con in cone:
            funs = {}
            for method in [method for method in methods if method != 'cobyla']:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
            assert_allclose(funs['cobyqa'], funs['trust-constr'], rtol=1e-3)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
class TestNewToOldSLSQP:
    """Conversion of new-style constraints to the old format for SLSQP.

    ``list_of_problems`` holds benchmark problems (defined in
    ``test_minimize_constrained``) with a known ``x_opt``; the two
    problems created here get their reference optima attached by hand.
    """
    method = 'slsqp'
    elec = Elec(n_electrons=2)
    elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047,
                           -0.73597044, 0.34180668, -0.34180667])
    brock = BoundedRosenbrock()
    brock.x_opt = [0, 0]
    list_of_problems = [Maratos(),
                        HyperbolicIneq(),
                        Rosenbrock(),
                        IneqRosenbrock(),
                        EqIneqRosenbrock(),
                        elec,
                        brock
                        ]

    def test_list_of_problems(self):
        """SLSQP must solve each benchmark to ~3 decimals of ``x_opt``."""
        for prob in self.list_of_problems:

            with suppress_warnings() as sup:
                sup.filter(UserWarning)
                result = minimize(prob.fun, prob.x0,
                                  method=self.method,
                                  bounds=prob.bounds,
                                  constraints=prob.constr)

            assert_array_almost_equal(result.x, prob.x_opt, decimal=3)

    def test_warn_mixed_constraints(self):
        # warns about inefficiency of mixed equality/inequality constraints
        def fun(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
        cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]],
                                   [1.1, .8], [1.1, 1.4])
        bnds = ((0, None), (0, None), (0, None))
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "delta_grad == 0.0")
            assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1),
                         method=self.method, bounds=bnds, constraints=cons)

    def test_warn_ignored_options(self):
        # warns about constraint options being ignored
        def fun(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2
        x0 = (2, 0, 1)

        if self.method == "slsqp":
            bnds = ((0, None), (0, None), (0, None))
        else:
            bnds = None

        cons = NonlinearConstraint(lambda x: x[0], 2, np.inf)
        res = minimize(fun, x0, method=self.method,
                       bounds=bnds, constraints=cons)
        # no warnings without constraint options
        assert_allclose(res.fun, 1)

        cons = LinearConstraint([1, 0, 0], 2, np.inf)
        res = minimize(fun, x0, method=self.method,
                       bounds=bnds, constraints=cons)
        # no warnings without constraint options
        assert_allclose(res.fun, 1)

        # Each of these carries an option SLSQP cannot honor, so each one,
        # individually, must trigger an OptimizeWarning.
        cons = []
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        keep_feasible=True))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        hess=BFGS()))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        finite_diff_jac_sparsity=42))
        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
                                        finite_diff_rel_step=42))
        cons.append(LinearConstraint([1, 0, 0], 2, np.inf,
                                     keep_feasible=True))
        for con in cons:
            # BUG FIX: previously passed ``constraints=cons`` (the whole
            # list), leaving the loop variable unused, so the constraints
            # were never exercised one at a time.
            assert_warns(OptimizeWarning, minimize, fun, x0,
                         method=self.method, bounds=bnds, constraints=con)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
class TestNewToOldCobyla:
    """Conversion of new-style constraints to the old format for COBYLA."""
    method = 'cobyla'

    list_of_problems = [
        Elec(n_electrons=2),
        Elec(n_electrons=4),
    ]

    @pytest.mark.slow
    def test_list_of_problems(self):
        # COBYLA must land close (rtol=1e-3) to the objective value the
        # trust-constr reference solver reaches on the same problem.
        for problem in self.list_of_problems:

            with suppress_warnings() as sup:
                sup.filter(UserWarning)
                reference = minimize(problem.fun, problem.x0,
                                     method='trust-constr',
                                     bounds=problem.bounds,
                                     constraints=problem.constr)
                result = minimize(problem.fun, problem.x0,
                                  method=self.method,
                                  bounds=problem.bounds,
                                  constraints=problem.constr)

            assert_allclose(result.fun, reference.fun, rtol=1e-3)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_direct.py
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit test for DIRECT optimization algorithm.
|
| 3 |
+
"""
|
| 4 |
+
from numpy.testing import (assert_allclose,
|
| 5 |
+
assert_array_less)
|
| 6 |
+
import pytest
|
| 7 |
+
import numpy as np
|
| 8 |
+
from scipy.optimize import direct, Bounds
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TestDIRECT:
    """Tests for :func:`scipy.optimize.direct` (DIRECT global optimizer).

    Most tests run with both ``locally_biased=True`` (DIRECT-L) and
    ``False`` (original DIRECT). ``self.fun_calls`` counts objective
    evaluations so the reported ``nfev`` can be cross-checked.
    """

    def setup_method(self):
        # Fresh evaluation counter and fixtures for every test.
        self.fun_calls = 0
        self.bounds_sphere = 4*[(-2, 3)]
        self.optimum_sphere_pos = np.zeros((4, ))
        self.optimum_sphere = 0.0
        self.bounds_stylinski_tang = Bounds([-4., -4.], [4., 4.])
        self.maxiter = 1000

    # test functions
    def sphere(self, x):
        """Convex sphere objective; increments the call counter."""
        self.fun_calls += 1
        return np.square(x).sum()

    def inv(self, x):
        """Objective that raises ZeroDivisionError at the origin."""
        if np.sum(x) == 0:
            raise ZeroDivisionError()
        return 1/np.sum(x)

    def nan_fun(self, x):
        # Always-NaN objective, for robustness checks.
        return np.nan

    def inf_fun(self, x):
        # Always-infinite objective, for robustness checks.
        return np.inf

    def styblinski_tang(self, pos):
        """2-D Styblinski–Tang function (multimodal benchmark)."""
        x, y = pos
        return 0.5 * (x**4 - 16 * x**2 + 5 * x + y**4 - 16 * y**2 + 5 * y)

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_direct(self, locally_biased):
        """End-to-end accuracy and bookkeeping on the sphere problem."""
        res = direct(self.sphere, self.bounds_sphere,
                     locally_biased=locally_biased)

        # test accuracy
        assert_allclose(res.x, self.optimum_sphere_pos,
                        rtol=1e-3, atol=1e-3)
        assert_allclose(res.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5)

        # test that result lies within bounds
        _bounds = np.asarray(self.bounds_sphere)
        assert_array_less(_bounds[:, 0], res.x)
        assert_array_less(res.x, _bounds[:, 1])

        # test number of function evaluations. Original DIRECT overshoots by
        # up to 500 evaluations in last iteration
        assert res.nfev <= 1000 * (len(self.bounds_sphere) + 1)
        # test that number of function evaluations is correct
        assert res.nfev == self.fun_calls

        # test that number of iterations is below supplied maximum
        assert res.nit <= self.maxiter

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_direct_callback(self, locally_biased):
        # test that callback does not change the result
        res = direct(self.sphere, self.bounds_sphere,
                     locally_biased=locally_biased)

        def callback(x):
            # Deliberately mutates its local copy; must not affect solver.
            x = 2*x
            dummy = np.square(x)
            print("DIRECT minimization algorithm callback test")
            return dummy

        res_callback = direct(self.sphere, self.bounds_sphere,
                              locally_biased=locally_biased,
                              callback=callback)

        assert_allclose(res.x, res_callback.x)

        assert res.nit == res_callback.nit
        assert res.nfev == res_callback.nfev
        assert res.status == res_callback.status
        assert res.success == res_callback.success
        assert res.fun == res_callback.fun
        assert_allclose(res.x, res_callback.x)
        assert res.message == res_callback.message

        # test accuracy
        assert_allclose(res_callback.x, self.optimum_sphere_pos,
                        rtol=1e-3, atol=1e-3)
        assert_allclose(res_callback.fun, self.optimum_sphere,
                        atol=1e-5, rtol=1e-5)

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_exception(self, locally_biased):
        """Exceptions raised by the objective must propagate to the caller."""
        bounds = 4*[(-10, 10)]
        with pytest.raises(ZeroDivisionError):
            direct(self.inv, bounds=bounds,
                   locally_biased=locally_biased)

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_nan(self, locally_biased):
        """A NaN-valued objective must not crash the solver."""
        bounds = 4*[(-10, 10)]
        direct(self.nan_fun, bounds=bounds,
               locally_biased=locally_biased)

    @pytest.mark.parametrize("len_tol", [1e-3, 1e-4])
    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_len_tol(self, len_tol, locally_biased):
        """Termination by side-length tolerance (status 5) and its message."""
        bounds = 4*[(-10., 10.)]
        res = direct(self.sphere, bounds=bounds, len_tol=len_tol,
                     vol_tol=1e-30, locally_biased=locally_biased)
        assert res.status == 5
        assert res.success
        assert_allclose(res.x, np.zeros((4, )))
        message = ("The side length measure of the hyperrectangle containing "
                   "the lowest function value found is below "
                   f"len_tol={len_tol}")
        assert res.message == message

    @pytest.mark.parametrize("vol_tol", [1e-6, 1e-8])
    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_vol_tol(self, vol_tol, locally_biased):
        """Termination by volume tolerance (status 4) and its message."""
        bounds = 4*[(-10., 10.)]
        res = direct(self.sphere, bounds=bounds, vol_tol=vol_tol,
                     len_tol=0., locally_biased=locally_biased)
        assert res.status == 4
        assert res.success
        assert_allclose(res.x, np.zeros((4, )))
        message = ("The volume of the hyperrectangle containing the lowest "
                   f"function value found is below vol_tol={vol_tol}")
        assert res.message == message

    @pytest.mark.parametrize("f_min_rtol", [1e-3, 1e-5, 1e-7])
    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_f_min(self, f_min_rtol, locally_biased):
        # test that desired function value is reached within
        # relative tolerance of f_min_rtol
        f_min = 1.
        bounds = 4*[(-2., 10.)]
        res = direct(self.sphere, bounds=bounds, f_min=f_min,
                     f_min_rtol=f_min_rtol,
                     locally_biased=locally_biased)
        assert res.status == 3
        assert res.success
        assert res.fun < f_min * (1. + f_min_rtol)
        message = ("The best function value found is within a relative "
                   f"error={f_min_rtol} of the (known) global optimum f_min")
        assert res.message == message

    def circle_with_args(self, x, a, b):
        """Shifted quadratic with minimum at (a, b); exercises ``args``."""
        return np.square(x[0] - a) + np.square(x[1] - b).sum()

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_f_circle_with_args(self, locally_biased):
        bounds = 2*[(-2.0, 2.0)]

        res = direct(self.circle_with_args, bounds, args=(1, 1), maxfun=1250,
                     locally_biased=locally_biased)
        assert_allclose(res.x, np.array([1., 1.]), rtol=1e-5)

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_failure_maxfun(self, locally_biased):
        # test that if optimization runs for the maximal number of
        # evaluations, success = False is returned

        maxfun = 100
        result = direct(self.styblinski_tang, self.bounds_stylinski_tang,
                        maxfun=maxfun, locally_biased=locally_biased)
        assert result.success is False
        assert result.status == 1
        assert result.nfev >= maxfun
        message = ("Number of function evaluations done is "
                   f"larger than maxfun={maxfun}")
        assert result.message == message

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_failure_maxiter(self, locally_biased):
        # test that if optimization runs for the maximal number of
        # iterations, success = False is returned

        maxiter = 10
        result = direct(self.styblinski_tang, self.bounds_stylinski_tang,
                        maxiter=maxiter, locally_biased=locally_biased)
        assert result.success is False
        assert result.status == 2
        assert result.nit >= maxiter
        message = f"Number of iterations is larger than maxiter={maxiter}"
        assert result.message == message

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_bounds_variants(self, locally_biased):
        # test that new and old bounds yield same result

        lb = [-6., 1., -5.]
        ub = [-1., 3., 5.]
        x_opt = np.array([-1., 1., 0.])
        bounds_old = list(zip(lb, ub))
        bounds_new = Bounds(lb, ub)

        res_old_bounds = direct(self.sphere, bounds_old,
                                locally_biased=locally_biased)
        res_new_bounds = direct(self.sphere, bounds_new,
                                locally_biased=locally_biased)

        assert res_new_bounds.nfev == res_old_bounds.nfev
        assert res_new_bounds.message == res_old_bounds.message
        assert res_new_bounds.success == res_old_bounds.success
        assert res_new_bounds.nit == res_old_bounds.nit
        assert_allclose(res_new_bounds.x, res_old_bounds.x)
        assert_allclose(res_new_bounds.x, x_opt, rtol=1e-2)

    @pytest.mark.parametrize("locally_biased", [True, False])
    @pytest.mark.parametrize("eps", [1e-5, 1e-4, 1e-3])
    def test_epsilon(self, eps, locally_biased):
        """Solver converges (status 4) across a range of ``eps`` values."""
        result = direct(self.styblinski_tang, self.bounds_stylinski_tang,
                        eps=eps, vol_tol=1e-6,
                        locally_biased=locally_biased)
        assert result.status == 4
        assert result.success

    @pytest.mark.xslow
    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_no_segmentation_fault(self, locally_biased):
        # test that an excessive number of function evaluations
        # does not result in segmentation fault
        bounds = [(-5., 20.)] * 100
        result = direct(self.sphere, bounds, maxfun=10000000,
                        maxiter=1000000, locally_biased=locally_biased)
        assert result is not None

    @pytest.mark.parametrize("locally_biased", [True, False])
    def test_inf_fun(self, locally_biased):
        # test that an objective value of infinity does not crash DIRECT
        bounds = [(-5., 5.)] * 2
        result = direct(self.inf_fun, bounds,
                        locally_biased=locally_biased)
        assert result is not None

    @pytest.mark.parametrize("len_tol", [-1, 2])
    def test_len_tol_validation(self, len_tol):
        """Out-of-range ``len_tol`` must raise ValueError."""
        error_msg = "len_tol must be between 0 and 1."
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, self.bounds_stylinski_tang,
                   len_tol=len_tol)

    @pytest.mark.parametrize("vol_tol", [-1, 2])
    def test_vol_tol_validation(self, vol_tol):
        """Out-of-range ``vol_tol`` must raise ValueError."""
        error_msg = "vol_tol must be between 0 and 1."
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, self.bounds_stylinski_tang,
                   vol_tol=vol_tol)

    @pytest.mark.parametrize("f_min_rtol", [-1, 2])
    def test_fmin_rtol_validation(self, f_min_rtol):
        """Out-of-range ``f_min_rtol`` must raise ValueError."""
        error_msg = "f_min_rtol must be between 0 and 1."
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, self.bounds_stylinski_tang,
                   f_min_rtol=f_min_rtol, f_min=0.)

    @pytest.mark.parametrize("maxfun", [1.5, "string", (1, 2)])
    def test_maxfun_wrong_type(self, maxfun):
        """Non-integer ``maxfun`` must raise ValueError."""
        error_msg = "maxfun must be of type int."
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, self.bounds_stylinski_tang,
                   maxfun=maxfun)

    @pytest.mark.parametrize("maxiter", [1.5, "string", (1, 2)])
    def test_maxiter_wrong_type(self, maxiter):
        """Non-integer ``maxiter`` must raise ValueError."""
        error_msg = "maxiter must be of type int."
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, self.bounds_stylinski_tang,
                   maxiter=maxiter)

    def test_negative_maxiter(self):
        error_msg = "maxiter must be > 0."
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, self.bounds_stylinski_tang,
                   maxiter=-1)

    def test_negative_maxfun(self):
        error_msg = "maxfun must be > 0."
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, self.bounds_stylinski_tang,
                   maxfun=-1)

    @pytest.mark.parametrize("bounds", ["bounds", 2., 0])
    def test_invalid_bounds_type(self, bounds):
        """Bounds of a wrong type must raise ValueError."""
        error_msg = ("bounds must be a sequence or "
                     "instance of Bounds class")
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, bounds)

    @pytest.mark.parametrize("bounds",
                             [Bounds([-1., -1], [-2, 1]),
                              Bounds([-np.nan, -1], [-2, np.nan]),
                              ]
                             )
    def test_incorrect_bounds(self, bounds):
        """Inconsistent (lb > ub or NaN) bounds must raise ValueError."""
        error_msg = 'Bounds are not consistent min < max'
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, bounds)

    def test_inf_bounds(self):
        """Infinite bounds must raise ValueError."""
        error_msg = 'Bounds must not be inf.'
        bounds = Bounds([-np.inf, -1], [-2, np.inf])
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, bounds)

    @pytest.mark.parametrize("locally_biased", ["bias", [0, 0], 2.])
    def test_locally_biased_validation(self, locally_biased):
        """Non-boolean ``locally_biased`` must raise ValueError."""
        error_msg = 'locally_biased must be True or False.'
        with pytest.raises(ValueError, match=error_msg):
            direct(self.styblinski_tang, self.bounds_stylinski_tang,
                   locally_biased=locally_biased)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_extending.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import platform
|
| 3 |
+
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from scipy._lib._testutils import IS_EDITABLE, _test_cython_extension, cython
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@pytest.mark.fail_slow(20)
# essential per https://github.com/scipy/scipy/pull/20487#discussion_r1567057247
@pytest.mark.skipif(IS_EDITABLE,
                    reason='Editable install cannot find .pxd headers.')
@pytest.mark.skipif(platform.machine() in ["wasm32", "wasm64"],
                    reason="Can't start subprocess")
@pytest.mark.skipif(cython is None, reason="requires cython")
def test_cython(tmp_path):
    """Build the C and C++ Cython examples and verify brentq_example()."""
    # The package directory (parent of tests/) contains the example sources.
    srcdir = os.path.dirname(os.path.dirname(__file__))
    extensions, extensions_cpp = _test_cython_extension(tmp_path, srcdir)
    # actually test the cython c-extensions
    # From docstring for scipy.optimize.cython_optimize module
    expected_root = 0.6999942848231314
    assert extensions.brentq_example() == expected_root
    assert extensions_cpp.brentq_example() == expected_root
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_linesearch.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests for line search routines
|
| 3 |
+
"""
|
| 4 |
+
from numpy.testing import (assert_equal, assert_array_almost_equal,
|
| 5 |
+
assert_array_almost_equal_nulp, assert_warns,
|
| 6 |
+
suppress_warnings)
|
| 7 |
+
import scipy.optimize._linesearch as ls
|
| 8 |
+
from scipy.optimize._linesearch import LineSearchWarning
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
    """Assert that step length *s* satisfies the strong Wolfe conditions.

    ``phi`` is the line-search objective and ``derphi`` its derivative;
    raises AssertionError (with diagnostic values) if either the
    sufficient-decrease or the curvature condition fails.
    """
    value_s, value_0 = phi(s), phi(0)
    slope_0, slope_s = derphi(0), derphi(s)
    msg = (f"s = {s}; phi(0) = {value_0}; phi(s) = {value_s}; phi'(0) = {slope_0};"
           f" phi'(s) = {slope_s}; {err_msg}")

    # Wolfe 1: sufficient decrease (Armijo) condition.
    assert value_s <= value_0 + c1 * s * slope_0, "Wolfe 1 failed: " + msg
    # Wolfe 2: strong curvature condition.
    assert abs(slope_s) <= abs(c2 * slope_0), "Wolfe 2 failed: " + msg
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def assert_armijo(s, phi, c1=1e-4, err_msg=""):
    """Assert that step length *s* satisfies the Armijo condition for phi."""
    value_s = phi(s)
    value_0 = phi(0)
    msg = f"s = {s}; phi(0) = {value_0}; phi(s) = {value_s}; {err_msg}"
    # phi must have decreased at least proportionally to the step length.
    assert value_s <= (1 - c1 * s) * value_0, msg
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def assert_line_wolfe(x, p, s, f, fprime, **kw):
    """Check strong Wolfe conditions for *f* along the ray ``x + t*p``."""
    def phi(t):
        return f(x + t * p)

    def derphi(t):
        # Directional derivative of f along p at x + t*p.
        return np.dot(fprime(x + t * p), p)

    assert_wolfe(s, phi=phi, derphi=derphi, **kw)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def assert_line_armijo(x, p, s, f, **kw):
    """Assert the Armijo condition for a step ``s`` along direction ``p``."""
    def phi(step):
        # value of f restricted to the search line x + step*p
        return f(x + p * step)

    assert_armijo(s, phi=phi, **kw)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def assert_fp_equal(x, y, err_msg="", nulp=50):
    """Assert two arrays are equal, up to some floating-point rounding error."""
    try:
        assert_array_almost_equal_nulp(x, y, nulp)
    except AssertionError as exc:
        # append the caller-supplied context to the numpy failure report
        message = f"{exc}\n{err_msg}"
        raise AssertionError(message) from exc
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class TestLineSearch:
    """Tests for the Wolfe and Armijo line searches in
    ``scipy.optimize._linesearch``.

    ``_scalar_func_*`` / ``_line_func_*`` are test problems returning
    ``(value, derivative)``; ``setup_method`` discovers them by name and
    splits each into separate value/derivative callables.  ``self.fcount``
    counts every evaluation so tests can check the call-count bookkeeping
    (``fc``/``gc``) reported by the search routines.
    """

    # -- scalar functions; must have dphi(0.) < 0
    def _scalar_func_1(self, s):  # skip name check
        self.fcount += 1
        p = -s - s**3 + s**4
        dp = -1 - 3*s**2 + 4*s**3
        return p, dp

    def _scalar_func_2(self, s):  # skip name check
        self.fcount += 1
        p = np.exp(-4*s) + s**2
        dp = -4*np.exp(-4*s) + 2*s
        return p, dp

    def _scalar_func_3(self, s):  # skip name check
        self.fcount += 1
        p = -np.sin(10*s)
        dp = -10*np.cos(10*s)
        return p, dp

    # -- n-d functions

    def _line_func_1(self, x):  # skip name check
        self.fcount += 1
        f = np.dot(x, x)
        df = 2*x
        return f, df

    def _line_func_2(self, x):  # skip name check
        self.fcount += 1
        f = np.dot(x, np.dot(self.A, x)) + 1
        df = np.dot(self.A + self.A.T, x)
        return f, df

    # --

    def setup_method(self):
        """Collect the test problems by name and seed the RNG."""
        self.scalar_funcs = []
        self.line_funcs = []
        self.N = 20
        self.fcount = 0

        def bind_index(func, idx):
            # Remember Python's closure semantics!
            return lambda *a, **kw: func(*a, **kw)[idx]

        for name in sorted(dir(self)):
            if name.startswith('_scalar_func_'):
                value = getattr(self, name)
                self.scalar_funcs.append(
                    (name, bind_index(value, 0), bind_index(value, 1)))
            elif name.startswith('_line_func_'):
                value = getattr(self, name)
                self.line_funcs.append(
                    (name, bind_index(value, 0), bind_index(value, 1)))

        np.random.seed(1234)
        self.A = np.random.randn(self.N, self.N)

    def scalar_iter(self):
        """Yield (name, phi, derphi, old_phi0) for each scalar problem."""
        for name, phi, derphi in self.scalar_funcs:
            for old_phi0 in np.random.randn(3):
                yield name, phi, derphi, old_phi0

    def line_iter(self):
        """Yield (name, f, fprime, x, p, old_fv) with p a descent direction."""
        for name, f, fprime in self.line_funcs:
            k = 0
            while k < 9:
                x = np.random.randn(self.N)
                p = np.random.randn(self.N)
                if np.dot(p, fprime(x)) >= 0:
                    # always pick a descent direction
                    continue
                k += 1
                old_fv = float(np.random.randn())
                yield name, f, fprime, x, p, old_fv

    # -- Generic scalar searches

    def test_scalar_search_wolfe1(self):
        c = 0
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            c += 1
            s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
                                                    old_phi0, derphi(0))
            assert_fp_equal(phi0, phi(0), name)
            assert_fp_equal(phi1, phi(s), name)
            assert_wolfe(s, phi, derphi, err_msg=name)

        assert c > 3  # check that the iterator really works...

    def test_scalar_search_wolfe2(self):
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
                phi, derphi, phi(0), old_phi0, derphi(0))
            assert_fp_equal(phi0, phi(0), name)
            assert_fp_equal(phi1, phi(s), name)
            if derphi1 is not None:
                assert_fp_equal(derphi1, derphi(s), name)
            assert_wolfe(s, phi, derphi, err_msg=f"{name} {old_phi0:g}")

    def test_scalar_search_wolfe2_with_low_amax(self):
        # amax smaller than any acceptable step: the search must report
        # non-convergence rather than returning a bad step.
        def phi(alpha):
            return (alpha - 5) ** 2

        def derphi(alpha):
            return 2 * (alpha - 5)

        alpha_star, _, _, derphi_star = ls.scalar_search_wolfe2(phi, derphi, amax=0.001)
        assert alpha_star is None  # Not converged
        assert derphi_star is None  # Not converged

    def test_scalar_search_wolfe2_regression(self):
        # Regression test for gh-12157
        # This phi has its minimum at alpha=4/3 ~ 1.333.
        def phi(alpha):
            if alpha < 1:
                return - 3*np.pi/2 * (alpha - 1)
            else:
                return np.cos(3*np.pi/2 * alpha - np.pi)

        def derphi(alpha):
            if alpha < 1:
                return - 3*np.pi/2
            else:
                return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi)

        s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi)
        # Without the fix in gh-13073, the scalar_search_wolfe2
        # returned s=2.0 instead.
        assert s < 1.5

    def test_scalar_search_armijo(self):
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))
            assert_fp_equal(phi1, phi(s), name)
            assert_armijo(s, phi, err_msg=f"{name} {old_phi0:g}")

    # -- Generic line searches

    def test_line_search_wolfe1(self):
        c = 0
        smax = 100
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,
                                                           g0, f0, old_f,
                                                           amax=smax)
            # fc+gc must account for every evaluation made by the search
            assert_equal(self.fcount, fc+gc)
            assert_fp_equal(ofv, f(x))
            if s is None:
                continue
            assert_fp_equal(fv, f(x + s*p))
            assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
            if s < smax:
                c += 1
                assert_line_wolfe(x, p, s, f, fprime, err_msg=name)

        assert c > 3  # check that the iterator really works...

    def test_line_search_wolfe2(self):
        c = 0
        smax = 512
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            with suppress_warnings() as sup:
                sup.filter(LineSearchWarning,
                           "The line search algorithm could not find a solution")
                sup.filter(LineSearchWarning,
                           "The line search algorithm did not converge")
                s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
                                                               g0, f0, old_f,
                                                               amax=smax)
            assert_equal(self.fcount, fc+gc)
            assert_fp_equal(ofv, f(x))
            assert_fp_equal(fv, f(x + s*p))
            if gv is not None:
                assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
            if s < smax:
                c += 1
                assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
        assert c > 3  # check that the iterator really works...

    def test_line_search_wolfe2_bounds(self):
        # See gh-7475

        # For this f and p, starting at a point on axis 0, the strong Wolfe
        # condition 2 is met if and only if the step length s satisfies
        # |x + s| <= c2 * |x|
        def f(x):
            return np.dot(x, x)

        def fp(x):
            return 2 * x

        p = np.array([1, 0])

        # Smallest s satisfying strong Wolfe conditions for these arguments is 30
        x = -60 * p
        c2 = 0.5

        s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
        assert_line_wolfe(x, p, s, f, fp)

        # amax just below the smallest acceptable step: must warn and fail
        s, _, _, _, _, _ = assert_warns(LineSearchWarning,
                                        ls.line_search_wolfe2, f, fp, x, p,
                                        amax=29, c2=c2)
        assert s is None

        # s=30 will only be tried on the 6th iteration, so this won't converge
        assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,
                     c2=c2, maxiter=5)

    def test_line_search_armijo(self):
        c = 0
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)
            c += 1
            assert_equal(self.fcount, fc)
            assert_fp_equal(fv, f(x + s*p))
            assert_line_armijo(x, p, s, f, err_msg=name)
        assert c >= 9

    # -- More specific tests

    def test_armijo_terminate_1(self):
        # Armijo should evaluate the function only once if the trial step
        # is already suitable
        count = [0]

        def phi(s):
            count[0] += 1
            return -s + 0.01*s**2
        s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)
        assert_equal(s, 1)
        assert_equal(count[0], 2)
        assert_armijo(s, phi)

    def test_wolfe_terminate(self):
        # wolfe1 and wolfe2 should also evaluate the function only a few
        # times if the trial step is already suitable

        def phi(s):
            count[0] += 1
            return -s + 0.05*s**2

        def derphi(s):
            count[0] += 1
            return -1 + 0.05*2*s

        for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
            count = [0]
            r = func(phi, derphi, phi(0), None, derphi(0))
            assert r[0] is not None, (r, func)
            assert count[0] <= 2 + 2, (count, func)
            assert_wolfe(r[0], phi, derphi, err_msg=str(func))
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_milp.py
ADDED
|
@@ -0,0 +1,385 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit test for Mixed Integer Linear Programming
|
| 3 |
+
"""
|
| 4 |
+
import re
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from numpy.testing import assert_allclose, assert_array_equal
|
| 8 |
+
import pytest
|
| 9 |
+
|
| 10 |
+
from .test_linprog import magic_square
|
| 11 |
+
from scipy.optimize import milp, Bounds, LinearConstraint
|
| 12 |
+
from scipy import sparse
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def test_milp_iv():
    """Input validation: invalid arguments to `milp` raise informative errors."""

    expected = "`c` must be a dense array"
    with pytest.raises(ValueError, match=expected):
        milp(sparse.coo_array([0, 0]))

    expected = "`c` must be a one-dimensional array of finite numbers with"
    with pytest.raises(ValueError, match=expected):
        milp(np.zeros((3, 4)))
    with pytest.raises(ValueError, match=expected):
        milp([])
    with pytest.raises(ValueError, match=expected):
        milp(None)

    expected = "`bounds` must be convertible into an instance of..."
    with pytest.raises(ValueError, match=expected):
        milp(1, bounds=10)

    # these messages contain regex metacharacters, hence re.escape
    expected = "`constraints` (or each element within `constraints`) must be"
    with pytest.raises(ValueError, match=re.escape(expected)):
        milp(1, constraints=10)
    with pytest.raises(ValueError, match=re.escape(expected)):
        milp(np.zeros(3), constraints=([[1, 2, 3]], [2, 3], [2, 3]))
    with pytest.raises(ValueError, match=re.escape(expected)):
        milp(np.zeros(2), constraints=([[1, 2]], [2], sparse.coo_array([2])))

    expected = "The shape of `A` must be (len(b_l), len(c))."
    with pytest.raises(ValueError, match=re.escape(expected)):
        milp(np.zeros(3), constraints=([[1, 2]], [2], [2]))

    expected = "`integrality` must be a dense array"
    with pytest.raises(ValueError, match=expected):
        milp([1, 2], integrality=sparse.coo_array([1, 2]))

    expected = ("`integrality` must contain integers 0-3 and be broadcastable "
                "to `c.shape`.")
    with pytest.raises(ValueError, match=expected):
        milp([1, 2, 3], integrality=[1, 2])
    with pytest.raises(ValueError, match=expected):
        milp([1, 2, 3], integrality=[1, 5, 3])

    expected = "Lower and upper bounds must be dense arrays."
    with pytest.raises(ValueError, match=expected):
        milp([1, 2, 3], bounds=([1, 2], sparse.coo_array([3, 4])))

    expected = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
    with pytest.raises(ValueError, match=expected):
        milp([1, 2, 3], bounds=([1, 2], [3, 4, 5]))
    with pytest.raises(ValueError, match=expected):
        milp([1, 2, 3], bounds=([1, 2, 3], [4, 5]))

    expected = "`bounds.lb` and `bounds.ub` must contain reals and..."
    with pytest.raises(ValueError, match=expected):
        milp([1, 2, 3], bounds=([1, 2], [3, 4]))
    with pytest.raises(ValueError, match=expected):
        milp([1, 2, 3], bounds=([1, 2, 3], ["3+4", 4, 5]))
    with pytest.raises(ValueError, match=expected):
        milp([1, 2, 3], bounds=([1, 2, 3], [set(), 4, 5]))
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@pytest.mark.xfail(run=False,
                   reason="Needs to be fixed in `_highs_wrapper`")
def test_milp_options(capsys):
    """Options handling: unknown keys warn; disp/presolve/time_limit apply."""
    # run=False now because of gh-16347
    warn_msg = "Unrecognized options detected: {'ekki'}..."
    with pytest.warns(RuntimeWarning, match=warn_msg):
        milp(1, options={'ekki': True})

    A, b, c, numbers, M = magic_square(3)
    opts = {"disp": True, "presolve": False, "time_limit": 0.05}
    res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1,
               options=opts)

    out = capsys.readouterr().out
    assert "Presolve is switched off" in out
    assert "Time Limit Reached" in out
    assert not res.success
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def test_result():
    """Check the fields of the OptimizeResult returned by `milp`."""
    def _assert_all_none(res):
        # unsuccessful solves report no solution information at all
        assert (res.fun is res.mip_dual_bound is res.mip_gap
                is res.mip_node_count is res.x is None)

    # successful solve: all fields populated with the documented types
    A, b, c, numbers, M = magic_square(3)
    res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1)
    assert res.status == 0
    assert res.success
    msg = "Optimization terminated successfully. (HiGHS Status 7:"
    assert res.message.startswith(msg)
    assert isinstance(res.x, np.ndarray)
    assert isinstance(res.fun, float)
    assert isinstance(res.mip_node_count, int)
    assert isinstance(res.mip_dual_bound, float)
    assert isinstance(res.mip_gap, float)

    # time-limited solve of a harder instance
    A, b, c, numbers, M = magic_square(6)
    res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1,
               options={'time_limit': 0.05})
    assert res.status == 1
    assert not res.success
    assert res.message.startswith("Time limit reached. (HiGHS Status 13:")
    _assert_all_none(res)

    # trivially infeasible problem (lb > ub)
    res = milp(1, bounds=(1, -1))
    assert res.status == 2
    assert not res.success
    assert res.message.startswith("The problem is infeasible. (HiGHS Status 8:")
    _assert_all_none(res)

    # unbounded problem
    res = milp(-1)
    assert res.status == 3
    assert not res.success
    assert res.message.startswith("The problem is unbounded. (HiGHS Status 10:")
    _assert_all_none(res)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def test_milp_optional_args():
    """Check that arguments other than `c` are indeed optional."""
    result = milp(1)
    assert result.fun == 0
    assert_array_equal(result.x, [0])
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def test_milp_1():
    """Solve the magic-square MILP and verify the solution is a magic square."""
    n = 3
    A, b, c, numbers, M = magic_square(n)
    A = sparse.csc_array(A)  # confirm that sparse arrays are accepted
    res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1)

    # decode the binary assignment variables into the n-by-n square
    solution = np.round(res.x)
    stacked = (numbers.flatten() * solution).reshape(n**2, n, n)
    square = np.sum(stacked, axis=0)

    # every column, row, and both diagonals must sum to the magic constant M
    assert_allclose(square.sum(axis=0), M)
    assert_allclose(square.sum(axis=1), M)
    assert_allclose(np.diag(square).sum(), M)
    assert_allclose(np.diag(square[:, ::-1]).sum(), M)
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def test_milp_2():
    """MIP with inequality constraints, all-integer variables.

    source: slide 5,
    https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf
    Also checks that `milp` accepts every valid way of specifying constraints.
    """
    c = -np.ones(2)
    A = [[-2, 2], [-8, 10]]
    b_l = [1, -np.inf]
    b_u = [np.inf, 13]
    linear_constraint = LinearConstraint(A, b_l, b_u)

    # the same constraints, spelled in every accepted form
    constraint_specs = [
        (A, b_l, b_u),
        linear_constraint,
        [(A, b_l, b_u)],
        [linear_constraint],
        [(A[:1], b_l[:1], b_u[:1]), (A[1:], b_l[1:], b_u[1:])],
        [LinearConstraint(A[:1], b_l[:1], b_u[:1]),
         LinearConstraint(A[1:], b_l[1:], b_u[1:])],
        [(A[:1], b_l[:1], b_u[:1]),
         LinearConstraint(A[1:], b_l[1:], b_u[1:])],
    ]
    results = [milp(c=c, constraints=spec, integrality=True)
               for spec in constraint_specs]
    xs = np.array([res.x for res in results])
    funs = np.array([res.fun for res in results])
    assert_allclose(xs, np.broadcast_to([1, 2], xs.shape))
    assert_allclose(funs, -3)

    # LP relaxation of the same problem
    relaxed = milp(c=c, constraints=(A, b_l, b_u))
    assert_allclose(relaxed.x, [4, 4.5])
    assert_allclose(relaxed.fun, -8.5)
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def test_milp_3():
    """MIP with inequality constraints, all-integer variables.

    source: https://en.wikipedia.org/wiki/Integer_programming#Example
    """
    c = [0, -1]
    A = [[-1, 1], [3, 2], [2, 3]]
    b_u = [1, 12, 12]
    b_l = np.full_like(b_u, -np.inf, dtype=np.float64)
    constraints = LinearConstraint(A, b_l, b_u)
    integrality = np.ones_like(c)

    # integer-constrained problem
    res = milp(c=c, constraints=constraints, integrality=integrality)
    assert_allclose(res.fun, -2)
    # two optimal solutions possible, just need one of them
    assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2])

    # LP relaxation
    relaxed = milp(c=c, constraints=constraints)
    assert_allclose(relaxed.fun, -2.8)
    assert_allclose(relaxed.x, [1.8, 2.8])
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def test_milp_4():
    """MIP with inequality constraints; only the second variable is integer.

    source: https://www.mathworks.com/help/optim/ug/intlinprog.html
    """
    objective = [8, 1]
    A = [[1, 2], [-4, -1], [2, 1]]
    lower = [-14, -np.inf, -np.inf]
    upper = [np.inf, -33, 20]

    res = milp(objective, integrality=[0, 1],
               bounds=Bounds(-np.inf, np.inf),
               constraints=LinearConstraint(A, lower, upper))
    assert_allclose(res.fun, 59)
    assert_allclose(res.x, [6.5, 7])
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def test_milp_5():
    """MIP with inequality and equality constraints.

    source: https://www.mathworks.com/help/optim/ug/intlinprog.html
    """
    objective = [-3, -2, -1]
    bounds = Bounds([0, 0, 0], [np.inf, np.inf, 1])
    constraints = LinearConstraint([[1, 1, 1], [4, 2, 1]],
                                   [-np.inf, 12], [7, 12])

    res = milp(objective, integrality=[0, 0, 1], bounds=bounds,
               constraints=constraints)
    # there are multiple optima; only the objective value is unique
    assert_allclose(res.fun, -12)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
@pytest.mark.slow
@pytest.mark.timeout(120)  # prerelease_deps_coverage_64bit_blas job
def test_milp_6():
    """Larger MIP with only equality constraints.

    source: https://www.mathworks.com/help/optim/ug/intlinprog.html
    """
    A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26],
                     [39, 16, 22, 28, 26, 30, 23, 24],
                     [18, 14, 29, 27, 30, 38, 26, 26],
                     [41, 26, 28, 36, 18, 38, 16, 26]])
    b_eq = np.array([7872, 10466, 11322, 12058])
    c = np.array([2, 10, 13, 17, 7, 5, 7, 3])

    res = milp(c=c, constraints=(A_eq, b_eq, b_eq), integrality=1)
    assert_allclose(res.fun, 1854)
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def test_infeasible_prob_16609():
    # Ensure presolve does not mark trivially infeasible problems
    # as Optimal -- see gh-16609
    # the second variable must be integer, yet is pinned to 0.5
    res = milp([1.0, 0.0], integrality=[0, 1],
               bounds=Bounds([0, -np.inf], [np.inf, np.inf]),
               constraints=LinearConstraint([[0.0, 1.0]], [0.5], [0.5]))
    np.testing.assert_equal(res.status, 2)  # 2 == infeasible
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# Message prefixes emitted by HiGHS when a resource limit stops the solve;
# used by the parametrized test_milp_timeout_16545.
_msg_time = "Time limit reached. (HiGHS Status 13:"
_msg_iter = "Iteration limit reached. (HiGHS Status 14:"
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
@pytest.mark.skipif(np.intp(0).itemsize < 8,
                    reason="Unhandled 32-bit GCC FP bug")
@pytest.mark.slow
@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 0.1}, _msg_time),
                                              ({"node_limit": 1}, _msg_iter)])
def test_milp_timeout_16545(options, msg):
    # Ensure solution is not thrown away if MILP solver times out
    # -- see gh-16545
    n = 100
    rng = np.random.default_rng(5123833489170494244)
    A = rng.integers(0, 5, size=(n, n))
    b_lb = np.full(n, fill_value=-np.inf)
    b_ub = np.full(n, fill_value=25)
    lo = np.zeros(n)
    hi = np.ones(n)

    res = milp(
        -np.ones(n),
        integrality=np.ones(n),
        bounds=Bounds(lo, hi),
        constraints=LinearConstraint(A, b_lb, b_ub),
        options=options,
    )

    # the limit must have been hit, yet an incumbent must be reported
    assert res.message.startswith(msg)
    assert res["x"] is not None

    # ensure the incumbent is feasible and integral
    x = res["x"]
    tol = 1e-8  # sometimes needed due to finite numerical precision
    assert np.all(b_lb - tol <= A @ x) and np.all(A @ x <= b_ub + tol)
    assert np.all(lo - tol <= x) and np.all(x <= hi + tol)
    assert np.allclose(x, np.round(x))
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def test_three_constraints_16878():
    # `milp` failed when exactly three constraints were passed
    # Ensure that this is no longer the case.
    rng = np.random.default_rng(5123833489170494244)
    A = rng.integers(0, 5, size=(6, 6))
    bl = np.full(6, fill_value=-np.inf)
    bu = np.full(6, fill_value=10)

    # the same constraint system split into three pieces, in both accepted forms
    pieces = [(A[:2], bl[:2], bu[:2]),
              (A[2:4], bl[2:4], bu[2:4]),
              (A[4:], bl[4:], bu[4:])]
    as_objects = [LinearConstraint(rows, lo, up) for rows, lo, up in pieces]

    bounds = Bounds(np.zeros(6), np.ones(6))
    c = -np.ones(6)
    res1 = milp(c, bounds=bounds, constraints=as_objects)
    res2 = milp(c, bounds=bounds, constraints=pieces)
    ref = milp(c, bounds=bounds, constraints=(A, bl, bu))
    assert res1.success and res2.success
    assert_allclose(res1.x, ref.x)
    assert_allclose(res2.x, ref.x)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
@pytest.mark.xslow
def test_mip_rel_gap_passdown():
    # Solve problem with decreasing mip_gap to make sure mip_rel_gap decreases
    # Adapted from test_linprog::TestLinprogHiGHSMIP::test_mip_rel_gap_passdown
    # MIP taken from test_mip_6 above
    A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26],
                     [39, 16, 22, 28, 26, 30, 23, 24],
                     [18, 14, 29, 27, 30, 38, 26, 26],
                     [41, 26, 28, 36, 18, 38, 16, 26]])
    b_eq = np.array([7872, 10466, 11322, 12058])
    c = np.array([2, 10, 13, 17, 7, 5, 7, 3])

    achieved_gaps = []
    for requested_gap in [0.25, 0.01, 0.001]:
        res = milp(c=c, bounds=(0, np.inf), constraints=(A_eq, b_eq, b_eq),
                   integrality=True, options={"mip_rel_gap": requested_gap})
        # the solution's gap must respect the requested relative gap ...
        assert res.mip_gap <= requested_gap
        # ... and `res.mip_gap` must match its documented definition
        assert res.mip_gap == (res.fun - res.mip_dual_bound)/res.fun
        achieved_gaps.append(res.mip_gap)

    # make sure that the mip_rel_gap parameter is actually doing something:
    # the achieved gaps must decline monotonically with the requested gap
    assert np.all(np.diff(achieved_gaps) < 0)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_minpack.py
ADDED
|
@@ -0,0 +1,1121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for optimization routines from minpack.py.
|
| 3 |
+
"""
|
| 4 |
+
import warnings
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from numpy.testing import (assert_, assert_almost_equal, assert_array_equal,
|
| 8 |
+
assert_array_almost_equal, assert_allclose,
|
| 9 |
+
assert_warns, suppress_warnings)
|
| 10 |
+
from pytest import raises as assert_raises
|
| 11 |
+
import numpy as np
|
| 12 |
+
from numpy import array, float64
|
| 13 |
+
from multiprocessing.pool import ThreadPool
|
| 14 |
+
|
| 15 |
+
from scipy import optimize, linalg
|
| 16 |
+
from scipy.special import lambertw
|
| 17 |
+
from scipy.optimize._minpack_py import leastsq, curve_fit, fixed_point
|
| 18 |
+
from scipy.optimize import OptimizeWarning
|
| 19 |
+
from scipy.optimize._minimize import Bounds
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class ReturnShape:
    """A callable object that deliberately lacks a ``__name__`` attribute.

    Construct with ``shape`` (an int or tuple of ints).  Calling the
    instance with any single argument returns ``numpy.ones(shape)``;
    the argument itself is ignored.  Used to exercise solver error
    paths that inspect the callable or its output shape.
    """

    def __init__(self, shape):
        # Remember the output shape produced on every call.
        self.shape = shape

    def __call__(self, x):
        # Input is intentionally ignored; the result depends only on
        # the shape fixed at construction time.
        return np.ones(self.shape)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def dummy_func(x, shape):
    """Return ``numpy.ones(shape)``, ignoring the first argument.

    Function-form companion to `ReturnShape`, used to test the
    shape-mismatch error handling of the solvers.
    """
    del x  # explicitly unused; output depends only on `shape`
    return np.ones(shape)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def sequence_parallel(fs):
    """Call every zero-argument callable in *fs* on its own worker thread.

    One thread per callable; returns the list of results in input order.
    Used by the concurrency tests to hammer a solver from many threads.
    """
    def _invoke(fn):
        return fn()

    with ThreadPool(len(fs)) as workers:
        return workers.map(_invoke, fs)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# Function and Jacobian for tests of solvers for systems of nonlinear
|
| 49 |
+
# equations
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def pressure_network(flow_rates, Qtot, k):
    """Residuals of the nonlinear system for n parallel pipes.

    Pressure in each pipe is modeled as ``P = k*Q**2`` where Q is the
    flow rate.  The residuals enforce equal pressure across pipes plus
    conservation of the total flow::

        f_{i-1} = P_i - P_0,        i = 1..n-1
        f_{n-1} = sum(Q_i) - Qtot

    Parameters
    ----------
    flow_rates : ndarray
        A 1-D array of n flow rates [kg/s].
    Qtot : float
        A scalar, the total input flow rate [kg/s].
    k : ndarray
        A 1-D array of n valve coefficients [1/kg m].

    Returns
    -------
    F : ndarray
        A 1-D array of n residuals, F[i] == f_i.
    """
    pressures = k * flow_rates**2
    pressure_residuals = pressures[1:] - pressures[0]
    mass_balance = flow_rates.sum() - Qtot
    return np.hstack((pressure_residuals, mass_balance))
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def pressure_network_jacobian(flow_rates, Qtot, k):
    """Return the jacobian of the equation system F(flow_rates)
    computed by `pressure_network` with respect to
    *flow_rates*. See `pressure_network` for the detailed
    description of parameters.

    Returns
    -------
    jac : ndarray
        *n* by *n* matrix ``df_i/dQ_j`` where ``n = len(flow_rates)``
        and *f_i* and *Q_i* are described in the doc for `pressure_network`
    """
    # BUG FIX: the previous version multiplied the pressure-difference
    # block by zero (``jac[:n-1, :n-1] = pdiff * 0``), producing an
    # all-zero Jacobian for the first n-1 rows instead of the analytical
    # derivative of f_i = k[i+1]*Q[i+1]**2 - k[0]*Q[0]**2.
    n = len(flow_rates)
    jac = np.zeros((n, n))

    # Rows 0..n-2: d f_i / d Q[0] = -2*k[0]*Q[0] (shared reference pipe),
    # and d f_i / d Q[i+1] = 2*k[i+1]*Q[i+1] on the shifted diagonal.
    jac[:n-1, 0] = -2 * k[0] * flow_rates[0]
    jac[:n-1, 1:] = np.diag(2 * k[1:] * flow_rates[1:])

    # Last row: f_{n-1} = sum(Q) - Qtot, so every derivative is 1.
    jac[n-1, :] = np.ones(n)

    return jac
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def pressure_network_fun_and_grad(flow_rates, Qtot, k):
    """Return the residuals and the Jacobian of the pipe network together.

    Intended for solvers called with ``jac=True``, which expect the model
    function to return a ``(F, jac)`` pair.
    """
    residuals = pressure_network(flow_rates, Qtot, k)
    jacobian = pressure_network_jacobian(flow_rates, Qtot, k)
    return residuals, jacobian
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class TestFSolve:
    """Tests for `scipy.optimize.fsolve` on the parallel-pipe network
    system (`pressure_network` / `pressure_network_jacobian`), plus
    error propagation, dtype handling, reentrancy and thread-safety.
    """

    def test_pressure_network_no_gradient(self):
        # fsolve without gradient, equal pipes -> equal flows.
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows, info, ier, mesg = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            full_output=True)
        assert_array_almost_equal(final_flows, np.ones(4))
        assert_(ier == 1, mesg)

    def test_pressure_network_with_gradient(self):
        # fsolve with gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            fprime=pressure_network_jacobian)
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_wrong_shape_func_callable(self):
        func = ReturnShape(1)
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.fsolve, func, x0)

    def test_wrong_shape_func_function(self):
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))

    def test_wrong_shape_fprime_callable(self):
        # Jacobian of shape (2, 2) is inconsistent with a func output of
        # length 1, so fsolve must raise a TypeError.
        func = ReturnShape(1)
        deriv_func = ReturnShape((2,2))
        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)

    def test_wrong_shape_fprime_function(self):
        # Jacobian of shape (3, 3) is inconsistent with a func output of
        # length 2, so fsolve must raise a TypeError.
        def func(x):
            return dummy_func(x, (2,))
        def deriv_func(x):
            return dummy_func(x, (3, 3))
        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)

    def test_func_can_raise(self):
        # An exception raised inside func must propagate to the caller.
        def func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.fsolve(func, x0=[0])

    def test_Dfun_can_raise(self):
        # An exception raised inside the Jacobian callback must propagate.
        def func(x):
            return x - np.array([10])

        def deriv_func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.fsolve(func, x0=[0], fprime=deriv_func)

    def test_float32(self):
        # fsolve should cope with a func that returns float32 output.
        def func(x):
            return np.array([x[0] - 100, x[1] - 1000], dtype=np.float32) ** 2
        p = optimize.fsolve(func, np.array([1, 1], np.float32))
        assert_allclose(func(p), [0, 0], atol=1e-3)

    def test_reentrant_func(self):
        # func itself re-enters fsolve; the outer solve must still succeed.
        def func(*args):
            self.test_pressure_network_no_gradient()
            return pressure_network(*args)

        # fsolve without gradient, equal pipes -> equal flows.
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows, info, ier, mesg = optimize.fsolve(
            func, initial_guess, args=(Qtot, k),
            full_output=True)
        assert_array_almost_equal(final_flows, np.ones(4))
        assert_(ier == 1, mesg)

    def test_reentrant_Dfunc(self):
        # The Jacobian callback re-enters fsolve; the outer solve must succeed.
        def deriv_func(*args):
            self.test_pressure_network_with_gradient()
            return pressure_network_jacobian(*args)

        # fsolve with gradient, equal pipes -> equal flows
        k = np.full(4, 0.5)
        Qtot = 4
        initial_guess = array([2., 0., 2., 0.])
        final_flows = optimize.fsolve(
            pressure_network, initial_guess, args=(Qtot, k),
            fprime=deriv_func)
        assert_array_almost_equal(final_flows, np.ones(4))

    def test_concurrent_no_gradient(self):
        # Run the gradient-free solve on 10 threads at once (thread safety).
        v = sequence_parallel([self.test_pressure_network_no_gradient] * 10)
        assert all([result is None for result in v])

    def test_concurrent_with_gradient(self):
        # Run the gradient-based solve on 10 threads at once (thread safety).
        v = sequence_parallel([self.test_pressure_network_with_gradient] * 10)
        assert all([result is None for result in v])
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class TestRootHybr:
    """`optimize.root(method='hybr')` on the parallel-pipe network problem."""

    def test_pressure_network_no_gradient(self):
        # Without a user-supplied Jacobian, equal pipes -> equal flows.
        valve_coeffs = np.full(4, 0.5)
        total_flow = 4
        start = array([2., 0., 2., 0.])
        solution = optimize.root(pressure_network, start,
                                 method='hybr', args=(total_flow, valve_coeffs))
        assert_array_almost_equal(solution.x, np.ones(4))

    def test_pressure_network_with_gradient(self):
        # With an analytical Jacobian; also checks that a 2-D x0 is accepted.
        valve_coeffs = np.full(4, 0.5)
        total_flow = 4
        start = array([[2., 0., 2., 0.]])
        solution = optimize.root(pressure_network, start,
                                 args=(total_flow, valve_coeffs),
                                 method='hybr',
                                 jac=pressure_network_jacobian)
        assert_array_almost_equal(solution.x, np.ones(4))

    def test_pressure_network_with_gradient_combined(self):
        # Model function returns (residuals, Jacobian) together via jac=True.
        valve_coeffs = np.full(4, 0.5)
        total_flow = 4
        start = array([2., 0., 2., 0.])
        solution = optimize.root(pressure_network_fun_and_grad, start,
                                 args=(total_flow, valve_coeffs),
                                 method='hybr', jac=True)
        assert_array_almost_equal(solution.x, np.ones(4))
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
class TestRootLM:
    """`optimize.root(method='lm')` on the parallel-pipe network problem."""

    def test_pressure_network_no_gradient(self):
        # Levenberg-Marquardt without a Jacobian: equal pipes -> equal flows.
        valve_coeffs = np.full(4, 0.5)
        total_flow = 4
        start = array([2., 0., 2., 0.])
        solution = optimize.root(pressure_network, start,
                                 method='lm', args=(total_flow, valve_coeffs))
        assert_array_almost_equal(solution.x, np.ones(4))
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
class TestNfev:
    """Verify that the reported ``nfev`` matches the real call count."""

    def zero_f(self, y):
        # Count every evaluation so the solver's bookkeeping can be checked.
        self.nfev += 1
        return y**2-3

    @pytest.mark.parametrize('method', ['hybr', 'lm', 'broyden1',
                                        'broyden2', 'anderson',
                                        'linearmixing', 'diagbroyden',
                                        'excitingmixing', 'krylov',
                                        'df-sane'])
    def test_root_nfev(self, method):
        self.nfev = 0
        result = optimize.root(self.zero_f, 100, method=method)
        assert result.nfev == self.nfev

    def test_fsolve_nfev(self):
        self.nfev = 0
        _, info, _, _ = optimize.fsolve(self.zero_f, 100, full_output=True)
        assert info['nfev'] == self.nfev
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class TestLeastSq:
    """Tests for `scipy.optimize.leastsq` fitting a noisy quadratic model,
    plus shape/exception error handling, reentrancy and thread-safety.
    """

    def setup_method(self):
        # Build noisy measurements of y = a*x**2 + b*x + c with a fixed
        # seed so every test sees the same data.
        x = np.linspace(0, 10, 40)
        a,b,c = 3.1, 42, -304.2
        self.x = x
        self.abc = a,b,c
        y_true = a*x**2 + b*x + c
        np.random.seed(0)
        self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)

    def residuals(self, p, y, x):
        # Residuals of the quadratic model for parameter vector p = (a, b, c).
        a,b,c = p
        err = y-(a*x**2 + b*x + c)
        return err

    def residuals_jacobian(self, _p, _y, x):
        # Analytical Jacobian of `residuals`; constant in p because the
        # model is linear in its parameters.
        return -np.vstack([x**2, x, np.ones_like(x)]).T

    def test_basic(self):
        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x))
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_basic_with_gradient(self):
        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x),
                                  Dfun=self.residuals_jacobian)
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_full_output(self):
        # A 2-D p0 must also be accepted when full_output is requested.
        p0 = array([[0,0,0]])
        full_output = leastsq(self.residuals, p0,
                              args=(self.y_meas, self.x),
                              full_output=True)
        params_fit, cov_x, infodict, mesg, ier = full_output
        assert_(ier in (1,2,3,4), f'solution not found: {mesg}')

    def test_input_untouched(self):
        # leastsq must not modify its x0 argument in place.
        p0 = array([0,0,0],dtype=float64)
        p0_copy = array(p0, copy=True)
        full_output = leastsq(self.residuals, p0,
                              args=(self.y_meas, self.x),
                              full_output=True)
        params_fit, cov_x, infodict, mesg, ier = full_output
        assert_(ier in (1,2,3,4), f'solution not found: {mesg}')
        assert_array_equal(p0, p0_copy)

    def test_wrong_shape_func_callable(self):
        func = ReturnShape(1)
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.leastsq, func, x0)

    def test_wrong_shape_func_function(self):
        # x0 is a list of two elements, but func will return an array with
        # length 1, so this should result in a TypeError.
        x0 = [1.5, 2.0]
        assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),))

    def test_wrong_shape_Dfun_callable(self):
        # Jacobian of shape (2, 2) is inconsistent with a func output of
        # length 1, so leastsq must raise a TypeError.
        func = ReturnShape(1)
        deriv_func = ReturnShape((2,2))
        assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)

    def test_wrong_shape_Dfun_function(self):
        # Jacobian of shape (3, 3) is inconsistent with a func output of
        # length 2, so leastsq must raise a TypeError.
        def func(x):
            return dummy_func(x, (2,))
        def deriv_func(x):
            return dummy_func(x, (3, 3))
        assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)

    def test_float32(self):
        # Regression test for gh-1447
        def func(p,x,y):
            q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
            return q - y

        x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286,
                      1.231], dtype=np.float32)
        y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258,
                      0.034,0.0396], dtype=np.float32)
        p0 = np.array([1.0,1.0,1.0,1.0])
        p1, success = optimize.leastsq(func, p0, args=(x,y))

        assert_(success in [1,2,3,4])
        assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum())

    def test_func_can_raise(self):
        # An exception raised inside func must propagate to the caller.
        def func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.leastsq(func, x0=[0])

    def test_Dfun_can_raise(self):
        # An exception raised inside the Jacobian callback must propagate.
        def func(x):
            return x - np.array([10])

        def deriv_func(*args):
            raise ValueError('I raised')

        with assert_raises(ValueError, match='I raised'):
            optimize.leastsq(func, x0=[0], Dfun=deriv_func)

    def test_reentrant_func(self):
        # func itself re-enters leastsq; the outer fit must still succeed.
        def func(*args):
            self.test_basic()
            return self.residuals(*args)

        p0 = array([0,0,0])
        params_fit, ier = leastsq(func, p0,
                                  args=(self.y_meas, self.x))
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_reentrant_Dfun(self):
        # The Jacobian callback re-enters leastsq; the outer fit must succeed.
        def deriv_func(*args):
            self.test_basic()
            return self.residuals_jacobian(*args)

        p0 = array([0,0,0])
        params_fit, ier = leastsq(self.residuals, p0,
                                  args=(self.y_meas, self.x),
                                  Dfun=deriv_func)
        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
        # low precision due to random
        assert_array_almost_equal(params_fit, self.abc, decimal=2)

    def test_concurrent_no_gradient(self):
        # Run the gradient-free fit on 10 threads at once (thread safety).
        v = sequence_parallel([self.test_basic] * 10)
        assert all([result is None for result in v])

    def test_concurrent_with_gradient(self):
        # Run the gradient-based fit on 10 threads at once (thread safety).
        v = sequence_parallel([self.test_basic_with_gradient] * 10)
        assert all([result is None for result in v])

    def test_func_input_output_length_check(self):
        # func maps R^2 to a scalar; leastsq requires M >= N and must raise.

        def func(x):
            return 2 * (x[0] - 3) ** 2 + 1

        with assert_raises(TypeError,
                           match='Improper input: func input vector length N='):
            optimize.leastsq(func, x0=[0, 1])
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
class TestCurveFit:
|
| 438 |
+
def setup_method(self):
|
| 439 |
+
self.y = array([1.0, 3.2, 9.5, 13.7])
|
| 440 |
+
self.x = array([1.0, 2.0, 3.0, 4.0])
|
| 441 |
+
|
| 442 |
+
def test_one_argument(self):
|
| 443 |
+
def func(x,a):
|
| 444 |
+
return x**a
|
| 445 |
+
popt, pcov = curve_fit(func, self.x, self.y)
|
| 446 |
+
assert_(len(popt) == 1)
|
| 447 |
+
assert_(pcov.shape == (1,1))
|
| 448 |
+
assert_almost_equal(popt[0], 1.9149, decimal=4)
|
| 449 |
+
assert_almost_equal(pcov[0,0], 0.0016, decimal=4)
|
| 450 |
+
|
| 451 |
+
# Test if we get the same with full_output. Regression test for #1415.
|
| 452 |
+
# Also test if check_finite can be turned off.
|
| 453 |
+
res = curve_fit(func, self.x, self.y,
|
| 454 |
+
full_output=1, check_finite=False)
|
| 455 |
+
(popt2, pcov2, infodict, errmsg, ier) = res
|
| 456 |
+
assert_array_almost_equal(popt, popt2)
|
| 457 |
+
|
| 458 |
+
def test_two_argument(self):
|
| 459 |
+
def func(x, a, b):
|
| 460 |
+
return b*x**a
|
| 461 |
+
popt, pcov = curve_fit(func, self.x, self.y)
|
| 462 |
+
assert_(len(popt) == 2)
|
| 463 |
+
assert_(pcov.shape == (2,2))
|
| 464 |
+
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
|
| 465 |
+
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
|
| 466 |
+
decimal=4)
|
| 467 |
+
|
| 468 |
+
def test_func_is_classmethod(self):
|
| 469 |
+
class test_self:
|
| 470 |
+
"""This class tests if curve_fit passes the correct number of
|
| 471 |
+
arguments when the model function is a class instance method.
|
| 472 |
+
"""
|
| 473 |
+
|
| 474 |
+
def func(self, x, a, b):
|
| 475 |
+
return b * x**a
|
| 476 |
+
|
| 477 |
+
test_self_inst = test_self()
|
| 478 |
+
popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
|
| 479 |
+
assert_(pcov.shape == (2,2))
|
| 480 |
+
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
|
| 481 |
+
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
|
| 482 |
+
decimal=4)
|
| 483 |
+
|
| 484 |
+
def test_regression_2639(self):
|
| 485 |
+
# This test fails if epsfcn in leastsq is too large.
|
| 486 |
+
x = [574.14200000000005, 574.154, 574.16499999999996,
|
| 487 |
+
574.17700000000002, 574.18799999999999, 574.19899999999996,
|
| 488 |
+
574.21100000000001, 574.22199999999998, 574.23400000000004,
|
| 489 |
+
574.245]
|
| 490 |
+
y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
|
| 491 |
+
1550.0, 949.0, 841.0]
|
| 492 |
+
guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
|
| 493 |
+
0.0035019999999983615, 859.0]
|
| 494 |
+
good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03,
|
| 495 |
+
1.0068462e-02, 8.57450661e+02]
|
| 496 |
+
|
| 497 |
+
def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
|
| 498 |
+
return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
|
| 499 |
+
+ A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)
|
| 500 |
+
popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
|
| 501 |
+
assert_allclose(popt, good, rtol=1e-5)
|
| 502 |
+
|
| 503 |
+
def test_pcov(self):
|
| 504 |
+
xdata = np.array([0, 1, 2, 3, 4, 5])
|
| 505 |
+
ydata = np.array([1, 1, 5, 7, 8, 12])
|
| 506 |
+
sigma = np.array([1, 2, 1, 2, 1, 2])
|
| 507 |
+
|
| 508 |
+
def f(x, a, b):
|
| 509 |
+
return a*x + b
|
| 510 |
+
|
| 511 |
+
for method in ['lm', 'trf', 'dogbox']:
|
| 512 |
+
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
|
| 513 |
+
method=method)
|
| 514 |
+
perr_scaled = np.sqrt(np.diag(pcov))
|
| 515 |
+
assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
|
| 516 |
+
|
| 517 |
+
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
|
| 518 |
+
method=method)
|
| 519 |
+
perr_scaled = np.sqrt(np.diag(pcov))
|
| 520 |
+
assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
|
| 521 |
+
|
| 522 |
+
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
|
| 523 |
+
absolute_sigma=True, method=method)
|
| 524 |
+
perr = np.sqrt(np.diag(pcov))
|
| 525 |
+
assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)
|
| 526 |
+
|
| 527 |
+
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
|
| 528 |
+
absolute_sigma=True, method=method)
|
| 529 |
+
perr = np.sqrt(np.diag(pcov))
|
| 530 |
+
assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3)
|
| 531 |
+
|
| 532 |
+
# infinite variances
|
| 533 |
+
|
| 534 |
+
def f_flat(x, a, b):
|
| 535 |
+
return a*x
|
| 536 |
+
|
| 537 |
+
pcov_expected = np.array([np.inf]*4).reshape(2, 2)
|
| 538 |
+
|
| 539 |
+
with suppress_warnings() as sup:
|
| 540 |
+
sup.filter(OptimizeWarning,
|
| 541 |
+
"Covariance of the parameters could not be estimated")
|
| 542 |
+
popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma)
|
| 543 |
+
popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])
|
| 544 |
+
|
| 545 |
+
assert_(pcov.shape == (2, 2))
|
| 546 |
+
assert_array_equal(pcov, pcov_expected)
|
| 547 |
+
|
| 548 |
+
assert_(pcov1.shape == (2, 2))
|
| 549 |
+
assert_array_equal(pcov1, pcov_expected)
|
| 550 |
+
|
| 551 |
+
def test_array_like(self):
|
| 552 |
+
# Test sequence input. Regression test for gh-3037.
|
| 553 |
+
def f_linear(x, a, b):
|
| 554 |
+
return a*x + b
|
| 555 |
+
|
| 556 |
+
x = [1, 2, 3, 4]
|
| 557 |
+
y = [3, 5, 7, 9]
|
| 558 |
+
assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)
|
| 559 |
+
|
| 560 |
+
def test_indeterminate_covariance(self):
|
| 561 |
+
# Test that a warning is returned when pcov is indeterminate
|
| 562 |
+
xdata = np.array([1, 2, 3, 4, 5, 6])
|
| 563 |
+
ydata = np.array([1, 2, 3, 4, 5.5, 6])
|
| 564 |
+
assert_warns(OptimizeWarning, curve_fit,
|
| 565 |
+
lambda x, a, b: a*x, xdata, ydata)
|
| 566 |
+
|
| 567 |
+
def test_NaN_handling(self):
|
| 568 |
+
# Test for correct handling of NaNs in input data: gh-3422
|
| 569 |
+
|
| 570 |
+
# create input with NaNs
|
| 571 |
+
xdata = np.array([1, np.nan, 3])
|
| 572 |
+
ydata = np.array([1, 2, 3])
|
| 573 |
+
|
| 574 |
+
assert_raises(ValueError, curve_fit,
|
| 575 |
+
lambda x, a, b: a*x + b, xdata, ydata)
|
| 576 |
+
assert_raises(ValueError, curve_fit,
|
| 577 |
+
lambda x, a, b: a*x + b, ydata, xdata)
|
| 578 |
+
|
| 579 |
+
assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b,
|
| 580 |
+
xdata, ydata, **{"check_finite": True})
|
| 581 |
+
|
| 582 |
+
@staticmethod
|
| 583 |
+
def _check_nan_policy(f, xdata_with_nan, xdata_without_nan,
|
| 584 |
+
ydata_with_nan, ydata_without_nan, method):
|
| 585 |
+
kwargs = {'f': f, 'xdata': xdata_with_nan, 'ydata': ydata_with_nan,
|
| 586 |
+
'method': method, 'check_finite': False}
|
| 587 |
+
# propagate test
|
| 588 |
+
error_msg = ("`nan_policy='propagate'` is not supported "
|
| 589 |
+
"by this function.")
|
| 590 |
+
with assert_raises(ValueError, match=error_msg):
|
| 591 |
+
curve_fit(**kwargs, nan_policy="propagate", maxfev=2000)
|
| 592 |
+
|
| 593 |
+
# raise test
|
| 594 |
+
with assert_raises(ValueError, match="The input contains nan"):
|
| 595 |
+
curve_fit(**kwargs, nan_policy="raise")
|
| 596 |
+
|
| 597 |
+
# omit test
|
| 598 |
+
result_with_nan, _ = curve_fit(**kwargs, nan_policy="omit")
|
| 599 |
+
kwargs['xdata'] = xdata_without_nan
|
| 600 |
+
kwargs['ydata'] = ydata_without_nan
|
| 601 |
+
result_without_nan, _ = curve_fit(**kwargs)
|
| 602 |
+
assert_allclose(result_with_nan, result_without_nan)
|
| 603 |
+
|
| 604 |
+
# not valid policy test
|
| 605 |
+
# check for argument names in any order
|
| 606 |
+
error_msg = (r"nan_policy must be one of \{(?:'raise'|'omit'|None)"
|
| 607 |
+
r"(?:, ?(?:'raise'|'omit'|None))*\}")
|
| 608 |
+
with assert_raises(ValueError, match=error_msg):
|
| 609 |
+
curve_fit(**kwargs, nan_policy="hi")
|
| 610 |
+
|
| 611 |
+
@pytest.mark.parametrize('method', ["lm", "trf", "dogbox"])
|
| 612 |
+
def test_nan_policy_1d(self, method):
|
| 613 |
+
def f(x, a, b):
|
| 614 |
+
return a*x + b
|
| 615 |
+
|
| 616 |
+
xdata_with_nan = np.array([2, 3, np.nan, 4, 4, np.nan])
|
| 617 |
+
ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7])
|
| 618 |
+
xdata_without_nan = np.array([2, 3, 4])
|
| 619 |
+
ydata_without_nan = np.array([1, 2, 3])
|
| 620 |
+
|
| 621 |
+
self._check_nan_policy(f, xdata_with_nan, xdata_without_nan,
|
| 622 |
+
ydata_with_nan, ydata_without_nan, method)
|
| 623 |
+
|
| 624 |
+
@pytest.mark.parametrize('method', ["lm", "trf", "dogbox"])
|
| 625 |
+
def test_nan_policy_2d(self, method):
|
| 626 |
+
def f(x, a, b):
|
| 627 |
+
x1 = x[0, :]
|
| 628 |
+
x2 = x[1, :]
|
| 629 |
+
return a*x1 + b + x2
|
| 630 |
+
|
| 631 |
+
xdata_with_nan = np.array([[2, 3, np.nan, 4, 4, np.nan, 5],
|
| 632 |
+
[2, 3, np.nan, np.nan, 4, np.nan, 7]])
|
| 633 |
+
ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10])
|
| 634 |
+
xdata_without_nan = np.array([[2, 3, 5], [2, 3, 7]])
|
| 635 |
+
ydata_without_nan = np.array([1, 2, 10])
|
| 636 |
+
|
| 637 |
+
self._check_nan_policy(f, xdata_with_nan, xdata_without_nan,
|
| 638 |
+
ydata_with_nan, ydata_without_nan, method)
|
| 639 |
+
|
| 640 |
+
@pytest.mark.parametrize('n', [2, 3])
|
| 641 |
+
@pytest.mark.parametrize('method', ["lm", "trf", "dogbox"])
|
| 642 |
+
def test_nan_policy_2_3d(self, n, method):
|
| 643 |
+
def f(x, a, b):
|
| 644 |
+
x1 = x[..., 0, :].squeeze()
|
| 645 |
+
x2 = x[..., 1, :].squeeze()
|
| 646 |
+
return a*x1 + b + x2
|
| 647 |
+
|
| 648 |
+
xdata_with_nan = np.array([[[2, 3, np.nan, 4, 4, np.nan, 5],
|
| 649 |
+
[2, 3, np.nan, np.nan, 4, np.nan, 7]]])
|
| 650 |
+
xdata_with_nan = xdata_with_nan.squeeze() if n == 2 else xdata_with_nan
|
| 651 |
+
ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10])
|
| 652 |
+
xdata_without_nan = np.array([[[2, 3, 5], [2, 3, 7]]])
|
| 653 |
+
ydata_without_nan = np.array([1, 2, 10])
|
| 654 |
+
|
| 655 |
+
self._check_nan_policy(f, xdata_with_nan, xdata_without_nan,
|
| 656 |
+
ydata_with_nan, ydata_without_nan, method)
|
| 657 |
+
|
| 658 |
+
def test_empty_inputs(self):
|
| 659 |
+
# Test both with and without bounds (regression test for gh-9864)
|
| 660 |
+
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [])
|
| 661 |
+
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [],
|
| 662 |
+
bounds=(1, 2))
|
| 663 |
+
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], [])
|
| 664 |
+
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [],
|
| 665 |
+
bounds=(1, 2))
|
| 666 |
+
|
| 667 |
+
def test_function_zero_params(self):
|
| 668 |
+
# Fit args is zero, so "Unable to determine number of fit parameters."
|
| 669 |
+
assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4])
|
| 670 |
+
|
| 671 |
+
def test_None_x(self): # Added in GH10196
|
| 672 |
+
popt, pcov = curve_fit(lambda _, a: a * np.arange(10),
|
| 673 |
+
None, 2 * np.arange(10))
|
| 674 |
+
assert_allclose(popt, [2.])
|
| 675 |
+
|
| 676 |
+
def test_method_argument(self):
|
| 677 |
+
def f(x, a, b):
|
| 678 |
+
return a * np.exp(-b*x)
|
| 679 |
+
|
| 680 |
+
xdata = np.linspace(0, 1, 11)
|
| 681 |
+
ydata = f(xdata, 2., 2.)
|
| 682 |
+
|
| 683 |
+
for method in ['trf', 'dogbox', 'lm', None]:
|
| 684 |
+
popt, pcov = curve_fit(f, xdata, ydata, method=method)
|
| 685 |
+
assert_allclose(popt, [2., 2.])
|
| 686 |
+
|
| 687 |
+
assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')
|
| 688 |
+
|
| 689 |
+
def test_full_output(self):
|
| 690 |
+
def f(x, a, b):
|
| 691 |
+
return a * np.exp(-b * x)
|
| 692 |
+
|
| 693 |
+
xdata = np.linspace(0, 1, 11)
|
| 694 |
+
ydata = f(xdata, 2., 2.)
|
| 695 |
+
|
| 696 |
+
for method in ['trf', 'dogbox', 'lm', None]:
|
| 697 |
+
popt, pcov, infodict, errmsg, ier = curve_fit(
|
| 698 |
+
f, xdata, ydata, method=method, full_output=True)
|
| 699 |
+
assert_allclose(popt, [2., 2.])
|
| 700 |
+
assert "nfev" in infodict
|
| 701 |
+
assert "fvec" in infodict
|
| 702 |
+
if method == 'lm' or method is None:
|
| 703 |
+
assert "fjac" in infodict
|
| 704 |
+
assert "ipvt" in infodict
|
| 705 |
+
assert "qtf" in infodict
|
| 706 |
+
assert isinstance(errmsg, str)
|
| 707 |
+
assert ier in (1, 2, 3, 4)
|
| 708 |
+
|
| 709 |
+
def test_bounds(self):
|
| 710 |
+
def f(x, a, b):
|
| 711 |
+
return a * np.exp(-b*x)
|
| 712 |
+
|
| 713 |
+
xdata = np.linspace(0, 1, 11)
|
| 714 |
+
ydata = f(xdata, 2., 2.)
|
| 715 |
+
|
| 716 |
+
# The minimum w/out bounds is at [2., 2.],
|
| 717 |
+
# and with bounds it's at [1.5, smth].
|
| 718 |
+
lb = [1., 0]
|
| 719 |
+
ub = [1.5, 3.]
|
| 720 |
+
|
| 721 |
+
# Test that both variants of the bounds yield the same result
|
| 722 |
+
bounds = (lb, ub)
|
| 723 |
+
bounds_class = Bounds(lb, ub)
|
| 724 |
+
for method in [None, 'trf', 'dogbox']:
|
| 725 |
+
popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
|
| 726 |
+
method=method)
|
| 727 |
+
assert_allclose(popt[0], 1.5)
|
| 728 |
+
|
| 729 |
+
popt_class, pcov_class = curve_fit(f, xdata, ydata,
|
| 730 |
+
bounds=bounds_class,
|
| 731 |
+
method=method)
|
| 732 |
+
assert_allclose(popt_class, popt)
|
| 733 |
+
|
| 734 |
+
# With bounds, the starting estimate is feasible.
|
| 735 |
+
popt, pcov = curve_fit(f, xdata, ydata, method='trf',
|
| 736 |
+
bounds=([0., 0], [0.6, np.inf]))
|
| 737 |
+
assert_allclose(popt[0], 0.6)
|
| 738 |
+
|
| 739 |
+
# method='lm' doesn't support bounds.
|
| 740 |
+
assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
|
| 741 |
+
method='lm')
|
| 742 |
+
|
| 743 |
+
def test_bounds_p0(self):
|
| 744 |
+
# This test is for issue #5719. The problem was that an initial guess
|
| 745 |
+
# was ignored when 'trf' or 'dogbox' methods were invoked.
|
| 746 |
+
def f(x, a):
|
| 747 |
+
return np.sin(x + a)
|
| 748 |
+
|
| 749 |
+
xdata = np.linspace(-2*np.pi, 2*np.pi, 40)
|
| 750 |
+
ydata = np.sin(xdata)
|
| 751 |
+
bounds = (-3 * np.pi, 3 * np.pi)
|
| 752 |
+
for method in ['trf', 'dogbox']:
|
| 753 |
+
popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi)
|
| 754 |
+
popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi,
|
| 755 |
+
bounds=bounds, method=method)
|
| 756 |
+
|
| 757 |
+
# If the initial guess is ignored, then popt_2 would be close 0.
|
| 758 |
+
assert_allclose(popt_1, popt_2)
|
| 759 |
+
|
| 760 |
+
def test_jac(self):
|
| 761 |
+
# Test that Jacobian callable is handled correctly and
|
| 762 |
+
# weighted if sigma is provided.
|
| 763 |
+
def f(x, a, b):
|
| 764 |
+
return a * np.exp(-b*x)
|
| 765 |
+
|
| 766 |
+
def jac(x, a, b):
|
| 767 |
+
e = np.exp(-b*x)
|
| 768 |
+
return np.vstack((e, -a * x * e)).T
|
| 769 |
+
|
| 770 |
+
xdata = np.linspace(0, 1, 11)
|
| 771 |
+
ydata = f(xdata, 2., 2.)
|
| 772 |
+
|
| 773 |
+
# Test numerical options for least_squares backend.
|
| 774 |
+
for method in ['trf', 'dogbox']:
|
| 775 |
+
for scheme in ['2-point', '3-point', 'cs']:
|
| 776 |
+
popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
|
| 777 |
+
method=method)
|
| 778 |
+
assert_allclose(popt, [2, 2])
|
| 779 |
+
|
| 780 |
+
# Test the analytic option.
|
| 781 |
+
for method in ['lm', 'trf', 'dogbox']:
|
| 782 |
+
popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
|
| 783 |
+
assert_allclose(popt, [2, 2])
|
| 784 |
+
|
| 785 |
+
# Now add an outlier and provide sigma.
|
| 786 |
+
ydata[5] = 100
|
| 787 |
+
sigma = np.ones(xdata.shape[0])
|
| 788 |
+
sigma[5] = 200
|
| 789 |
+
for method in ['lm', 'trf', 'dogbox']:
|
| 790 |
+
popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method,
|
| 791 |
+
jac=jac)
|
| 792 |
+
# Still the optimization process is influenced somehow,
|
| 793 |
+
# have to set rtol=1e-3.
|
| 794 |
+
assert_allclose(popt, [2, 2], rtol=1e-3)
|
| 795 |
+
|
| 796 |
+
def test_maxfev_and_bounds(self):
|
| 797 |
+
# gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq)
|
| 798 |
+
# but with bounds, the parameter is `max_nfev` (via least_squares)
|
| 799 |
+
x = np.arange(0, 10)
|
| 800 |
+
y = 2*x
|
| 801 |
+
popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100)
|
| 802 |
+
popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100)
|
| 803 |
+
|
| 804 |
+
assert_allclose(popt1, 2, atol=1e-14)
|
| 805 |
+
assert_allclose(popt2, 2, atol=1e-14)
|
| 806 |
+
|
| 807 |
+
def test_curvefit_simplecovariance(self):
|
| 808 |
+
|
| 809 |
+
def func(x, a, b):
|
| 810 |
+
return a * np.exp(-b*x)
|
| 811 |
+
|
| 812 |
+
def jac(x, a, b):
|
| 813 |
+
e = np.exp(-b*x)
|
| 814 |
+
return np.vstack((e, -a * x * e)).T
|
| 815 |
+
|
| 816 |
+
np.random.seed(0)
|
| 817 |
+
xdata = np.linspace(0, 4, 50)
|
| 818 |
+
y = func(xdata, 2.5, 1.3)
|
| 819 |
+
ydata = y + 0.2 * np.random.normal(size=len(xdata))
|
| 820 |
+
|
| 821 |
+
sigma = np.zeros(len(xdata)) + 0.2
|
| 822 |
+
covar = np.diag(sigma**2)
|
| 823 |
+
|
| 824 |
+
for jac1, jac2 in [(jac, jac), (None, None)]:
|
| 825 |
+
for absolute_sigma in [False, True]:
|
| 826 |
+
popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
|
| 827 |
+
jac=jac1, absolute_sigma=absolute_sigma)
|
| 828 |
+
popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar,
|
| 829 |
+
jac=jac2, absolute_sigma=absolute_sigma)
|
| 830 |
+
|
| 831 |
+
assert_allclose(popt1, popt2, atol=1e-14)
|
| 832 |
+
assert_allclose(pcov1, pcov2, atol=1e-14)
|
| 833 |
+
|
| 834 |
+
def test_curvefit_covariance(self):
|
| 835 |
+
|
| 836 |
+
def funcp(x, a, b):
|
| 837 |
+
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
|
| 838 |
+
[1./np.sqrt(2), 1./np.sqrt(2), 0],
|
| 839 |
+
[0, 0, 1.0]])
|
| 840 |
+
return rotn.dot(a * np.exp(-b*x))
|
| 841 |
+
|
| 842 |
+
def jacp(x, a, b):
|
| 843 |
+
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
|
| 844 |
+
[1./np.sqrt(2), 1./np.sqrt(2), 0],
|
| 845 |
+
[0, 0, 1.0]])
|
| 846 |
+
e = np.exp(-b*x)
|
| 847 |
+
return rotn.dot(np.vstack((e, -a * x * e)).T)
|
| 848 |
+
|
| 849 |
+
def func(x, a, b):
|
| 850 |
+
return a * np.exp(-b*x)
|
| 851 |
+
|
| 852 |
+
def jac(x, a, b):
|
| 853 |
+
e = np.exp(-b*x)
|
| 854 |
+
return np.vstack((e, -a * x * e)).T
|
| 855 |
+
|
| 856 |
+
np.random.seed(0)
|
| 857 |
+
xdata = np.arange(1, 4)
|
| 858 |
+
y = func(xdata, 2.5, 1.0)
|
| 859 |
+
ydata = y + 0.2 * np.random.normal(size=len(xdata))
|
| 860 |
+
sigma = np.zeros(len(xdata)) + 0.2
|
| 861 |
+
covar = np.diag(sigma**2)
|
| 862 |
+
# Get a rotation matrix, and obtain ydatap = R ydata
|
| 863 |
+
# Chisq = ydata^T C^{-1} ydata
|
| 864 |
+
# = ydata^T R^T R C^{-1} R^T R ydata
|
| 865 |
+
# = ydatap^T Cp^{-1} ydatap
|
| 866 |
+
# Cp^{-1} = R C^{-1} R^T
|
| 867 |
+
# Cp = R C R^T, since R^-1 = R^T
|
| 868 |
+
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
|
| 869 |
+
[1./np.sqrt(2), 1./np.sqrt(2), 0],
|
| 870 |
+
[0, 0, 1.0]])
|
| 871 |
+
ydatap = rotn.dot(ydata)
|
| 872 |
+
covarp = rotn.dot(covar).dot(rotn.T)
|
| 873 |
+
|
| 874 |
+
for jac1, jac2 in [(jac, jacp), (None, None)]:
|
| 875 |
+
for absolute_sigma in [False, True]:
|
| 876 |
+
popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
|
| 877 |
+
jac=jac1, absolute_sigma=absolute_sigma)
|
| 878 |
+
popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp,
|
| 879 |
+
jac=jac2, absolute_sigma=absolute_sigma)
|
| 880 |
+
|
| 881 |
+
assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14)
|
| 882 |
+
assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14)
|
| 883 |
+
|
| 884 |
+
@pytest.mark.parametrize("absolute_sigma", [False, True])
|
| 885 |
+
def test_curvefit_scalar_sigma(self, absolute_sigma):
|
| 886 |
+
def func(x, a, b):
|
| 887 |
+
return a * x + b
|
| 888 |
+
|
| 889 |
+
x, y = self.x, self.y
|
| 890 |
+
_, pcov1 = curve_fit(func, x, y, sigma=2, absolute_sigma=absolute_sigma)
|
| 891 |
+
# Explicitly building the sigma 1D array
|
| 892 |
+
_, pcov2 = curve_fit(
|
| 893 |
+
func, x, y, sigma=np.full_like(y, 2), absolute_sigma=absolute_sigma
|
| 894 |
+
)
|
| 895 |
+
assert np.all(pcov1 == pcov2)
|
| 896 |
+
|
| 897 |
+
def test_dtypes(self):
|
| 898 |
+
# regression test for gh-9581: curve_fit fails if x and y dtypes differ
|
| 899 |
+
x = np.arange(-3, 5)
|
| 900 |
+
y = 1.5*x + 3.0 + 0.5*np.sin(x)
|
| 901 |
+
|
| 902 |
+
def func(x, a, b):
|
| 903 |
+
return a*x + b
|
| 904 |
+
|
| 905 |
+
for method in ['lm', 'trf', 'dogbox']:
|
| 906 |
+
for dtx in [np.float32, np.float64]:
|
| 907 |
+
for dty in [np.float32, np.float64]:
|
| 908 |
+
x = x.astype(dtx)
|
| 909 |
+
y = y.astype(dty)
|
| 910 |
+
|
| 911 |
+
with warnings.catch_warnings():
|
| 912 |
+
warnings.simplefilter("error", OptimizeWarning)
|
| 913 |
+
p, cov = curve_fit(func, x, y, method=method)
|
| 914 |
+
|
| 915 |
+
assert np.isfinite(cov).all()
|
| 916 |
+
assert not np.allclose(p, 1) # curve_fit's initial value
|
| 917 |
+
|
| 918 |
+
def test_dtypes2(self):
|
| 919 |
+
# regression test for gh-7117: curve_fit fails if
|
| 920 |
+
# both inputs are float32
|
| 921 |
+
def hyperbola(x, s_1, s_2, o_x, o_y, c):
|
| 922 |
+
b_2 = (s_1 + s_2) / 2
|
| 923 |
+
b_1 = (s_2 - s_1) / 2
|
| 924 |
+
return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4)
|
| 925 |
+
|
| 926 |
+
min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0])
|
| 927 |
+
max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0])
|
| 928 |
+
guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5])
|
| 929 |
+
|
| 930 |
+
params = [-2, .4, -1, -5, 9.5]
|
| 931 |
+
xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32])
|
| 932 |
+
ydata = hyperbola(xdata, *params)
|
| 933 |
+
|
| 934 |
+
# run optimization twice, with xdata being float32 and float64
|
| 935 |
+
popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
|
| 936 |
+
bounds=(min_fit, max_fit))
|
| 937 |
+
|
| 938 |
+
xdata = xdata.astype(np.float32)
|
| 939 |
+
ydata = hyperbola(xdata, *params)
|
| 940 |
+
|
| 941 |
+
popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
|
| 942 |
+
bounds=(min_fit, max_fit))
|
| 943 |
+
|
| 944 |
+
assert_allclose(popt_32, popt_64, atol=2e-5)
|
| 945 |
+
|
| 946 |
+
def test_broadcast_y(self):
|
| 947 |
+
xdata = np.arange(10)
|
| 948 |
+
target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata))
|
| 949 |
+
def fit_func(x, a, b):
|
| 950 |
+
return a * x ** 2 + b * x - target
|
| 951 |
+
for method in ['lm', 'trf', 'dogbox']:
|
| 952 |
+
popt0, pcov0 = curve_fit(fit_func,
|
| 953 |
+
xdata=xdata,
|
| 954 |
+
ydata=np.zeros_like(xdata),
|
| 955 |
+
method=method)
|
| 956 |
+
popt1, pcov1 = curve_fit(fit_func,
|
| 957 |
+
xdata=xdata,
|
| 958 |
+
ydata=0,
|
| 959 |
+
method=method)
|
| 960 |
+
assert_allclose(pcov0, pcov1)
|
| 961 |
+
|
| 962 |
+
def test_args_in_kwargs(self):
|
| 963 |
+
# Ensure that `args` cannot be passed as keyword argument to `curve_fit`
|
| 964 |
+
|
| 965 |
+
def func(x, a, b):
|
| 966 |
+
return a * x + b
|
| 967 |
+
|
| 968 |
+
with assert_raises(ValueError):
|
| 969 |
+
curve_fit(func,
|
| 970 |
+
xdata=[1, 2, 3, 4],
|
| 971 |
+
ydata=[5, 9, 13, 17],
|
| 972 |
+
p0=[1],
|
| 973 |
+
args=(1,))
|
| 974 |
+
|
| 975 |
+
def test_data_point_number_validation(self):
|
| 976 |
+
def func(x, a, b, c, d, e):
|
| 977 |
+
return a * np.exp(-b * x) + c + d + e
|
| 978 |
+
|
| 979 |
+
with assert_raises(TypeError, match="The number of func parameters="):
|
| 980 |
+
curve_fit(func,
|
| 981 |
+
xdata=[1, 2, 3, 4],
|
| 982 |
+
ydata=[5, 9, 13, 17])
|
| 983 |
+
|
| 984 |
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
|
| 985 |
+
def test_gh4555(self):
|
| 986 |
+
# gh-4555 reported that covariance matrices returned by `leastsq`
|
| 987 |
+
# can have negative diagonal elements and eigenvalues. (In fact,
|
| 988 |
+
# they can also be asymmetric.) This shows up in the output of
|
| 989 |
+
# `scipy.optimize.curve_fit`. Check that it has been resolved.giit
|
| 990 |
+
def f(x, a, b, c, d, e):
|
| 991 |
+
return a*np.log(x + 1 + b) + c*np.log(x + 1 + d) + e
|
| 992 |
+
|
| 993 |
+
rng = np.random.default_rng(408113519974467917)
|
| 994 |
+
n = 100
|
| 995 |
+
x = np.arange(n)
|
| 996 |
+
y = np.linspace(2, 7, n) + rng.random(n)
|
| 997 |
+
p, cov = optimize.curve_fit(f, x, y, maxfev=100000)
|
| 998 |
+
assert np.all(np.diag(cov) > 0)
|
| 999 |
+
eigs = linalg.eigh(cov)[0] # separate line for debugging
|
| 1000 |
+
# some platforms see a small negative eigevenvalue
|
| 1001 |
+
assert np.all(eigs > -1e-2)
|
| 1002 |
+
assert_allclose(cov, cov.T)
|
| 1003 |
+
|
| 1004 |
+
def test_gh4555b(self):
|
| 1005 |
+
# check that PR gh-17247 did not significantly change covariance matrix
|
| 1006 |
+
# for simple cases
|
| 1007 |
+
rng = np.random.default_rng(408113519974467917)
|
| 1008 |
+
|
| 1009 |
+
def func(x, a, b, c):
|
| 1010 |
+
return a * np.exp(-b * x) + c
|
| 1011 |
+
|
| 1012 |
+
xdata = np.linspace(0, 4, 50)
|
| 1013 |
+
y = func(xdata, 2.5, 1.3, 0.5)
|
| 1014 |
+
y_noise = 0.2 * rng.normal(size=xdata.size)
|
| 1015 |
+
ydata = y + y_noise
|
| 1016 |
+
_, res = curve_fit(func, xdata, ydata)
|
| 1017 |
+
# reference from commit 1d80a2f254380d2b45733258ca42eb6b55c8755b
|
| 1018 |
+
ref = [[+0.0158972536486215, 0.0069207183284242, -0.0007474400714749],
|
| 1019 |
+
[+0.0069207183284242, 0.0205057958128679, +0.0053997711275403],
|
| 1020 |
+
[-0.0007474400714749, 0.0053997711275403, +0.0027833930320877]]
|
| 1021 |
+
# Linux_Python_38_32bit_full fails with default tolerance
|
| 1022 |
+
assert_allclose(res, ref, 2e-7)
|
| 1023 |
+
|
| 1024 |
+
def test_gh13670(self):
|
| 1025 |
+
# gh-13670 reported that `curve_fit` executes callables
|
| 1026 |
+
# with the same values of the parameters at the beginning of
|
| 1027 |
+
# optimization. Check that this has been resolved.
|
| 1028 |
+
|
| 1029 |
+
rng = np.random.default_rng(8250058582555444926)
|
| 1030 |
+
x = np.linspace(0, 3, 101)
|
| 1031 |
+
y = 2 * x + 1 + rng.normal(size=101) * 0.5
|
| 1032 |
+
|
| 1033 |
+
def line(x, *p):
|
| 1034 |
+
assert not np.all(line.last_p == p)
|
| 1035 |
+
line.last_p = p
|
| 1036 |
+
return x * p[0] + p[1]
|
| 1037 |
+
|
| 1038 |
+
def jac(x, *p):
|
| 1039 |
+
assert not np.all(jac.last_p == p)
|
| 1040 |
+
jac.last_p = p
|
| 1041 |
+
return np.array([x, np.ones_like(x)]).T
|
| 1042 |
+
|
| 1043 |
+
line.last_p = None
|
| 1044 |
+
jac.last_p = None
|
| 1045 |
+
p0 = np.array([1.0, 5.0])
|
| 1046 |
+
curve_fit(line, x, y, p0, method='lm', jac=jac)
|
| 1047 |
+
|
| 1048 |
+
|
| 1049 |
+
class TestFixedPoint:
|
| 1050 |
+
|
| 1051 |
+
def test_scalar_trivial(self):
|
| 1052 |
+
# f(x) = 2x; fixed point should be x=0
|
| 1053 |
+
def func(x):
|
| 1054 |
+
return 2.0*x
|
| 1055 |
+
x0 = 1.0
|
| 1056 |
+
x = fixed_point(func, x0)
|
| 1057 |
+
assert_almost_equal(x, 0.0)
|
| 1058 |
+
|
| 1059 |
+
def test_scalar_basic1(self):
|
| 1060 |
+
# f(x) = x**2; x0=1.05; fixed point should be x=1
|
| 1061 |
+
def func(x):
|
| 1062 |
+
return x**2
|
| 1063 |
+
x0 = 1.05
|
| 1064 |
+
x = fixed_point(func, x0)
|
| 1065 |
+
assert_almost_equal(x, 1.0)
|
| 1066 |
+
|
| 1067 |
+
def test_scalar_basic2(self):
|
| 1068 |
+
# f(x) = x**0.5; x0=1.05; fixed point should be x=1
|
| 1069 |
+
def func(x):
|
| 1070 |
+
return x**0.5
|
| 1071 |
+
x0 = 1.05
|
| 1072 |
+
x = fixed_point(func, x0)
|
| 1073 |
+
assert_almost_equal(x, 1.0)
|
| 1074 |
+
|
| 1075 |
+
def test_array_trivial(self):
|
| 1076 |
+
def func(x):
|
| 1077 |
+
return 2.0*x
|
| 1078 |
+
x0 = [0.3, 0.15]
|
| 1079 |
+
with np.errstate(all='ignore'):
|
| 1080 |
+
x = fixed_point(func, x0)
|
| 1081 |
+
assert_almost_equal(x, [0.0, 0.0])
|
| 1082 |
+
|
| 1083 |
+
def test_array_basic1(self):
|
| 1084 |
+
# f(x) = c * x**2; fixed point should be x=1/c
|
| 1085 |
+
def func(x, c):
|
| 1086 |
+
return c * x**2
|
| 1087 |
+
c = array([0.75, 1.0, 1.25])
|
| 1088 |
+
x0 = [1.1, 1.15, 0.9]
|
| 1089 |
+
with np.errstate(all='ignore'):
|
| 1090 |
+
x = fixed_point(func, x0, args=(c,))
|
| 1091 |
+
assert_almost_equal(x, 1.0/c)
|
| 1092 |
+
|
| 1093 |
+
def test_array_basic2(self):
|
| 1094 |
+
# f(x) = c * x**0.5; fixed point should be x=c**2
|
| 1095 |
+
def func(x, c):
|
| 1096 |
+
return c * x**0.5
|
| 1097 |
+
c = array([0.75, 1.0, 1.25])
|
| 1098 |
+
x0 = [0.8, 1.1, 1.1]
|
| 1099 |
+
x = fixed_point(func, x0, args=(c,))
|
| 1100 |
+
assert_almost_equal(x, c**2)
|
| 1101 |
+
|
| 1102 |
+
def test_lambertw(self):
|
| 1103 |
+
# python-list/2010-December/594592.html
|
| 1104 |
+
xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
|
| 1105 |
+
args=(), xtol=1e-12, maxiter=500)
|
| 1106 |
+
assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
|
| 1107 |
+
assert_allclose(xxroot, lambertw(1)/2)
|
| 1108 |
+
|
| 1109 |
+
def test_no_acceleration(self):
|
| 1110 |
+
# github issue 5460
|
| 1111 |
+
ks = 2
|
| 1112 |
+
kl = 6
|
| 1113 |
+
m = 1.3
|
| 1114 |
+
n0 = 1.001
|
| 1115 |
+
i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1))
|
| 1116 |
+
|
| 1117 |
+
def func(n):
|
| 1118 |
+
return np.log(kl/ks/n) / np.log(i0*n/(n - 1)) + 1
|
| 1119 |
+
|
| 1120 |
+
n = fixed_point(func, n0, method='iteration')
|
| 1121 |
+
assert_allclose(n, m)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_quadratic_assignment.py
ADDED
|
@@ -0,0 +1,431 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy.optimize import quadratic_assignment, OptimizeWarning
|
| 4 |
+
from scipy.optimize._qap import _calc_score as _score
|
| 5 |
+
from numpy.testing import assert_equal, assert_, assert_warns
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
################
|
| 9 |
+
# Common Tests #
|
| 10 |
+
################
|
| 11 |
+
|
| 12 |
+
def chr12c():
|
| 13 |
+
A = [
|
| 14 |
+
[0, 90, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
| 15 |
+
[90, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0],
|
| 16 |
+
[10, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0],
|
| 17 |
+
[0, 23, 0, 0, 0, 88, 0, 0, 0, 0, 0, 0],
|
| 18 |
+
[0, 0, 43, 0, 0, 0, 26, 0, 0, 0, 0, 0],
|
| 19 |
+
[0, 0, 0, 88, 0, 0, 0, 16, 0, 0, 0, 0],
|
| 20 |
+
[0, 0, 0, 0, 26, 0, 0, 0, 1, 0, 0, 0],
|
| 21 |
+
[0, 0, 0, 0, 0, 16, 0, 0, 0, 96, 0, 0],
|
| 22 |
+
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 29, 0],
|
| 23 |
+
[0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 37],
|
| 24 |
+
[0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0],
|
| 25 |
+
[0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0],
|
| 26 |
+
]
|
| 27 |
+
B = [
|
| 28 |
+
[0, 36, 54, 26, 59, 72, 9, 34, 79, 17, 46, 95],
|
| 29 |
+
[36, 0, 73, 35, 90, 58, 30, 78, 35, 44, 79, 36],
|
| 30 |
+
[54, 73, 0, 21, 10, 97, 58, 66, 69, 61, 54, 63],
|
| 31 |
+
[26, 35, 21, 0, 93, 12, 46, 40, 37, 48, 68, 85],
|
| 32 |
+
[59, 90, 10, 93, 0, 64, 5, 29, 76, 16, 5, 76],
|
| 33 |
+
[72, 58, 97, 12, 64, 0, 96, 55, 38, 54, 0, 34],
|
| 34 |
+
[9, 30, 58, 46, 5, 96, 0, 83, 35, 11, 56, 37],
|
| 35 |
+
[34, 78, 66, 40, 29, 55, 83, 0, 44, 12, 15, 80],
|
| 36 |
+
[79, 35, 69, 37, 76, 38, 35, 44, 0, 64, 39, 33],
|
| 37 |
+
[17, 44, 61, 48, 16, 54, 11, 12, 64, 0, 70, 86],
|
| 38 |
+
[46, 79, 54, 68, 5, 0, 56, 15, 39, 70, 0, 18],
|
| 39 |
+
[95, 36, 63, 85, 76, 34, 37, 80, 33, 86, 18, 0],
|
| 40 |
+
]
|
| 41 |
+
A, B = np.array(A), np.array(B)
|
| 42 |
+
n = A.shape[0]
|
| 43 |
+
|
| 44 |
+
opt_perm = np.array([7, 5, 1, 3, 10, 4, 8, 6, 9, 11, 2, 12]) - [1] * n
|
| 45 |
+
|
| 46 |
+
return A, B, opt_perm
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class QAPCommonTests:
|
| 50 |
+
"""
|
| 51 |
+
Base class for `quadratic_assignment` tests.
|
| 52 |
+
"""
|
| 53 |
+
def setup_method(self):
|
| 54 |
+
np.random.seed(0)
|
| 55 |
+
|
| 56 |
+
# Test global optima of problem from Umeyama IVB
|
| 57 |
+
# https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf
|
| 58 |
+
# Graph matching maximum is in the paper
|
| 59 |
+
# QAP minimum determined by brute force
|
| 60 |
+
def test_accuracy_1(self):
|
| 61 |
+
# besides testing accuracy, check that A and B can be lists
|
| 62 |
+
A = [[0, 3, 4, 2],
|
| 63 |
+
[0, 0, 1, 2],
|
| 64 |
+
[1, 0, 0, 1],
|
| 65 |
+
[0, 0, 1, 0]]
|
| 66 |
+
|
| 67 |
+
B = [[0, 4, 2, 4],
|
| 68 |
+
[0, 0, 1, 0],
|
| 69 |
+
[0, 2, 0, 2],
|
| 70 |
+
[0, 1, 2, 0]]
|
| 71 |
+
|
| 72 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 73 |
+
options={"rng": 0, "maximize": False})
|
| 74 |
+
assert_equal(res.fun, 10)
|
| 75 |
+
assert_equal(res.col_ind, np.array([1, 2, 3, 0]))
|
| 76 |
+
|
| 77 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 78 |
+
options={"rng": 0, "maximize": True})
|
| 79 |
+
|
| 80 |
+
if self.method == 'faq':
|
| 81 |
+
# Global optimum is 40, but FAQ gets 37
|
| 82 |
+
assert_equal(res.fun, 37)
|
| 83 |
+
assert_equal(res.col_ind, np.array([0, 2, 3, 1]))
|
| 84 |
+
else:
|
| 85 |
+
assert_equal(res.fun, 40)
|
| 86 |
+
assert_equal(res.col_ind, np.array([0, 3, 1, 2]))
|
| 87 |
+
|
| 88 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 89 |
+
options={"rng": 0, "maximize": True})
|
| 90 |
+
|
| 91 |
+
# Test global optima of problem from Umeyama IIIB
|
| 92 |
+
# https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf
|
| 93 |
+
# Graph matching maximum is in the paper
|
| 94 |
+
# QAP minimum determined by brute force
|
| 95 |
+
def test_accuracy_2(self):
|
| 96 |
+
|
| 97 |
+
A = np.array([[0, 5, 8, 6],
|
| 98 |
+
[5, 0, 5, 1],
|
| 99 |
+
[8, 5, 0, 2],
|
| 100 |
+
[6, 1, 2, 0]])
|
| 101 |
+
|
| 102 |
+
B = np.array([[0, 1, 8, 4],
|
| 103 |
+
[1, 0, 5, 2],
|
| 104 |
+
[8, 5, 0, 5],
|
| 105 |
+
[4, 2, 5, 0]])
|
| 106 |
+
|
| 107 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 108 |
+
options={"rng": 0, "maximize": False})
|
| 109 |
+
if self.method == 'faq':
|
| 110 |
+
# Global optimum is 176, but FAQ gets 178
|
| 111 |
+
assert_equal(res.fun, 178)
|
| 112 |
+
assert_equal(res.col_ind, np.array([1, 0, 3, 2]))
|
| 113 |
+
else:
|
| 114 |
+
assert_equal(res.fun, 176)
|
| 115 |
+
assert_equal(res.col_ind, np.array([1, 2, 3, 0]))
|
| 116 |
+
|
| 117 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 118 |
+
options={"rng": 0, "maximize": True})
|
| 119 |
+
assert_equal(res.fun, 286)
|
| 120 |
+
assert_equal(res.col_ind, np.array([2, 3, 0, 1]))
|
| 121 |
+
|
| 122 |
+
def test_accuracy_3(self):
|
| 123 |
+
|
| 124 |
+
A, B, opt_perm = chr12c()
|
| 125 |
+
|
| 126 |
+
# basic minimization
|
| 127 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 128 |
+
options={"rng": 0})
|
| 129 |
+
assert_(11156 <= res.fun < 21000)
|
| 130 |
+
assert_equal(res.fun, _score(A, B, res.col_ind))
|
| 131 |
+
|
| 132 |
+
# basic maximization
|
| 133 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 134 |
+
options={"rng": 0, 'maximize': True})
|
| 135 |
+
assert_(74000 <= res.fun < 85000)
|
| 136 |
+
assert_equal(res.fun, _score(A, B, res.col_ind))
|
| 137 |
+
|
| 138 |
+
# check ofv with strictly partial match
|
| 139 |
+
seed_cost = np.array([4, 8, 10])
|
| 140 |
+
seed = np.asarray([seed_cost, opt_perm[seed_cost]]).T
|
| 141 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 142 |
+
options={'partial_match': seed})
|
| 143 |
+
assert_(11156 <= res.fun < 21000)
|
| 144 |
+
assert_equal(res.col_ind[seed_cost], opt_perm[seed_cost])
|
| 145 |
+
|
| 146 |
+
# check performance when partial match is the global optimum
|
| 147 |
+
seed = np.asarray([np.arange(len(A)), opt_perm]).T
|
| 148 |
+
res = quadratic_assignment(A, B, method=self.method,
|
| 149 |
+
options={'partial_match': seed})
|
| 150 |
+
assert_equal(res.col_ind, seed[:, 1].T)
|
| 151 |
+
assert_equal(res.fun, 11156)
|
| 152 |
+
assert_equal(res.nit, 0)
|
| 153 |
+
|
| 154 |
+
# check performance with zero sized matrix inputs
|
| 155 |
+
empty = np.empty((0, 0))
|
| 156 |
+
res = quadratic_assignment(empty, empty, method=self.method,
|
| 157 |
+
options={"rng": 0})
|
| 158 |
+
assert_equal(res.nit, 0)
|
| 159 |
+
assert_equal(res.fun, 0)
|
| 160 |
+
|
| 161 |
+
def test_unknown_options(self):
|
| 162 |
+
A, B, opt_perm = chr12c()
|
| 163 |
+
|
| 164 |
+
def f():
|
| 165 |
+
quadratic_assignment(A, B, method=self.method,
|
| 166 |
+
options={"ekki-ekki": True})
|
| 167 |
+
assert_warns(OptimizeWarning, f)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class TestFAQ(QAPCommonTests):
|
| 171 |
+
method = "faq"
|
| 172 |
+
|
| 173 |
+
def test_options(self):
|
| 174 |
+
# cost and distance matrices of QAPLIB instance chr12c
|
| 175 |
+
A, B, opt_perm = chr12c()
|
| 176 |
+
n = len(A)
|
| 177 |
+
|
| 178 |
+
# check that max_iter is obeying with low input value
|
| 179 |
+
res = quadratic_assignment(A, B,
|
| 180 |
+
options={'maxiter': 5})
|
| 181 |
+
assert_equal(res.nit, 5)
|
| 182 |
+
|
| 183 |
+
# test with shuffle
|
| 184 |
+
res = quadratic_assignment(A, B,
|
| 185 |
+
options={'shuffle_input': True})
|
| 186 |
+
assert_(11156 <= res.fun < 21000)
|
| 187 |
+
|
| 188 |
+
# test with randomized init
|
| 189 |
+
res = quadratic_assignment(A, B,
|
| 190 |
+
options={'rng': 1, 'P0': "randomized"})
|
| 191 |
+
assert_(11156 <= res.fun < 21000)
|
| 192 |
+
|
| 193 |
+
# check with specified P0
|
| 194 |
+
K = np.ones((n, n)) / float(n)
|
| 195 |
+
K = _doubly_stochastic(K)
|
| 196 |
+
res = quadratic_assignment(A, B,
|
| 197 |
+
options={'P0': K})
|
| 198 |
+
assert_(11156 <= res.fun < 21000)
|
| 199 |
+
|
| 200 |
+
def test_specific_input_validation(self):
|
| 201 |
+
|
| 202 |
+
A = np.identity(2)
|
| 203 |
+
B = A
|
| 204 |
+
|
| 205 |
+
# method is implicitly faq
|
| 206 |
+
|
| 207 |
+
# ValueError Checks: making sure single value parameters are of
|
| 208 |
+
# correct value
|
| 209 |
+
with pytest.raises(ValueError, match="Invalid 'P0' parameter"):
|
| 210 |
+
quadratic_assignment(A, B, options={'P0': "random"})
|
| 211 |
+
with pytest.raises(
|
| 212 |
+
ValueError, match="'maxiter' must be a positive integer"):
|
| 213 |
+
quadratic_assignment(A, B, options={'maxiter': -1})
|
| 214 |
+
with pytest.raises(ValueError, match="'tol' must be a positive float"):
|
| 215 |
+
quadratic_assignment(A, B, options={'tol': -1})
|
| 216 |
+
|
| 217 |
+
# TypeError Checks: making sure single value parameters are of
|
| 218 |
+
# correct type
|
| 219 |
+
with pytest.raises(TypeError):
|
| 220 |
+
quadratic_assignment(A, B, options={'maxiter': 1.5})
|
| 221 |
+
|
| 222 |
+
# test P0 matrix input
|
| 223 |
+
with pytest.raises(
|
| 224 |
+
ValueError,
|
| 225 |
+
match="`P0` matrix must have shape m' x m', where m'=n-m"):
|
| 226 |
+
quadratic_assignment(
|
| 227 |
+
np.identity(4), np.identity(4),
|
| 228 |
+
options={'P0': np.ones((3, 3))}
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
K = [[0.4, 0.2, 0.3],
|
| 232 |
+
[0.3, 0.6, 0.2],
|
| 233 |
+
[0.2, 0.2, 0.7]]
|
| 234 |
+
# matrix that isn't quite doubly stochastic
|
| 235 |
+
with pytest.raises(
|
| 236 |
+
ValueError, match="`P0` matrix must be doubly stochastic"):
|
| 237 |
+
quadratic_assignment(
|
| 238 |
+
np.identity(3), np.identity(3), options={'P0': K}
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class Test2opt(QAPCommonTests):
    """Tests for `quadratic_assignment` with the '2opt' method.

    Inherits the shared test battery from QAPCommonTests and adds
    2opt-specific checks (determinism, partial guesses, and
    `partial_guess` input validation).
    """

    method = "2opt"

    def test_deterministic(self):
        """Two runs from the same RNG state must take the same number of
        iterations."""
        # np.random.seed(0) executes before every method
        n = 20

        A = np.random.rand(n, n)
        B = np.random.rand(n, n)
        res1 = quadratic_assignment(A, B, method=self.method)

        np.random.seed(0)

        A = np.random.rand(n, n)
        B = np.random.rand(n, n)
        res2 = quadratic_assignment(A, B, method=self.method)

        assert_equal(res1.nit, res2.nit)

    def test_partial_guess(self):
        """A full/partial guess (optionally combined with a partial match)
        should change how many swaps 2opt has to test."""
        n = 5
        A = np.random.rand(n, n)
        B = np.random.rand(n, n)

        res1 = quadratic_assignment(A, B, method=self.method,
                                    options={'rng': 0})
        # guess is the full optimal permutation found above
        guess = np.array([np.arange(5), res1.col_ind]).T
        res2 = quadratic_assignment(A, B, method=self.method,
                                    options={'rng': 0, 'partial_guess': guess})
        # additionally pin two of the assignments as a partial match
        fix = [2, 4]
        match = np.array([np.arange(5)[fix], res1.col_ind[fix]]).T
        res3 = quadratic_assignment(A, B, method=self.method,
                                    options={'rng': 0, 'partial_guess': guess,
                                             'partial_match': match})
        assert_(res1.nit != n*(n+1)/2)
        assert_equal(res2.nit, n*(n+1)/2)  # tests each swap exactly once
        assert_equal(res3.nit, (n-2)*(n-1)/2)  # tests free swaps exactly once

    def test_specific_input_validation(self):
        """Validation of the 2opt-only `partial_guess` option."""
        # can't have more seed nodes than cost/dist nodes
        _rm = _range_matrix
        with pytest.raises(
                ValueError,
                match="`partial_guess` can have only as many entries as"):
            quadratic_assignment(np.identity(3), np.identity(3),
                                 method=self.method,
                                 options={'partial_guess': _rm(5, 2)})
        # test for only two seed columns
        with pytest.raises(
                ValueError, match="`partial_guess` must have two columns"):
            quadratic_assignment(
                np.identity(3), np.identity(3), method=self.method,
                options={'partial_guess': _range_matrix(2, 3)}
            )
        # test that seed has no more than two dimensions
        with pytest.raises(
                ValueError, match="`partial_guess` must have exactly two"):
            quadratic_assignment(
                np.identity(3), np.identity(3), method=self.method,
                options={'partial_guess': np.random.rand(3, 2, 2)}
            )
        # seeds cannot be negative valued
        with pytest.raises(
                ValueError, match="`partial_guess` must contain only pos"):
            quadratic_assignment(
                np.identity(3), np.identity(3), method=self.method,
                options={'partial_guess': -1 * _range_matrix(2, 2)}
            )
        # seeds can't have values greater than number of nodes
        with pytest.raises(
                ValueError,
                match="`partial_guess` entries must be less than number"):
            quadratic_assignment(
                np.identity(5), np.identity(5), method=self.method,
                options={'partial_guess': 2 * _range_matrix(4, 2)}
            )
        # columns of seed matrix must be unique
        with pytest.raises(
                ValueError,
                match="`partial_guess` column entries must be unique"):
            quadratic_assignment(
                np.identity(3), np.identity(3), method=self.method,
                options={'partial_guess': np.ones((2, 2))}
            )
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
class TestQAPOnce:
    """Input-validation tests for `quadratic_assignment` that are
    method-independent, so they only need to run once (not per method)."""

    def setup_method(self):
        np.random.seed(0)

    # these don't need to be repeated for each method
    def test_common_input_validation(self):
        """Check the shared `A`/`B`/`partial_match` validation errors."""
        # test that non square matrices return error
        with pytest.raises(ValueError, match="`A` must be square"):
            quadratic_assignment(
                np.random.random((3, 4)),
                np.random.random((3, 3)),
            )
        with pytest.raises(ValueError, match="`B` must be square"):
            quadratic_assignment(
                np.random.random((3, 3)),
                np.random.random((3, 4)),
            )
        # test that cost and dist matrices have no more than two dimensions
        with pytest.raises(
                ValueError, match="`A` and `B` must have exactly two"):
            quadratic_assignment(
                np.random.random((3, 3, 3)),
                np.random.random((3, 3, 3)),
            )
        # test that cost and dist matrices of different sizes return error
        with pytest.raises(
                ValueError,
                match="`A` and `B` matrices must be of equal size"):
            quadratic_assignment(
                np.random.random((3, 3)),
                np.random.random((4, 4)),
            )
        # can't have more seed nodes than cost/dist nodes
        _rm = _range_matrix
        with pytest.raises(
                ValueError,
                match="`partial_match` can have only as many seeds as"):
            quadratic_assignment(np.identity(3), np.identity(3),
                                 options={'partial_match': _rm(5, 2)})
        # test for only two seed columns
        with pytest.raises(
                ValueError, match="`partial_match` must have two columns"):
            quadratic_assignment(
                np.identity(3), np.identity(3),
                options={'partial_match': _range_matrix(2, 3)}
            )
        # test that seed has no more than two dimensions
        with pytest.raises(
                ValueError, match="`partial_match` must have exactly two"):
            quadratic_assignment(
                np.identity(3), np.identity(3),
                options={'partial_match': np.random.rand(3, 2, 2)}
            )
        # seeds cannot be negative valued
        with pytest.raises(
                ValueError, match="`partial_match` must contain only pos"):
            quadratic_assignment(
                np.identity(3), np.identity(3),
                options={'partial_match': -1 * _range_matrix(2, 2)}
            )
        # seeds can't have values greater than number of nodes
        with pytest.raises(
                ValueError,
                match="`partial_match` entries must be less than number"):
            quadratic_assignment(
                np.identity(5), np.identity(5),
                options={'partial_match': 2 * _range_matrix(4, 2)}
            )
        # columns of seed matrix must be unique
        with pytest.raises(
                ValueError,
                match="`partial_match` column entries must be unique"):
            quadratic_assignment(
                np.identity(3), np.identity(3),
                options={'partial_match': np.ones((2, 2))}
            )
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
def _range_matrix(a, b):
|
| 407 |
+
mat = np.zeros((a, b))
|
| 408 |
+
for i in range(b):
|
| 409 |
+
mat[:, i] = np.arange(a)
|
| 410 |
+
return mat
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def _doubly_stochastic(P, tol=1e-3):
|
| 414 |
+
# cleaner implementation of btaba/sinkhorn_knopp
|
| 415 |
+
|
| 416 |
+
max_iter = 1000
|
| 417 |
+
c = 1 / P.sum(axis=0)
|
| 418 |
+
r = 1 / (P @ c)
|
| 419 |
+
P_eps = P
|
| 420 |
+
|
| 421 |
+
for it in range(max_iter):
|
| 422 |
+
if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and
|
| 423 |
+
(np.abs(P_eps.sum(axis=0) - 1) < tol).all()):
|
| 424 |
+
# All column/row sums ~= 1 within threshold
|
| 425 |
+
break
|
| 426 |
+
|
| 427 |
+
c = 1 / (r @ P)
|
| 428 |
+
r = 1 / (P @ c)
|
| 429 |
+
P_eps = r[:, None] * P * c
|
| 430 |
+
|
| 431 |
+
return P_eps
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_tnc.py
ADDED
|
@@ -0,0 +1,345 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for TNC optimization routine from tnc.py
|
| 3 |
+
"""
|
| 4 |
+
import pytest
|
| 5 |
+
from numpy.testing import assert_allclose, assert_equal
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from math import pow
|
| 9 |
+
|
| 10 |
+
from scipy import optimize
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class TestTnc:
    """TNC non-linear optimization.

    These tests are taken from Prof. K. Schittkowski's test examples
    for constrained non-linear programming.

    http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm

    Each problem is provided as an objective ``fN``, its gradient ``gN``,
    and a combined ``fgN`` returning ``(f, grad)`` for ``jac=True`` /
    ``fmin_tnc`` usage.  Tests exercise both the ``minimize(method='TNC')``
    interface and the legacy ``fmin_tnc`` interface.
    """
    def setup_method(self):
        # options for minimize
        self.opts = {'disp': False, 'maxfun': 200}

    # objective functions and Jacobian for each test
    def f1(self, x, a=100.0):
        # Rosenbrock-type objective (Schittkowski problem 1)
        return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2)

    def g1(self, x, a=100.0):
        dif = [0, 0]
        dif[1] = 2 * a * (x[1] - pow(x[0], 2))
        dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0)
        return dif

    def fg1(self, x, a=100.0):
        return self.f1(x, a), self.g1(x, a)

    def f3(self, x):
        return x[1] + pow(x[1] - x[0], 2) * 1.0e-5

    def g3(self, x):
        dif = [0, 0]
        dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5
        dif[1] = 1.0 - dif[0]
        return dif

    def fg3(self, x):
        return self.f3(x), self.g3(x)

    def f4(self, x):
        return pow(x[0] + 1.0, 3) / 3.0 + x[1]

    def g4(self, x):
        dif = [0, 0]
        dif[0] = pow(x[0] + 1.0, 2)
        dif[1] = 1.0
        return dif

    def fg4(self, x):
        return self.f4(x), self.g4(x)

    def f5(self, x):
        return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \
                1.5 * x[0] + 2.5 * x[1] + 1.0

    def g5(self, x):
        dif = [0, 0]
        v1 = np.cos(x[0] + x[1])
        v2 = 2.0*(x[0] - x[1])

        dif[0] = v1 + v2 - 1.5
        dif[1] = v1 - v2 + 2.5
        return dif

    def fg5(self, x):
        return self.f5(x), self.g5(x)

    def f38(self, x):
        # 4-D extended Rosenbrock variant (Schittkowski problem 38),
        # scaled by 1e-5
        return (100.0 * pow(x[1] - pow(x[0], 2), 2) +
                pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) +
                pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) +
                                             pow(x[3] - 1.0, 2)) +
                19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5

    def g38(self, x):
        dif = [0, 0, 0, 0]
        dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) -
                  2.0 * (1.0 - x[0])) * 1.0e-5
        dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) +
                  19.8 * (x[3] - 1.0)) * 1.0e-5
        dif[2] = (- 360.0 * x[2] * (x[3] - pow(x[2], 2)) -
                  2.0 * (1.0 - x[2])) * 1.0e-5
        dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) +
                  19.8 * (x[1] - 1.0)) * 1.0e-5
        return dif

    def fg38(self, x):
        return self.f38(x), self.g38(x)

    def f45(self, x):
        return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0

    def g45(self, x):
        dif = [0] * 5
        dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0
        dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0
        dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0
        dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0
        dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0
        return dif

    def fg45(self, x):
        return self.f45(x), self.g45(x)

    # tests
    # minimize with method=TNC
    def test_minimize_tnc1(self):
        x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None])
        xopt = [1, 1]
        iterx = []  # to test callback

        res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1,
                                bounds=bnds, options=self.opts,
                                callback=iterx.append)
        assert_allclose(res.fun, self.f1(xopt), atol=1e-8)
        # callback must fire exactly once per iteration
        assert_equal(len(iterx), res.nit)

    def test_minimize_tnc1b(self):
        # same problem, gradient approximated numerically (looser atol)
        x0, bnds = np.array([-2, 1]), ([-np.inf, None], [-1.5, None])
        xopt = [1, 1]
        x = optimize.minimize(self.f1, x0, method='TNC',
                              bounds=bnds, options=self.opts).x
        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4)

    def test_minimize_tnc1c(self):
        # same problem, combined fun+grad via jac=True
        x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None])
        xopt = [1, 1]
        x = optimize.minimize(self.fg1, x0, method='TNC',
                              jac=True, bounds=bnds,
                              options=self.opts).x
        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)

    def test_minimize_tnc2(self):
        x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None])
        xopt = [-1.2210262419616387, 1.5]
        x = optimize.minimize(self.f1, x0, method='TNC',
                              jac=self.g1, bounds=bnds,
                              options=self.opts).x
        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)

    def test_minimize_tnc3(self):
        x0, bnds = [10, 1], ([-np.inf, None], [0.0, None])
        xopt = [0, 0]
        x = optimize.minimize(self.f3, x0, method='TNC',
                              jac=self.g3, bounds=bnds,
                              options=self.opts).x
        assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8)

    def test_minimize_tnc4(self):
        x0, bnds = [1.125, 0.125], [(1, None), (0, None)]
        xopt = [1, 0]
        x = optimize.minimize(self.f4, x0, method='TNC',
                              jac=self.g4, bounds=bnds,
                              options=self.opts).x
        assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8)

    def test_minimize_tnc5(self):
        x0, bnds = [0, 0], [(-1.5, 4), (-3, 3)]
        xopt = [-0.54719755119659763, -1.5471975511965976]
        x = optimize.minimize(self.f5, x0, method='TNC',
                              jac=self.g5, bounds=bnds,
                              options=self.opts).x
        assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8)

    def test_minimize_tnc38(self):
        x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4
        xopt = [1]*4
        x = optimize.minimize(self.f38, x0, method='TNC',
                              jac=self.g38, bounds=bnds,
                              options=self.opts).x
        assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8)

    def test_minimize_tnc45(self):
        x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]
        xopt = [1, 2, 3, 4, 5]
        x = optimize.minimize(self.f45, x0, method='TNC',
                              jac=self.g45, bounds=bnds,
                              options=self.opts).x
        assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8)

    # fmin_tnc
    def test_tnc1(self):
        fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None])
        xopt = [1, 1]

        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ),
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_tnc1b(self):
        x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])
        xopt = [1, 1]

        x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True,
                                      bounds=bounds,
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_tnc1c(self):
        x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])
        xopt = [1, 1]

        x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1,
                                      bounds=bounds,
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_tnc2(self):
        fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None])
        xopt = [-1.2210262419616387, 1.5]

        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_tnc3(self):
        fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None])
        xopt = [0, 0]

        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_tnc4(self):
        fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)]
        xopt = [1, 0]

        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_tnc5(self):
        fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4), (-3, 3)]
        xopt = [-0.54719755119659763, -1.5471975511965976]

        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_tnc38(self):
        fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4
        xopt = [1]*4

        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_tnc45(self):
        fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3),
                                             (0, 4), (0, 5)]
        xopt = [1, 2, 3, 4, 5]

        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
                                      messages=optimize._tnc.MSG_NONE,
                                      maxfun=200)

        assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8,
                        err_msg="TNC failed with status: " +
                                optimize._tnc.RCSTRINGS[rc])

    def test_raising_exceptions(self):
        # tnc was ported to cython from hand-crafted cpython code
        # check that Exception handling works.
        def myfunc(x):
            raise RuntimeError("myfunc")

        def myfunc1(x):
            return optimize.rosen(x)

        def callback(x):
            raise ValueError("callback")

        with pytest.raises(RuntimeError):
            optimize.minimize(myfunc, [0, 1], method="TNC")

        with pytest.raises(ValueError):
            optimize.minimize(
                myfunc1, [0, 1], method="TNC", callback=callback
            )

    def test_callback_shouldnt_affect_minimization(self):
        # gh14879. The output of a TNC minimization was different depending
        # on whether a callback was used or not. The two should be equivalent.
        # The issue was that TNC was unscaling/scaling x, and this process was
        # altering x in the process. Now the callback uses an unscaled
        # temporary copy of x.
        def callback(x):
            pass

        fun = optimize.rosen
        bounds = [(0, 10)] * 4
        x0 = [1, 2, 3, 4.]
        res = optimize.minimize(
            fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000}
        )
        res2 = optimize.minimize(
            fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000},
            callback=callback
        )
        assert_allclose(res2.x, res.x)
        assert_allclose(res2.fun, res.fun)
        assert_equal(res2.nfev, res.nfev)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_exact.py
ADDED
|
@@ -0,0 +1,354 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for trust-region iterative subproblem.
|
| 3 |
+
|
| 4 |
+
To run it in its simplest form::
|
| 5 |
+
nosetests test_optimize.py
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
import pytest
|
| 9 |
+
import numpy as np
|
| 10 |
+
from scipy.optimize._trustregion_exact import (
|
| 11 |
+
estimate_smallest_singular_value,
|
| 12 |
+
singular_leading_submatrix,
|
| 13 |
+
IterativeSubproblem)
|
| 14 |
+
from scipy.linalg import (svd, get_lapack_funcs, det, qr, norm)
|
| 15 |
+
from numpy.testing import (assert_array_equal,
|
| 16 |
+
assert_equal, assert_array_almost_equal)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def random_entry(n, min_eig, max_eig, case):
    """Build a random symmetric test matrix and a matching gradient.

    Parameters
    ----------
    n : int
        Dimension of the matrix/vector.
    min_eig, max_eig : float
        Eigenvalues of ``A`` are drawn uniformly from this interval.
    case : str
        ``'hard'``   -> ``g`` orthogonal to the eigenvector of the smallest
        eigenvalue (the trust-region "hard case");
        ``'jac_equal_zero'`` -> ``g`` is the zero vector;
        anything else -> ``g`` drawn uniformly from [-1, 1).

    Returns
    -------
    A : ndarray, shape (n, n)
        Symmetric matrix with the prescribed eigenvalue range.
    g : ndarray, shape (n,)
        Gradient vector for the requested case.
    """
    # Generate random matrix
    rand = np.random.uniform(-1, 1, (n, n))

    # QR decomposition; `pivoting` is a boolean flag.  The original passed
    # the *string* 'True', which only worked because any non-empty string
    # is truthy -- fixed to the actual boolean.
    Q, _, _ = qr(rand, pivoting=True)

    # Generate random eigenvalues, sorted in decreasing order
    eigvalues = np.random.uniform(min_eig, max_eig, n)
    eigvalues = np.sort(eigvalues)[::-1]

    # Generate matrix A = Q @ diag(eigvalues) @ Q.T (Q is orthogonal)
    Qaux = np.multiply(eigvalues, Q)
    A = np.dot(Qaux, Q.T)

    # Generate gradient vector accordingly
    # to the case is being tested.
    if case == 'hard':
        # zero component along the last (smallest-eigenvalue) eigenvector
        g = np.zeros(n)
        g[:-1] = np.random.uniform(-1, 1, n-1)
        g = np.dot(Q, g)
    elif case == 'jac_equal_zero':
        g = np.zeros(n)
    else:
        g = np.random.uniform(-1, 1, n)

    return A, g
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class TestEstimateSmallestSingularValue:
    """Tests for `estimate_smallest_singular_value` against a full SVD."""

    # NOTE: method name keeps the upstream typo ("condiotioned") so the
    # test id stays stable.
    def test_for_ill_condiotioned_matrix(self):

        # Ill-conditioned triangular matrix
        C = np.array([[1, 2, 3, 4],
                      [0, 0.05, 60, 7],
                      [0, 0, 0.8, 9],
                      [0, 0, 0, 10]])

        # Get svd decomposition
        U, s, Vt = svd(C)

        # Get smallest singular value and correspondent right singular vector.
        smin_svd = s[-1]
        zmin_svd = Vt[-1, :]

        # Estimate smallest singular value
        smin, zmin = estimate_smallest_singular_value(C)

        # Check the estimation (singular vectors are compared up to sign,
        # hence the abs())
        assert_array_almost_equal(smin, smin_svd, decimal=8)
        assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class TestSingularLeadingSubmatrix:
    """Tests for `singular_leading_submatrix`.

    Each test feeds a matrix whose Cholesky factorization fails at step
    ``k`` and verifies that the returned ``delta`` makes the leading
    ``k x k`` submatrix exactly singular, with ``v`` in its null space
    (so the quadratic form v.T @ A @ v vanishes).
    """

    def test_for_already_singular_leading_submatrix(self):

        # Define test matrix A.
        # Note that the leading 2x2 submatrix is singular.
        A = np.array([[1, 2, 3],
                      [2, 4, 5],
                      [3, 5, 6]])

        # Get Cholesky from lapack functions
        cholesky, = get_lapack_funcs(('potrf',), (A,))

        # Compute Cholesky Decomposition
        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)

        delta, v = singular_leading_submatrix(A, c, k)

        A[k-1, k-1] += delta

        # Check if the leading submatrix is singular.
        assert_array_almost_equal(det(A[:k, :k]), 0)

        # Check if `v` fulfil the specified properties
        quadratic_term = np.dot(v, np.dot(A, v))
        assert_array_almost_equal(quadratic_term, 0)

    def test_for_simetric_indefinite_matrix(self):

        # Define test matrix A.
        # Note that the leading 5x5 submatrix is indefinite.
        A = np.asarray([[1, 2, 3, 7, 8],
                        [2, 5, 5, 9, 0],
                        [3, 5, 11, 1, 2],
                        [7, 9, 1, 7, 5],
                        [8, 0, 2, 5, 8]])

        # Get Cholesky from lapack functions
        cholesky, = get_lapack_funcs(('potrf',), (A,))

        # Compute Cholesky Decomposition
        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)

        delta, v = singular_leading_submatrix(A, c, k)

        A[k-1, k-1] += delta

        # Check if the leading submatrix is singular.
        assert_array_almost_equal(det(A[:k, :k]), 0)

        # Check if `v` fulfil the specified properties
        quadratic_term = np.dot(v, np.dot(A, v))
        assert_array_almost_equal(quadratic_term, 0)

    def test_for_first_element_equal_to_zero(self):

        # Define test matrix A.
        # Note that the leading 2x2 submatrix is singular.
        A = np.array([[0, 3, 11],
                      [3, 12, 5],
                      [11, 5, 6]])

        # Get Cholesky from lapack functions
        cholesky, = get_lapack_funcs(('potrf',), (A,))

        # Compute Cholesky Decomposition
        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)

        delta, v = singular_leading_submatrix(A, c, k)

        A[k-1, k-1] += delta

        # Check if the leading submatrix is singular
        assert_array_almost_equal(det(A[:k, :k]), 0)

        # Check if `v` fulfil the specified properties
        quadratic_term = np.dot(v, np.dot(A, v))
        assert_array_almost_equal(quadratic_term, 0)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class TestIterativeSubproblem:
|
| 155 |
+
|
| 156 |
+
def test_for_the_easy_case(self):
|
| 157 |
+
|
| 158 |
+
# `H` is chosen such that `g` is not orthogonal to the
|
| 159 |
+
# eigenvector associated with the smallest eigenvalue `s`.
|
| 160 |
+
H = [[10, 2, 3, 4],
|
| 161 |
+
[2, 1, 7, 1],
|
| 162 |
+
[3, 7, 1, 7],
|
| 163 |
+
[4, 1, 7, 2]]
|
| 164 |
+
g = [1, 1, 1, 1]
|
| 165 |
+
|
| 166 |
+
# Trust Radius
|
| 167 |
+
trust_radius = 1
|
| 168 |
+
|
| 169 |
+
# Solve Subproblem
|
| 170 |
+
subprob = IterativeSubproblem(x=0,
|
| 171 |
+
fun=lambda x: 0,
|
| 172 |
+
jac=lambda x: np.array(g),
|
| 173 |
+
hess=lambda x: np.array(H),
|
| 174 |
+
k_easy=1e-10,
|
| 175 |
+
k_hard=1e-10)
|
| 176 |
+
p, hits_boundary = subprob.solve(trust_radius)
|
| 177 |
+
|
| 178 |
+
assert_array_almost_equal(p, [0.00393332, -0.55260862,
|
| 179 |
+
0.67065477, -0.49480341])
|
| 180 |
+
assert_array_almost_equal(hits_boundary, True)
|
| 181 |
+
|
| 182 |
+
def test_for_the_hard_case(self):
|
| 183 |
+
|
| 184 |
+
# `H` is chosen such that `g` is orthogonal to the
|
| 185 |
+
# eigenvector associated with the smallest eigenvalue `s`.
|
| 186 |
+
H = [[10, 2, 3, 4],
|
| 187 |
+
[2, 1, 7, 1],
|
| 188 |
+
[3, 7, 1, 7],
|
| 189 |
+
[4, 1, 7, 2]]
|
| 190 |
+
g = [6.4852641521327437, 1, 1, 1]
|
| 191 |
+
s = -8.2151519874416614
|
| 192 |
+
|
| 193 |
+
# Trust Radius
|
| 194 |
+
trust_radius = 1
|
| 195 |
+
|
| 196 |
+
# Solve Subproblem
|
| 197 |
+
subprob = IterativeSubproblem(x=0,
|
| 198 |
+
fun=lambda x: 0,
|
| 199 |
+
jac=lambda x: np.array(g),
|
| 200 |
+
hess=lambda x: np.array(H),
|
| 201 |
+
k_easy=1e-10,
|
| 202 |
+
k_hard=1e-10)
|
| 203 |
+
p, hits_boundary = subprob.solve(trust_radius)
|
| 204 |
+
|
| 205 |
+
assert_array_almost_equal(-s, subprob.lambda_current)
|
| 206 |
+
|
| 207 |
+
def test_for_interior_convergence(self):
|
| 208 |
+
|
| 209 |
+
H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988],
|
| 210 |
+
[0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588],
|
| 211 |
+
[0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867],
|
| 212 |
+
[-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166],
|
| 213 |
+
[0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]
|
| 214 |
+
|
| 215 |
+
g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]
|
| 216 |
+
|
| 217 |
+
# Solve Subproblem
|
| 218 |
+
subprob = IterativeSubproblem(x=0,
|
| 219 |
+
fun=lambda x: 0,
|
| 220 |
+
jac=lambda x: np.array(g),
|
| 221 |
+
hess=lambda x: np.array(H))
|
| 222 |
+
p, hits_boundary = subprob.solve(1.1)
|
| 223 |
+
|
| 224 |
+
assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999,
|
| 225 |
+
-0.67005053, 0.31586769])
|
| 226 |
+
assert_array_almost_equal(hits_boundary, False)
|
| 227 |
+
assert_array_almost_equal(subprob.lambda_current, 0)
|
| 228 |
+
assert_array_almost_equal(subprob.niter, 1)
|
| 229 |
+
|
| 230 |
+
def test_for_jac_equal_zero(self):
|
| 231 |
+
|
| 232 |
+
H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
|
| 233 |
+
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
|
| 234 |
+
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
|
| 235 |
+
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
|
| 236 |
+
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
|
| 237 |
+
|
| 238 |
+
g = [0, 0, 0, 0, 0]
|
| 239 |
+
|
| 240 |
+
# Solve Subproblem
|
| 241 |
+
subprob = IterativeSubproblem(x=0,
|
| 242 |
+
fun=lambda x: 0,
|
| 243 |
+
jac=lambda x: np.array(g),
|
| 244 |
+
hess=lambda x: np.array(H),
|
| 245 |
+
k_easy=1e-10,
|
| 246 |
+
k_hard=1e-10)
|
| 247 |
+
p, hits_boundary = subprob.solve(1.1)
|
| 248 |
+
|
| 249 |
+
assert_array_almost_equal(p, [0.06910534, -0.01432721,
|
| 250 |
+
-0.65311947, -0.23815972,
|
| 251 |
+
-0.84954934])
|
| 252 |
+
assert_array_almost_equal(hits_boundary, True)
|
| 253 |
+
|
| 254 |
+
def test_for_jac_very_close_to_zero(self):
|
| 255 |
+
|
| 256 |
+
H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
|
| 257 |
+
[2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
|
| 258 |
+
[0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
|
| 259 |
+
[-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
|
| 260 |
+
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
|
| 261 |
+
|
| 262 |
+
g = [0, 0, 0, 0, 1e-15]
|
| 263 |
+
|
| 264 |
+
# Solve Subproblem
|
| 265 |
+
subprob = IterativeSubproblem(x=0,
|
| 266 |
+
fun=lambda x: 0,
|
| 267 |
+
jac=lambda x: np.array(g),
|
| 268 |
+
hess=lambda x: np.array(H),
|
| 269 |
+
k_easy=1e-10,
|
| 270 |
+
k_hard=1e-10)
|
| 271 |
+
p, hits_boundary = subprob.solve(1.1)
|
| 272 |
+
|
| 273 |
+
assert_array_almost_equal(p, [0.06910534, -0.01432721,
|
| 274 |
+
-0.65311947, -0.23815972,
|
| 275 |
+
-0.84954934])
|
| 276 |
+
assert_array_almost_equal(hits_boundary, True)
|
| 277 |
+
|
| 278 |
+
@pytest.mark.fail_slow(5)
|
| 279 |
+
def test_for_random_entries(self):
|
| 280 |
+
# Seed
|
| 281 |
+
np.random.seed(1)
|
| 282 |
+
|
| 283 |
+
# Dimension
|
| 284 |
+
n = 5
|
| 285 |
+
|
| 286 |
+
for case in ('easy', 'hard', 'jac_equal_zero'):
|
| 287 |
+
|
| 288 |
+
eig_limits = [(-20, -15),
|
| 289 |
+
(-10, -5),
|
| 290 |
+
(-10, 0),
|
| 291 |
+
(-5, 5),
|
| 292 |
+
(-10, 10),
|
| 293 |
+
(0, 10),
|
| 294 |
+
(5, 10),
|
| 295 |
+
(15, 20)]
|
| 296 |
+
|
| 297 |
+
for min_eig, max_eig in eig_limits:
|
| 298 |
+
# Generate random symmetric matrix H with
|
| 299 |
+
# eigenvalues between min_eig and max_eig.
|
| 300 |
+
H, g = random_entry(n, min_eig, max_eig, case)
|
| 301 |
+
|
| 302 |
+
# Trust radius
|
| 303 |
+
trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10]
|
| 304 |
+
|
| 305 |
+
for trust_radius in trust_radius_list:
|
| 306 |
+
# Solve subproblem with very high accuracy
|
| 307 |
+
subprob_ac = IterativeSubproblem(0,
|
| 308 |
+
lambda x: 0,
|
| 309 |
+
lambda x: g,
|
| 310 |
+
lambda x: H,
|
| 311 |
+
k_easy=1e-10,
|
| 312 |
+
k_hard=1e-10)
|
| 313 |
+
|
| 314 |
+
p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius)
|
| 315 |
+
|
| 316 |
+
# Compute objective function value
|
| 317 |
+
J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac)
|
| 318 |
+
|
| 319 |
+
stop_criteria = [(0.1, 2),
|
| 320 |
+
(0.5, 1.1),
|
| 321 |
+
(0.9, 1.01)]
|
| 322 |
+
|
| 323 |
+
for k_opt, k_trf in stop_criteria:
|
| 324 |
+
|
| 325 |
+
# k_easy and k_hard computed in function
|
| 326 |
+
# of k_opt and k_trf accordingly to
|
| 327 |
+
# Conn, A. R., Gould, N. I., & Toint, P. L. (2000).
|
| 328 |
+
# "Trust region methods". Siam. p. 197.
|
| 329 |
+
k_easy = min(k_trf-1,
|
| 330 |
+
1-np.sqrt(k_opt))
|
| 331 |
+
k_hard = 1-k_opt
|
| 332 |
+
|
| 333 |
+
# Solve subproblem
|
| 334 |
+
subprob = IterativeSubproblem(0,
|
| 335 |
+
lambda x: 0,
|
| 336 |
+
lambda x: g,
|
| 337 |
+
lambda x: H,
|
| 338 |
+
k_easy=k_easy,
|
| 339 |
+
k_hard=k_hard)
|
| 340 |
+
p, hits_boundary = subprob.solve(trust_radius)
|
| 341 |
+
|
| 342 |
+
# Compute objective function value
|
| 343 |
+
J = 1/2*np.dot(p, np.dot(H, p))+np.dot(g, p)
|
| 344 |
+
|
| 345 |
+
# Check if it respect k_trf
|
| 346 |
+
if hits_boundary:
|
| 347 |
+
assert_array_equal(np.abs(norm(p)-trust_radius) <=
|
| 348 |
+
(k_trf-1)*trust_radius, True)
|
| 349 |
+
else:
|
| 350 |
+
assert_equal(norm(p) <= trust_radius, True)
|
| 351 |
+
|
| 352 |
+
# Check if it respect k_opt
|
| 353 |
+
assert_equal(J <= k_opt*J_ac, True)
|
| 354 |
+
|
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion_krylov.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unit tests for Krylov space trust-region subproblem solver.
|
| 3 |
+
|
| 4 |
+
To run it in its simplest form::
|
| 5 |
+
nosetests test_optimize.py
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
import numpy as np
|
| 9 |
+
from scipy.optimize._trlib import (get_trlib_quadratic_subproblem)
|
| 10 |
+
from numpy.testing import (assert_,
|
| 11 |
+
assert_almost_equal,
|
| 12 |
+
assert_equal, assert_array_almost_equal)
|
| 13 |
+
|
| 14 |
+
KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6)
|
| 15 |
+
KrylovQP_disp = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6,
|
| 16 |
+
disp=True)
|
| 17 |
+
|
| 18 |
+
class TestKrylovQuadraticSubproblem:
    """Behavioral tests for the trlib-based Krylov subproblem solver."""

    @staticmethod
    def _make(qp_factory, H, g):
        # The Krylov solver only sees the Hessian through matrix-vector
        # products, so `hess` is a dummy and `hessp` does the work.
        return qp_factory(x=0,
                          fun=lambda x: 0,
                          jac=lambda x: g,
                          hess=lambda x: None,
                          hessp=lambda x, y: H.dot(y))

    @staticmethod
    def _check_kkt(H, g, p, lam):
        # First-order stationarity of the Lagrangian: (H + lam*I) p + g == 0.
        assert_almost_equal(
            np.linalg.norm(H.dot(p) + lam * p + g),
            0.0)

    def test_for_the_easy_case(self):
        # `H` is chosen such that `g` is not orthogonal to the
        # eigenvector associated with the smallest eigenvalue.
        H = np.array([[1.0, 0.0, 4.0],
                      [0.0, 2.0, 0.0],
                      [4.0, 0.0, 3.0]])
        g = np.array([5.0, 0.0, 4.0])

        subprob = self._make(KrylovQP, H, g)

        trust_radius = 1.0
        p, hits_boundary = subprob.solve(trust_radius)

        assert_array_almost_equal(p, np.array([-1.0, 0.0, 0.0]))
        assert_equal(hits_boundary, True)
        self._check_kkt(H, g, p, subprob.lam)
        # check trust region constraint
        assert_almost_equal(np.linalg.norm(p), trust_radius)

        trust_radius = 0.5
        p, hits_boundary = subprob.solve(trust_radius)

        assert_array_almost_equal(p,
                                  np.array([-0.46125446, 0., -0.19298788]))
        assert_equal(hits_boundary, True)
        self._check_kkt(H, g, p, subprob.lam)
        # check trust region constraint
        assert_almost_equal(np.linalg.norm(p), trust_radius)

    def test_for_the_hard_case(self):
        # `H` is chosen such that `g` is orthogonal to the
        # eigenvector associated with the smallest eigenvalue.
        H = np.array([[1.0, 0.0, 4.0],
                      [0.0, 2.0, 0.0],
                      [4.0, 0.0, 3.0]])
        g = np.array([0.0, 2.0, 0.0])

        subprob = self._make(KrylovQP, H, g)

        trust_radius = 1.0
        p, hits_boundary = subprob.solve(trust_radius)

        assert_array_almost_equal(p, np.array([0.0, -1.0, 0.0]))
        self._check_kkt(H, g, p, subprob.lam)
        # check trust region constraint
        assert_almost_equal(np.linalg.norm(p), trust_radius)

        trust_radius = 0.5
        p, hits_boundary = subprob.solve(trust_radius)

        assert_array_almost_equal(p, np.array([0.0, -0.5, 0.0]))
        self._check_kkt(H, g, p, subprob.lam)
        # check trust region constraint
        assert_almost_equal(np.linalg.norm(p), trust_radius)

    def test_for_interior_convergence(self):
        H = np.array([[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988],
                      [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588],
                      [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867],
                      [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166],
                      [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]])
        g = np.array([0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534])
        trust_radius = 1.1

        subprob = self._make(KrylovQP, H, g)
        p, hits_boundary = subprob.solve(trust_radius)

        self._check_kkt(H, g, p, subprob.lam)
        assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999,
                                      -0.67005053, 0.31586769])
        assert_array_almost_equal(hits_boundary, False)

    def test_for_very_close_to_zero(self):
        H = np.array([[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
                      [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
                      [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
                      [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
                      [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]])
        g = np.array([0, 0, 0, 0, 1e-6])
        trust_radius = 1.1

        subprob = self._make(KrylovQP, H, g)
        p, hits_boundary = subprob.solve(trust_radius)

        self._check_kkt(H, g, p, subprob.lam)
        # check trust region constraint
        assert_almost_equal(np.linalg.norm(p), trust_radius)

        assert_array_almost_equal(p, [0.06910534, -0.01432721,
                                      -0.65311947, -0.23815972,
                                      -0.84954934])
        assert_array_almost_equal(hits_boundary, True)

    def test_disp(self, capsys):
        H = -np.eye(5)
        g = np.array([0, 0, 0, 0, 1e-6])

        subprob = self._make(KrylovQP_disp, H, g)
        subprob.solve(1.1)
        out, err = capsys.readouterr()
        assert_(out.startswith(' TR Solving trust region problem'), repr(out))
|
| 171 |
+
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (300 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (1.04 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (9.8 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc
ADDED
|
Binary file (2.28 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc
ADDED
|
Binary file (2.32 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc
ADDED
|
Binary file (1.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc
ADDED
|
Binary file (1.33 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc
ADDED
|
Binary file (1.14 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc
ADDED
|
Binary file (2.44 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc
ADDED
|
Binary file (4.23 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc
ADDED
|
Binary file (1.96 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc
ADDED
|
Binary file (5.54 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
from torch.distributed._shard.sharded_tensor import (
|
| 4 |
+
_sharded_op_impl,
|
| 5 |
+
Shard,
|
| 6 |
+
ShardedTensor,
|
| 7 |
+
)
|
| 8 |
+
from torch.distributed._shard.common_op_utils import _basic_validation
|
| 9 |
+
|
| 10 |
+
def _sharded_op_common(op, early_stop_func, extra_check):
|
| 11 |
+
"""
|
| 12 |
+
Inject sharded tensor op registration with common logics executed before
|
| 13 |
+
different behaviors are done on either local shards or a local tensor.
|
| 14 |
+
|
| 15 |
+
Example::
|
| 16 |
+
>>> # xdoctest: +SKIP("Undefined variables")
|
| 17 |
+
>>> op = torch.transpose
|
| 18 |
+
>>> @_sharded_op_impl(op)
|
| 19 |
+
>>> @_sharded_op_common(op, early_stop_func, extra_check)
|
| 20 |
+
>>> def sharded_tensor_op(types, args, kwargs, process_group):
|
| 21 |
+
>>> ...
|
| 22 |
+
>>>
|
| 23 |
+
>>> st = sharded_tensor.rand(32, 16)
|
| 24 |
+
>>> st.transpose(1, 2)
|
| 25 |
+
>>> # This will call '_sharded_op_common'
|
| 26 |
+
|
| 27 |
+
Args:
|
| 28 |
+
op: The op to be registered and applied to all shards of the st.
|
| 29 |
+
early_stop_func (Callable, optional): the func for early stop.
|
| 30 |
+
Default: if ``None``, no early stop.
|
| 31 |
+
extra_check (Callable, optional): the func for extra condition check.
|
| 32 |
+
Default: if ``None``, no extra check.
|
| 33 |
+
|
| 34 |
+
Return:
|
| 35 |
+
func (Callable): Torch function for which we want to provide a sharded
|
| 36 |
+
implementation (ex: torch.transpose)
|
| 37 |
+
"""
|
| 38 |
+
def decorator_sharded_func(wrapped_func):
|
| 39 |
+
@functools.wraps(wrapped_func)
|
| 40 |
+
def wrapper(types, args=(), kwargs=None, pg=None):
|
| 41 |
+
_basic_validation(op, args, kwargs)
|
| 42 |
+
|
| 43 |
+
st = args[0]
|
| 44 |
+
if kwargs is None:
|
| 45 |
+
kwargs = {}
|
| 46 |
+
if extra_check:
|
| 47 |
+
extra_check(*args, **kwargs)
|
| 48 |
+
if early_stop_func:
|
| 49 |
+
early_stop = early_stop_func(*args, **kwargs)
|
| 50 |
+
if early_stop:
|
| 51 |
+
return st
|
| 52 |
+
return wrapped_func(types, args, kwargs, pg)
|
| 53 |
+
|
| 54 |
+
return wrapper
|
| 55 |
+
|
| 56 |
+
return decorator_sharded_func
|
| 57 |
+
|
| 58 |
+
def _register_sharded_op_on_local_shards(
    op, early_stop_func=None, extra_check=None, customized_func=None
):
    """
    Handles ``__torch_function__`` dispatch for ops which are performed on
    each shard of the sharded tensor such as elementwise op like
    ``torch.nn.functional.gelu`` or ``torch.nn.functional.relu``.

    For more complicated ops, a customized func can be used to generate
    the new shards and sharded tensor size.

    This function expects that the original ShardingSpec for the ShardedTensor
    is preserved irrespective of whether or not a customized function is used.

    Args:
        op: The op to be registered and applied to all shards of the st.
        early_stop_func (Callable, optional): the func for early stop.
            Default: if ``None``, no early stop.
        extra_check (Callable, optional): the func for extra condition check.
            Default: if ``None``, no extra check.
        customized_func (Callable, optional): the func for customized logic
            to generate new shards and sharded tensor size.
            Default: if ``None``, we simply lower to the real op call with
                all local shards of the st.

    Return:
        func (Callable): registered implementation for sharded op for
        ``__torch_function__`` dispatch.
    """
    @_sharded_op_impl(op)
    @_sharded_op_common(op, early_stop_func, extra_check)
    def sharded_tensor_op_on_local_shards(types, args=(), kwargs=None, pg=None):
        st = args[0]
        st_metadata = st.metadata()
        local_shards = st.local_shards()
        if customized_func:
            # Delegate shard generation (and possibly new metadata) entirely.
            local_shards_new, st_metadata = customized_func(args, kwargs, pg)
        else:
            # Apply the op shard-by-shard, keeping each shard's metadata.
            local_shards_new = [
                Shard(op(shard.tensor, *args[1:], **kwargs), shard.metadata)
                for shard in local_shards
            ]
        return ShardedTensor._init_from_local_shards_and_global_metadata(
            local_shards_new,
            st_metadata,
            process_group=pg,
            init_rrefs=st._init_rrefs,
            sharding_spec=st.sharding_spec()
        )
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
import torch.distributed.distributed_c10d as distributed_c10d
|
| 5 |
+
from torch.distributed._shard.sharded_tensor import (
|
| 6 |
+
ShardedTensor,
|
| 7 |
+
_sharded_op_impl
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
def _communicate_result(result, pg):
    """All-reduce a local boolean verdict across *pg*.

    Returns True only when every rank in the group reported True.
    NOTE(review): assumes a CUDA backend — tensors are placed on the
    current CUDA device; confirm against callers.
    """
    device = torch.device(torch.cuda.current_device())
    # Encode the local verdict as 1.0 / 0.0 so it can be summed.
    if result:
        result_tensor = torch.ones(1, device=device)
    else:
        result_tensor = torch.zeros(1, device=device)

    dist.all_reduce(result_tensor, group=pg)

    # The sum equals world_size exactly when every rank said True.
    expected_result = torch.ones(1, device=device) * dist.get_world_size(pg)

    return torch.equal(result_tensor, expected_result)
|
| 22 |
+
|
| 23 |
+
def binary_cmp(cmp_fun, types, args, kwargs=None, process_group=None):
    """Compare two ShardedTensors with *cmp_fun* (e.g. torch.equal).

    The verdict is agreed on collectively: every rank compares its local
    shards and the results are all-reduced via ``_communicate_result``.
    """
    if len(args) != 2:
        raise ValueError(f'Expected two arguments for torch.{cmp_fun.__name__}')

    st1, st2 = args[0], args[1]
    if not (isinstance(st1, ShardedTensor) and isinstance(st2, ShardedTensor)):
        raise TypeError(f'Both arguments to torch.{cmp_fun.__name__} need to be of type ShardedTensor')

    # Tensors are only comparable within the same process group.
    if st1._process_group != st2._process_group:
        return False

    # Ranks outside the group cannot join the collective; the tensors agree
    # for such a rank exactly when both exclude it.
    outside1 = distributed_c10d._rank_not_in_group(st1._process_group)
    outside2 = distributed_c10d._rank_not_in_group(st2._process_group)
    if outside1 or outside2:
        return outside1 == outside2

    # Global metadata must match.
    if st1.metadata() != st2.metadata():
        return _communicate_result(False, st1._process_group)

    # Same number of local shards on this rank.
    shards1 = st1.local_shards()
    shards2 = st2.local_shards()
    if len(shards1) != len(shards2):
        return _communicate_result(False, st1._process_group)

    # kwargs must be dict-like for the underlying comparison call.
    if kwargs is None:
        kwargs = {}

    # Pairwise shard comparison: metadata first, then tensor contents.
    for shard1, shard2 in zip(shards1, shards2):
        if shard1.metadata != shard2.metadata:
            return _communicate_result(False, st1._process_group)
        if not cmp_fun(shard1.tensor, shard2.tensor, **kwargs):
            return _communicate_result(False, st1._process_group)

    return _communicate_result(True, st1._process_group)
|
| 62 |
+
|
| 63 |
+
@_sharded_op_impl(torch.equal)
def equal(types, args, kwargs, process_group):
    """Sharded implementation of ``torch.equal`` via ``binary_cmp``."""
    return binary_cmp(torch.equal, types, args, kwargs, process_group)
|
| 66 |
+
|
| 67 |
+
@_sharded_op_impl(torch.allclose)
def allclose(types, args, kwargs, process_group):
    """Sharded implementation of ``torch.allclose`` via ``binary_cmp``."""
    return binary_cmp(torch.allclose, types, args, kwargs, process_group)
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
import torch.distributed._shard.sharded_tensor as sharded_tensor
|
| 4 |
+
from torch.distributed._shard.sharded_tensor import (
|
| 5 |
+
_sharded_op_impl,
|
| 6 |
+
)
|
| 7 |
+
|
| 8 |
+
def validate_param(param, param_name):
    """Raise ``ValueError`` when a required keyword argument is ``None``."""
    if param is not None:
        return
    raise ValueError(f"param: {param_name} shouldn't be None!")
|
| 11 |
+
|
| 12 |
+
@_sharded_op_impl(torch.nn.init.uniform_)
def uniform_(types, args=(), kwargs=None, pg=None):
    r"""
    Fills the Tensor in tensor.local_shards with values drawn from the uniform
    distribution :math:`\mathcal{U}(a, b)`.

    Args:
        tensor: tensor sharded across devices
        a: the lower bound of the uniform distribution
        b: the upper bound of the uniform distribution
    """
    validate_param(kwargs, "kwargs")
    st = kwargs["tensor"]
    validate_param(st, "tensor")
    low = kwargs['a']
    validate_param(low, "a")
    high = kwargs['b']
    validate_param(high, "b")

    # Initialization is purely local: each shard is filled independently.
    for shard in st.local_shards():
        torch.nn.init.uniform_(shard.tensor, a=low, b=high)
    return st
|
| 33 |
+
|
| 34 |
+
@_sharded_op_impl(torch.nn.init.normal_)
def normal_(types, args=(), kwargs=None, pg=None):
    r"""
    Fills the Tensors in tensor.local_shards with values drawn from the normal
    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        tensor: tensor sharded across devices
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
    """
    validate_param(kwargs, "kwargs")
    st = kwargs["tensor"]
    validate_param(st, "tensor")
    mu = kwargs['mean']
    validate_param(mu, "mean")
    sigma = kwargs['std']
    validate_param(sigma, "std")

    # Initialization is purely local: each shard is filled independently.
    for shard in st.local_shards():
        torch.nn.init.normal_(shard.tensor, mean=mu, std=sigma)
    return st
|
| 55 |
+
|
| 56 |
+
@_sharded_op_impl(torch.nn.init.kaiming_uniform_)
def kaiming_uniform_(types, args=(), kwargs=None, pg=None):
    r"""
    Fills the Tensors in tensor.local_shards with values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification` - He, K. et al. (2015), using a
    uniform distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::
        \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}

    Also known as He initialization.

    Args:
        tensor: tensor sharded across devices
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
    """
    validate_param(kwargs, "kwargs")
    st = kwargs["tensor"]
    validate_param(st, "tensor")
    neg_slope = kwargs['a']
    validate_param(neg_slope, "a")
    fan_mode = kwargs['mode']
    validate_param(fan_mode, "mode")
    nonlinearity = kwargs['nonlinearity']
    validate_param(nonlinearity, "nonlinearity")

    # Initialization is purely local: each shard is filled independently.
    for shard in st.local_shards():
        torch.nn.init.kaiming_uniform_(
            shard.tensor, a=neg_slope, mode=fan_mode, nonlinearity=nonlinearity
        )
    return st
|
| 91 |
+
|
| 92 |
+
@_sharded_op_impl(torch.nn.init.constant_)
def constant_(types, args=(), kwargs=None, pg=None):
    r"""
    Fills the input ShardedTensor with the value \text{val}val.

    Args:
        tensor: tensor sharded across devices
        val: the value to fill the tensor with
    """
    validate_param(kwargs, "kwargs")
    st = kwargs["tensor"]
    validate_param(st, "tensor")
    fill_value = kwargs['val']
    validate_param(fill_value, "val")
    # Initialization is purely local: each shard is filled independently.
    for shard in st.local_shards():
        torch.nn.init.constant_(shard.tensor, val=fill_value)
    return st
|
| 108 |
+
|
| 109 |
+
# Creation ops mapped to their sharded_tensor equivalents; used to implement
# the *_like family for ShardedTensor inputs.
tensor_like_creation_op_map = {
    torch.full_like: sharded_tensor.full,
    torch.empty_like: sharded_tensor.empty,
    torch.zeros_like: sharded_tensor.zeros,
    torch.ones_like: sharded_tensor.ones,
    torch.rand_like: sharded_tensor.rand,
    torch.randn_like: sharded_tensor.randn,
}

# tensor ops that behave the same as the default tensor
def register_tensor_creation_op(op):
    @_sharded_op_impl(op)
    def tensor_creation_op(types, args=(), kwargs=None, pg=None):
        """
        Handles ``__torch_function__`` dispatch for tensor creation ops that
        takes a ShardedTensor as argument, such as ``torch.zeros_like`` or
        ``torch.full_like``.
        """
        creation_op = tensor_like_creation_op_map.get(op, None)
        if creation_op is None:
            raise RuntimeError(f"Tensor creation {op} not supported!")
        if kwargs is None:
            kwargs = {}

        st = args[0]

        # Re-create with the same sharding spec and the same global size.
        new_st = creation_op(st.sharding_spec(), st.size(), *args[1:], **kwargs)  # type: ignore[operator]
        return new_st


for _creation_op in (torch.full_like, torch.empty_like, torch.zeros_like,
                     torch.ones_like, torch.rand_like, torch.randn_like):
    register_tensor_creation_op(_creation_op)
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch.distributed._shard.sharded_tensor import (
|
| 4 |
+
_sharded_op_impl,
|
| 5 |
+
)
|
| 6 |
+
|
| 7 |
+
# This is used by `_apply()` within module.py to set new
|
| 8 |
+
# parameters after apply a certain method, we should follow
|
| 9 |
+
# the future behavior of overwriting the existing tensor
|
| 10 |
+
# instead of doing in-place change using `.data = `.
|
| 11 |
+
@_sharded_op_impl(torch._has_compatible_shallow_copy_type)
def tensor_has_compatible_shallow_copy_type(types, args=(), kwargs=None, pg=None):
    """Always report that a ShardedTensor cannot be shallow-copied in place.

    Returning ``False`` makes ``Module._apply()`` assign a brand-new tensor
    instead of overwriting ``.data`` on the existing one (see the module-level
    comment above).
    """
    return False
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import copy
|
| 3 |
+
import torch
|
| 4 |
+
from torch.distributed._shard.sharded_tensor import (
|
| 5 |
+
_sharded_op_impl,
|
| 6 |
+
Shard,
|
| 7 |
+
ShardedTensor,
|
| 8 |
+
)
|
| 9 |
+
from ._common import (
|
| 10 |
+
_register_sharded_op_on_local_shards,
|
| 11 |
+
)
|
| 12 |
+
from torch.distributed._shard.common_op_utils import _register_default_op
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Tensor properties access
# Each of these ops is routed to the default torch.Tensor behavior on the
# wrapper itself via ``_register_default_op`` (no per-shard work needed).
_register_default_op(torch.Tensor.shape.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
_register_default_op(torch.Tensor.dtype.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
_register_default_op(torch.Tensor.layout.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
_register_default_op(torch.Tensor.size, _sharded_op_impl)
_register_default_op(torch.Tensor.dim, _sharded_op_impl)
_register_default_op(torch.Tensor.ndim.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
_register_default_op(torch.Tensor.is_contiguous, _sharded_op_impl)
_register_default_op(torch.Tensor.contiguous, _sharded_op_impl)
_register_default_op(torch.Tensor.is_floating_point, _sharded_op_impl)

# __reduce_ex__ to dispatch to get_state/set_state
_register_default_op(torch.Tensor.__reduce_ex__, _sharded_op_impl)

# autograd related properties
_register_default_op(torch.Tensor.requires_grad.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
# TODO: set grad with a ShardedTensor that consists of all local grads
_register_default_op(torch.Tensor.grad.__get__, _sharded_op_impl)  # type: ignore[union-attr]
_register_default_op(torch.Tensor.grad_fn.__get__, _sharded_op_impl)  # type: ignore[union-attr]
_register_default_op(torch.Tensor.is_leaf.__get__, _sharded_op_impl)  # type: ignore[attr-defined]
|
| 35 |
+
|
| 36 |
+
# The device property is ambiguous from a global perspective: a
# ShardedTensor spans multiple devices (possibly across hosts). We choose
# to report the device of the local shard on each rank.
@_sharded_op_impl(torch.Tensor.device.__get__)
def tensor_device(types, args=(), kwargs=None, pg=None):
    """Return the device of this rank's local shard (or a backend default)."""
    st = args[0]
    # Validate types
    if not isinstance(st, ShardedTensor):
        raise TypeError("input needs to be a ShardedTensor")
    local_shards = st._local_shards
    if local_shards:
        return local_shards[0].tensor.device
    # No local shards on this rank: fall back to a backend-appropriate device.
    if pg and pg._get_backend_name() == "gloo":
        return torch.device("cpu")
    return torch.device(torch.cuda.current_device())
|
| 54 |
+
|
| 55 |
+
@_sharded_op_impl(torch.Tensor.is_meta.__get__)  # type: ignore[attr-defined]
def st_is_meta(types, args=(), kwargs=None, pg=None):
    # A ShardedTensor is considered "meta" iff its local tensor is meta.
    return args[0].local_tensor().is_meta
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def sharded_type_as_check(*args, **kwargs):
    """
    Perform extra checks for the sharded_type_as op such as the input needs to
    be either a Tensor or ShardedTensor.

    Args: same as ``torch.Tensor.type_as``.

    Return: None

    Raises:
        ValueError: if no cast target is given, or it is neither a
            ``torch.Tensor`` nor a ``ShardedTensor``.
    """
    # args[0] is the ShardedTensor itself; args[1] is the cast target.
    if len(args) < 2:
        raise ValueError("Needs to give a tensor to cast type as!")
    # Idiomatic single isinstance call with a tuple of accepted types.
    if not isinstance(args[1], (torch.Tensor, ShardedTensor)):
        raise ValueError("Needs to give a Tensor or ShardedTensor to cast type as!")
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def same_dtype(*args, **kwargs):
    """
    When the dtype is the same, return the original ShardedTensor.

    Args: same as ``torch.Tensor.type_as``.

    Return (bool): Whether to return early or not.
    """
    source, target = args[0], args[1]
    return source.dtype == target.dtype
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def sharded_type_as(args, kwargs, pg):
    """
    Handles ``__torch_function__`` dispatch for the ``torch.Tensor.type_as`` op.

    Args: same as ``torch.Tensor.type_as``.

    Return:
        new_local_shards (List[Shard]): Local shards for the new sharded tensor.
        st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor.
    """
    st, target = args[0], args[1]
    # Casting against another ShardedTensor means casting against its local part.
    if isinstance(target, ShardedTensor):
        target = target.local_tensor()
    new_local_shards = [
        Shard(shard.tensor.type_as(target), shard.metadata)
        for shard in st.local_shards()
    ]
    # Deep-copy the metadata and record the new dtype.
    st_meta = copy.deepcopy(st._metadata)
    st_meta.tensor_properties.dtype = target.dtype
    return new_local_shards, st_meta
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
# Dispatch ``torch.Tensor.type_as`` on ShardedTensor through the shard-wise
# cast; ``same_dtype`` short-circuits when no cast is actually needed.
_register_sharded_op_on_local_shards(
    torch.Tensor.type_as,
    early_stop_func=same_dtype,
    extra_check=sharded_type_as_check,
    customized_func=sharded_type_as,
)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def sharded_deepcopy(args, kwargs, pg):
    """Deep-copy local shards and metadata for a ShardedTensor.

    NOTE: we implement the deepcopy magic method directly instead of relying
    on the default ``tensor.__deepcopy__`` plus ``clone()``, because the
    default tensor deepcopy copies every attribute and the process_group
    held by a ShardedTensor cannot be deep copied.
    """
    st = args[0]
    copied_shards = copy.deepcopy(st.local_shards())
    copied_metadata = copy.deepcopy(st.metadata())
    return copied_shards, copied_metadata
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
# Route ``copy.deepcopy`` of a ShardedTensor through the shard-wise
# implementation above (the process group itself is never deep copied).
_register_sharded_op_on_local_shards(
    torch.Tensor.__deepcopy__,
    customized_func=sharded_deepcopy,
)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@_sharded_op_impl(torch.Tensor.copy_)
def sharded_inplace_copy(types, args, kwargs, pg):
    """In-place ``copy_`` between two ShardedTensors with identical metadata."""
    # NOTE: inplace op don't need to rewrap
    if kwargs is None:
        kwargs = {}
    dst_st, src_st = args[0], args[1]
    non_blocking = kwargs.get("non_blocking", False)
    shard_pairs = list(zip(dst_st.local_shards(), src_st.local_shards()))
    # Validate every pair up front so a mismatch never leaves a partial copy.
    for dst_shard, src_shard in shard_pairs:
        if dst_shard.metadata != src_shard.metadata:
            raise RuntimeError(
                "inplace copy can only happen between two ShardedTensor with same metadata!"
            )
    for dst_shard, src_shard in shard_pairs:
        dst_shard.tensor.copy_(src_shard.tensor, non_blocking)

    return dst_st
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def sharded_clone(args, kwargs, pg):
    """Clone every local shard and deep-copy metadata for a new ShardedTensor."""
    st = args[0]
    memory_format = kwargs.get("memory_format", None)
    # Only the (default) preserve_format layout is supported.
    if memory_format and memory_format != torch.preserve_format:
        raise RuntimeError("Only support torch.preserve_format for ShardedTensor!")
    cloned_local_shards = []
    for local_shard in st.local_shards():
        cloned_tensor = local_shard.tensor.clone(memory_format=memory_format)
        cloned_local_shards.append(
            Shard(cloned_tensor, metadata=copy.deepcopy(local_shard.metadata))
        )
    new_metadata = copy.deepcopy(st.metadata())
    return cloned_local_shards, new_metadata
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
# Route ``Tensor.clone`` on ShardedTensor through the shard-wise clone above.
_register_sharded_op_on_local_shards(
    torch.Tensor.clone,
    customized_func=sharded_clone,
)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def sharded_detach(args, kwargs, pg):
    """Detach every local shard; the detached result never requires grad."""
    st = args[0]
    detached_local_shards = []
    for local_shard in st.local_shards():
        detached_local_shards.append(
            Shard(
                local_shard.tensor.detach(),
                metadata=copy.deepcopy(local_shard.metadata),
            )
        )
    new_metadata = copy.deepcopy(st.metadata())
    # Mirror detach semantics in the sharded tensor's metadata as well.
    new_metadata.tensor_properties.requires_grad = False
    return detached_local_shards, new_metadata
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
# Route ``Tensor.detach`` on ShardedTensor through the shard-wise detach above.
_register_sharded_op_on_local_shards(
    torch.Tensor.detach,
    customized_func=sharded_detach,
)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
@_sharded_op_impl(torch.Tensor.requires_grad_)
def tensor_requires_grad_set(types, args=(), kwargs=None, pg=None):
    """
    Handle ``Tensor.requires_grad_`` on a ShardedTensor: flip requires_grad
    on every local shard, on the wrapper itself, and in the sharded tensor's
    metadata, then return the (mutated) input ShardedTensor.
    """
    self_st = args[0]
    # Validate types
    if not isinstance(self_st, ShardedTensor):
        raise TypeError("input needs to be a ShardedTensor")

    if kwargs is None:
        kwargs = {}

    # ``requires_grad`` may be passed positionally or by keyword; defaults to True.
    requires_grad = args[1] if len(args) > 1 else kwargs.get("requires_grad", True)
    if requires_grad == self_st.requires_grad:
        # Already in the requested state -- nothing to do.
        return self_st

    for local_shard in self_st.local_shards():
        local_shard.tensor.requires_grad_(requires_grad)

    # update the wrapper class property
    with torch._C.DisableTorchFunctionSubclass():
        self_st.requires_grad_(requires_grad)
    # update the metadata in the meanwhile
    self_st._metadata.tensor_properties.requires_grad = requires_grad
    return self_st
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import collections.abc
|
| 3 |
+
import copy
|
| 4 |
+
from typing import Optional, List, Sequence, TYPE_CHECKING
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch.distributed import distributed_c10d as c10d
|
| 8 |
+
from torch.distributed import rpc
|
| 9 |
+
from torch.distributed._shard.sharding_spec._internals import (
|
| 10 |
+
check_tensor,
|
| 11 |
+
validate_non_overlapping_shards_metadata,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
from .metadata import TensorProperties, ShardedTensorMetadata
|
| 15 |
+
from .shard import Shard
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from torch.distributed._shard.metadata import ShardMetadata
|
| 19 |
+
|
| 20 |
+
def _parse_and_validate_remote_device(pg, remote_device):
    """
    Resolve a ``remote_device`` into ``(rank_or_worker_id, device)``.

    If the remote device names an RPC worker, the matching worker's id is
    returned (requires an initialized RPC agent); otherwise the rank is
    validated against ``pg`` and returned as-is.

    Raises:
        ValueError: if ``remote_device`` is None, its rank is not in ``pg``,
            or the worker name is unknown.
        RuntimeError: if a worker name is given but RPC is not initialized.
    """
    if remote_device is None:
        raise ValueError("remote device is None")

    worker_name = remote_device.worker_name()
    rank = remote_device.rank()
    device = remote_device.device()

    # Validate rank, skip validation if rank is not part of process group.
    if rank is not None and not c10d._rank_not_in_group(pg):
        pg_global_ranks = c10d.get_process_group_ranks(pg)
        if rank not in pg_global_ranks:
            raise ValueError(
                f"Global rank {rank} does not exist in input process group: {pg_global_ranks}"
            )

    if worker_name is not None:
        if not rpc._is_current_rpc_agent_set():
            raise RuntimeError(
                f"RPC framework needs to be initialized for using worker names: {worker_name}"
            )

        workers = rpc._get_current_rpc_agent().get_worker_infos()
        for worker in workers:
            if worker.name == worker_name:
                return worker.id, device

        raise ValueError(f"Invalid worker name: {worker_name}")

    return rank, device
|
| 50 |
+
|
| 51 |
+
def _validate_output_tensor_for_gather(
    my_rank: int,
    dst_rank: int,
    size: torch.Size,
    dst_tensor: Optional[torch.Tensor],
) -> None:
    """
    Validate the ``dst_tensor`` argument of a sharded-tensor gather.

    On the destination rank, ``dst_tensor`` must be provided and have exactly
    ``size``; on every other rank it must be omitted.

    Raises:
        ValueError: if ``dst_tensor`` is missing or mis-sized on the
            destination rank, or supplied on a non-destination rank.
    """
    if dst_rank == my_rank:
        if dst_tensor is None:
            raise ValueError(
                f"Argument ``dst_tensor`` must be specified on destination rank {dst_rank}"
            )
        if tuple(size) != tuple(dst_tensor.size()):
            raise ValueError(
                f"Argument ``dst_tensor`` have size {tuple(dst_tensor.size())},"
                f"but should be {tuple(size)}"
            )
    # BUGFIX: was ``elif dst_tensor:``, which invokes Tensor.__bool__ and
    # raises "Boolean value of Tensor with more than one element is
    # ambiguous" for any multi-element tensor instead of the intended error.
    elif dst_tensor is not None:
        raise ValueError(
            "Argument ``dst_tensor`` must NOT be specified "
            "on non-destination ranks."
        )
|
| 72 |
+
|
| 73 |
+
def _flatten_tensor_size(size) -> torch.Size:
    """
    Validate a tensor size given either as ``(d0, d1, ...)`` or as a single
    nested sequence ``((d0, d1, ...),)`` and return it as a ``torch.Size``.
    """
    # A single sequence argument (e.g. ``f([2, 3])``) is unwrapped first.
    unwrap = len(size) == 1 and isinstance(size[0], collections.abc.Sequence)
    dims = list(size[0]) if unwrap else list(size)

    if not all(isinstance(dim, int) for dim in dims):
        raise TypeError(f'size has to be a sequence of ints, found: {dims}')

    return torch.Size(dims)
|
| 87 |
+
|
| 88 |
+
def _raise_if_mismatch(expected, actual, prop_name, ranks, is_local=True):
    """
    Raise ``ValueError`` when ``expected != actual`` for tensor property
    ``prop_name``; the message differs for local vs. cross-rank checks.
    """
    if is_local:
        # Local comparison: ``ranks`` is the single rank being validated.
        assert isinstance(ranks, int)
        if expected != actual:
            raise ValueError(
                f"Local shards' tensor {prop_name} property need to be the same on rank:{ranks}! "
                f"Found one local shard tensor {prop_name}={expected}, "
                f"the other local shard tensor {prop_name}={actual}."
            )
        return

    # compare failure check across ranks, ranks list should have two rank
    assert len(ranks) == 2
    if expected != actual:
        raise ValueError(
            f"ShardedTensor {prop_name} property does not match from different ranks! "
            f"Found {prop_name}={expected} on rank:{ranks[0]}, "
            f"and {prop_name}={actual} on rank:{ranks[1]}."
        )
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def build_metadata_from_local_shards(
    local_shards: List[Shard],
    global_size: torch.Size,
    current_rank: int,
    pg: c10d.ProcessGroup
) -> ShardedTensorMetadata:
    """
    Build a rank-local ``ShardedTensorMetadata`` from this rank's shards.

    Validates every local shard (layout, contiguity, placement rank/device,
    tensor size vs. its metadata, plus dtype/requires_grad/pin_memory
    consistency across local shards), then packages the shard metadatas
    together with ``global_size`` into a metadata object suitable for
    all_gather across ranks.

    Raises:
        ValueError: on any of the validation failures listed above.
    """
    assert len(local_shards) > 0, "must have local shards!"
    local_shard_metadatas: List[ShardMetadata] = []

    # Properties of the first shard act as the reference every other local
    # shard must match.
    first_shard_dtype = local_shards[0].tensor.dtype
    first_shard_layout = local_shards[0].tensor.layout
    first_shard_requires_grad = local_shards[0].tensor.requires_grad
    first_shard_is_pinned = local_shards[0].tensor.is_pinned()

    # 1). Validate local tensors and associated metadatas
    for local_shard in local_shards:
        local_shard_tensor = local_shard.tensor
        local_shard_meta = local_shard.metadata
        local_shard_metadatas.append(local_shard_meta)
        rank, local_device = _parse_and_validate_remote_device(pg, local_shard_meta.placement)

        # NOTE(review): a layout that differs from the first shard's also
        # takes this branch, though the message mentions only torch.strided.
        if local_shard_tensor.layout != torch.strided or local_shard_tensor.layout != first_shard_layout:
            raise ValueError(
                f'Only torch.strided layout is currently supported, but found '
                f'{local_shard_tensor.layout} on rank:{current_rank}!'
            )

        if not local_shard_tensor.is_contiguous():
            raise ValueError('Only torch.contiguous_format memory_format is currently supported!')

        if rank != current_rank:
            raise ValueError(
                f"Local shard metadata's rank does not match with the rank in its process group! "
                f'Found current rank in the process group: {current_rank}, '
                f"local ShardMetadata placement's rank: {rank}"
            )
        if local_shard_tensor.device != local_device:
            raise ValueError(
                f"Local shard tensor device does not match with local Shard's placement! "
                f"Found local shard tensor device: {local_shard_tensor.device}, "
                f"local shard metadata placement device: {local_device}"
            )

        _raise_if_mismatch(local_shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
        _raise_if_mismatch(local_shard_tensor.is_pinned(), first_shard_is_pinned, "pin_memory", current_rank)
        _raise_if_mismatch(local_shard_tensor.dtype, first_shard_dtype, "dtype", current_rank)
        _raise_if_mismatch(local_shard_tensor.requires_grad, first_shard_requires_grad, "requires_grad", current_rank)

    # 2). Build a "local" ShardedTensorMetadata with all local shards on this rank, then
    # do all_gather to collect local_sharded_tensor_metadata from all ranks
    local_tensor_properties = TensorProperties(
        dtype=first_shard_dtype,
        layout=first_shard_layout,
        requires_grad=first_shard_requires_grad,
        memory_format=torch.contiguous_format,
        pin_memory=first_shard_is_pinned
    )

    local_sharded_tensor_metadata = ShardedTensorMetadata(
        shards_metadata=local_shard_metadatas,
        size=global_size,
        tensor_properties=local_tensor_properties)

    return local_sharded_tensor_metadata
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def build_global_metadata(gathered_metadatas: Sequence[Optional[ShardedTensorMetadata]]):
    """
    Merge per-rank ``ShardedTensorMetadata`` (``None`` for shard-less ranks)
    into one global metadata object.

    The first non-None entry seeds the result; every later entry must match
    its global size, dtype, requires_grad, and pin_memory, and contributes
    its shards_metadata. The combined shard set is then checked for overlaps
    and for full coverage of the global size.

    Raises:
        ValueError: on any cross-rank property mismatch, overlapping or
            incomplete shards, or when no rank has local shards at all.
    """
    global_sharded_tensor_metadata = None
    global_metadata_rank = 0

    for rank, rank_metadata in enumerate(gathered_metadatas):
        if rank_metadata is None:
            continue

        if global_sharded_tensor_metadata is None:
            # First rank with shards: deep-copy it as the merge base.
            global_sharded_tensor_metadata = copy.deepcopy(rank_metadata)
            global_metadata_rank = rank
        else:
            _raise_if_mismatch(global_sharded_tensor_metadata.size,
                               rank_metadata.size,
                               "global_size",
                               [global_metadata_rank, rank],
                               is_local=False)

            # don't need to check layout and memory format as we already checked in local shards validation stage
            _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.dtype,
                               rank_metadata.tensor_properties.dtype,
                               "dtype",
                               [global_metadata_rank, rank],
                               is_local=False)

            _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.requires_grad,
                               rank_metadata.tensor_properties.requires_grad,
                               "requires_grad",
                               [global_metadata_rank, rank],
                               is_local=False)

            _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.pin_memory,
                               rank_metadata.tensor_properties.pin_memory,
                               "pin_memory",
                               [global_metadata_rank, rank],
                               is_local=False)
            # pass all validations, extend shards metadata
            global_sharded_tensor_metadata.shards_metadata.extend(rank_metadata.shards_metadata)

    if global_sharded_tensor_metadata is not None:
        # check if shards_metadata have overlap shards
        validate_non_overlapping_shards_metadata(global_sharded_tensor_metadata.shards_metadata)

        # check if the shards_metadata is compatible with global size of the sharded tensor.
        check_tensor(global_sharded_tensor_metadata.shards_metadata, global_sharded_tensor_metadata.size)
    else:
        raise ValueError("ShardedTensor have no local shards on all ranks!")

    return global_sharded_tensor_metadata
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .api import (
|
| 2 |
+
ShardingPlan,
|
| 3 |
+
ShardingPlanner
|
| 4 |
+
)
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (256 Bytes). View file
|
|
|