Dataset columns: repo (string, 2–99 chars); file (string, 13–225 chars); code (string, 0–18.3M chars); file_length (int64, 0–18.3M); avg_line_length (float64, 0–1.36M); max_line_length (int64, 0–4.26M); extension_type (string, 1 class).
scipy
scipy-main/scipy/_build_utils/compiler_helper.py
""" Helpers for detection of compiler features """ import tempfile import os import sys from numpy.distutils.system_info import dict_append def try_compile(compiler, code=None, flags=[], ext=None): """Returns True if the compiler is able to compile the given code""" from distutils.errors import CompileError from numpy.distutils.fcompiler import FCompiler if code is None: if isinstance(compiler, FCompiler): code = " program main\n return\n end" else: code = 'int main (int argc, char **argv) { return 0; }' ext = ext or compiler.src_extensions[0] with tempfile.TemporaryDirectory() as temp_dir: fname = os.path.join(temp_dir, 'main'+ext) with open(fname, 'w') as f: f.write(code) try: compiler.compile([fname], output_dir=temp_dir, extra_postargs=flags) except CompileError: return False return True def has_flag(compiler, flag, ext=None): """Returns True if the compiler supports the given flag""" return try_compile(compiler, flags=[flag], ext=ext) def get_cxx_std_flag(compiler): """Detects compiler flag for c++14, c++11, or None if not detected""" # GNU C compiler documentation uses single dash: # https://gcc.gnu.org/onlinedocs/gcc/Standards.html # but silently understands two dashes, like --std=c++11 too. # Other GCC compatible compilers, like Intel C Compiler on Linux do not. gnu_flags = ['-std=c++14', '-std=c++11'] flags_by_cc = { 'msvc': ['/std:c++14', None], 'intelw': ['/Qstd=c++14', '/Qstd=c++11'], 'intelem': ['-std=c++14', '-std=c++11'] } flags = flags_by_cc.get(compiler.compiler_type, gnu_flags) for flag in flags: if flag is None: return None if has_flag(compiler, flag, ext='.cpp'): return flag from numpy.distutils import log log.warn('Could not detect c++ standard flag') return None def get_c_std_flag(compiler): """Detects compiler flag to enable C99""" gnu_flag = '-std=c99' flag_by_cc = { 'msvc': None, 'intelw': '/Qstd=c99', 'intelem': '-std=c99' } flag = flag_by_cc.get(compiler.compiler_type, gnu_flag) if flag is None: return None if has_flag(compiler, flag, ext='.c'): return flag from numpy.distutils import log log.warn('Could not detect c99 standard flag') return None def try_add_flag(args, compiler, flag, ext=None): """Appends flag to the list of arguments if supported by the compiler""" if try_compile(compiler, flags=args+[flag], ext=ext): args.append(flag) def set_c_flags_hook(build_ext, ext): """Sets basic compiler flags for compiling C99 code""" std_flag = get_c_std_flag(build_ext.compiler) if std_flag is not None: ext.extra_compile_args.append(std_flag) def set_cxx_flags_hook(build_ext, ext): """Sets basic compiler flags for compiling C++11 code""" cc = build_ext._cxx_compiler args = ext.extra_compile_args std_flag = get_cxx_std_flag(cc) if std_flag is not None: args.append(std_flag) if sys.platform == 'darwin': # Set min macOS version min_macos_flag = '-mmacosx-version-min=10.9' if has_flag(cc, min_macos_flag): args.append(min_macos_flag) ext.extra_link_args.append(min_macos_flag) def set_cxx_flags_clib_hook(build_clib, build_info): cc = build_clib.compiler new_args = [] new_link_args = [] std_flag = get_cxx_std_flag(cc) if std_flag is not None: new_args.append(std_flag) if sys.platform == 'darwin': # Set min macOS version min_macos_flag = '-mmacosx-version-min=10.9' if has_flag(cc, min_macos_flag): new_args.append(min_macos_flag) new_link_args.append(min_macos_flag) dict_append(build_info, extra_compiler_args=new_args, extra_link_args=new_link_args)
file_length: 4,030; avg_line_length: 28.859259; max_line_length: 80; extension_type: py
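A minimal usage sketch for the helpers above, assuming numpy.distutils is installed and a working C toolchain is on the path; the new_compiler()/customize_compiler() setup and the probed flags are illustrative and not part of compiler_helper.py itself:

from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler

cc = new_compiler()
customize_compiler(cc)  # pick up CC/CFLAGS from the build environment

# Probe for a C99 flag: '-std=c99' is expected on GCC/Clang, None on MSVC.
c99_flag = get_c_std_flag(cc)
print("C99 flag:", c99_flag)

# has_flag() tests a single flag by attempting a throwaway compile.
print("-Wall supported:", has_flag(cc, '-Wall', ext='.c'))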
scipy
scipy-main/scipy/_build_utils/system_info.py
def combine_dict(*dicts, **kw):
    """
    Combine Numpy distutils style library configuration dictionaries.

    Parameters
    ----------
    *dicts
        Dictionaries of keys. List-valued keys will be concatenated.
        Otherwise, duplicate keys with different values result in an error.
        The input arguments are not modified.
    **kw
        Keyword arguments are treated as an additional dictionary
        (the first one, i.e., prepended).

    Returns
    -------
    combined
        Dictionary with combined values.
    """
    new_dict = {}

    for d in (kw,) + dicts:
        for key, value in d.items():
            if new_dict.get(key, None) is not None:
                old_value = new_dict[key]
                if isinstance(value, (list, tuple)):
                    if isinstance(old_value, (list, tuple)):
                        new_dict[key] = list(old_value) + list(value)
                        continue
                elif value == old_value:
                    continue

                raise ValueError("Conflicting configuration dicts: {!r} {!r}"
                                 "".format(new_dict, d))
            else:
                new_dict[key] = value

    return new_dict
file_length: 1,225; avg_line_length: 30.435897; max_line_length: 77; extension_type: py
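A short, hypothetical usage sketch of combine_dict (the dictionaries below are made up): list-valued keys are concatenated, identical scalar values are tolerated, and keyword arguments act as an extra, prepended dictionary.

a = {'libraries': ['lapack'], 'define_macros': [('USE_A', 1)]}
b = {'libraries': ['blas']}

cfg = combine_dict(a, b, include_dirs=['/usr/include'])
# cfg == {'include_dirs': ['/usr/include'],
#         'libraries': ['lapack', 'blas'],
#         'define_macros': [('USE_A', 1)]}

# Conflicting scalar values raise:
# combine_dict({'language': 'c'}, {'language': 'c++'})  # ValueError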
scipy
scipy-main/scipy/_build_utils/tests/test_scipy_version.py
import re

import scipy
from numpy.testing import assert_


def test_valid_scipy_version():
    # Verify that the SciPy version is a valid one (no .post suffix or other
    # nonsense). See NumPy issue gh-6431 for an issue caused by an invalid
    # version.
    version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
    dev_suffix = r"(\.dev0\+.+([0-9a-f]{7}|Unknown))"
    if scipy.version.release:
        res = re.match(version_pattern, scipy.__version__)
    else:
        res = re.match(version_pattern + dev_suffix, scipy.__version__)

    assert_(res is not None, scipy.__version__)
file_length: 606; avg_line_length: 30.947368; max_line_length: 76; extension_type: py
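For illustration, here is what the two patterns in the test above accept; the version strings are made-up examples, not actual SciPy releases.

import re

version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
dev_suffix = r"(\.dev0\+.+([0-9a-f]{7}|Unknown))"

# released versions: plain X.Y.Z, optionally with an alpha/beta/rc tag
for v in ("1.9.0", "1.9.0rc1", "1.9.0b2"):
    assert re.match(version_pattern, v) is not None

# development versions additionally carry a .dev0+ suffix ending in a
# short git hash (or 'Unknown')
assert re.match(version_pattern + dev_suffix,
                "1.10.0.dev0+1234.5a6b7c8") is not None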
scipy
scipy-main/scipy/_build_utils/tests/__init__.py
file_length: 0; avg_line_length: 0; max_line_length: 0; extension_type: py (empty file)
scipy
scipy-main/scipy/optimize/_differentialevolution.py
""" differential_evolution: The differential evolution global optimization algorithm Added by Andrew Nelson 2014 """ import warnings import numpy as np from scipy.optimize import OptimizeResult, minimize from scipy.optimize._optimize import _status_message from scipy._lib._util import check_random_state, MapWrapper, _FunctionWrapper from scipy.optimize._constraints import (Bounds, new_bounds_to_old, NonlinearConstraint, LinearConstraint) from scipy.sparse import issparse __all__ = ['differential_evolution'] _MACHEPS = np.finfo(np.float64).eps def differential_evolution(func, bounds, args=(), strategy='best1bin', maxiter=1000, popsize=15, tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None, callback=None, disp=False, polish=True, init='latinhypercube', atol=0, updating='immediate', workers=1, constraints=(), x0=None, *, integrality=None, vectorized=False): """Finds the global minimum of a multivariate function. The differential evolution method [1]_ is stochastic in nature. It does not use gradient methods to find the minimum, and can search large areas of candidate space, but often requires larger numbers of function evaluations than conventional gradient-based techniques. The algorithm is due to Storn and Price [2]_. Parameters ---------- func : callable The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. The number of parameters, N, is equal to ``len(x)``. bounds : sequence or `Bounds` Bounds for variables. There are two ways to specify the bounds: 1. Instance of `Bounds` class. 2. ``(min, max)`` pairs for each element in ``x``, defining the finite lower and upper bounds for the optimizing argument of `func`. The total number of bounds is used to determine the number of parameters, N. If there are parameters whose bounds are equal the total number of free parameters is ``N - N_equal``. args : tuple, optional Any additional fixed parameters needed to completely specify the objective function. strategy : str, optional The differential evolution strategy to use. Should be one of: - 'best1bin' - 'best1exp' - 'rand1exp' - 'randtobest1exp' - 'currenttobest1exp' - 'best2exp' - 'rand2exp' - 'randtobest1bin' - 'currenttobest1bin' - 'best2bin' - 'rand2bin' - 'rand1bin' The default is 'best1bin'. maxiter : int, optional The maximum number of generations over which the entire population is evolved. The maximum number of function evaluations (with no polishing) is: ``(maxiter + 1) * popsize * (N - N_equal)`` popsize : int, optional A multiplier for setting the total population size. The population has ``popsize * (N - N_equal)`` individuals. This keyword is overridden if an initial population is supplied via the `init` keyword. When using ``init='sobol'`` the population size is calculated as the next power of 2 after ``popsize * (N - N_equal)``. tol : float, optional Relative tolerance for convergence, the solving stops when ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, where and `atol` and `tol` are the absolute and relative tolerance respectively. mutation : float or tuple(float, float), optional The mutation constant. In the literature this is also known as differential weight, being denoted by F. If specified as a float it should be in the range [0, 2]. If specified as a tuple ``(min, max)`` dithering is employed. 
Dithering randomly changes the mutation constant on a generation by generation basis. The mutation constant for that generation is taken from ``U[min, max)``. Dithering can help speed convergence significantly. Increasing the mutation constant increases the search radius, but will slow down convergence. recombination : float, optional The recombination constant, should be in the range [0, 1]. In the literature this is also known as the crossover probability, being denoted by CR. Increasing this value allows a larger number of mutants to progress into the next generation, but at the risk of population stability. seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Specify `seed` for repeatable minimizations. disp : bool, optional Prints the evaluated `func` at every iteration. callback : callable, `callback(xk, convergence=val)`, optional A function to follow the progress of the minimization. ``xk`` is the best solution found so far. ``val`` represents the fractional value of the population convergence. When ``val`` is greater than one the function halts. If callback returns `True`, then the minimization is halted (any polishing is still carried out). polish : bool, optional If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B` method is used to polish the best population member at the end, which can improve the minimization slightly. If a constrained problem is being studied then the `trust-constr` method is used instead. For large problems with many constraints, polishing can take a long time due to the Jacobian computations. init : str or array-like, optional Specify which type of population initialization is performed. Should be one of: - 'latinhypercube' - 'sobol' - 'halton' - 'random' - array specifying the initial population. The array should have shape ``(S, N)``, where S is the total population size and N is the number of parameters. `init` is clipped to `bounds` before use. The default is 'latinhypercube'. Latin Hypercube sampling tries to maximize coverage of the available parameter space. 'sobol' and 'halton' are superior alternatives and maximize even more the parameter space. 'sobol' will enforce an initial population size which is calculated as the next power of 2 after ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit less efficient. See `scipy.stats.qmc` for more details. 'random' initializes the population randomly - this has the drawback that clustering can occur, preventing the whole of parameter space being covered. Use of an array to specify a population could be used, for example, to create a tight bunch of initial guesses in an location where the solution is known to exist, thereby reducing time for convergence. atol : float, optional Absolute tolerance for convergence, the solving stops when ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, where and `atol` and `tol` are the absolute and relative tolerance respectively. updating : {'immediate', 'deferred'}, optional If ``'immediate'``, the best solution vector is continuously updated within a single generation [4]_. This can lead to faster convergence as trial vectors can take advantage of continuous improvements in the best solution. 
With ``'deferred'``, the best solution vector is updated once per generation. Only ``'deferred'`` is compatible with parallelization or vectorization, and the `workers` and `vectorized` keywords can over-ride this option. .. versionadded:: 1.2.0 workers : int or map-like callable, optional If `workers` is an int the population is subdivided into `workers` sections and evaluated in parallel (uses `multiprocessing.Pool <multiprocessing>`). Supply -1 to use all available CPU cores. Alternatively supply a map-like callable, such as `multiprocessing.Pool.map` for evaluating the population in parallel. This evaluation is carried out as ``workers(func, iterable)``. This option will override the `updating` keyword to ``updating='deferred'`` if ``workers != 1``. This option overrides the `vectorized` keyword if ``workers != 1``. Requires that `func` be pickleable. .. versionadded:: 1.2.0 constraints : {NonLinearConstraint, LinearConstraint, Bounds} Constraints on the solver, over and above those applied by the `bounds` kwd. Uses the approach by Lampinen [5]_. .. versionadded:: 1.4.0 x0 : None or array-like, optional Provides an initial guess to the minimization. Once the population has been initialized this vector replaces the first (best) member. This replacement is done even if `init` is given an initial population. ``x0.shape == (N,)``. .. versionadded:: 1.7.0 integrality : 1-D array, optional For each decision variable, a boolean value indicating whether the decision variable is constrained to integer values. The array is broadcast to ``(N,)``. If any decision variables are constrained to be integral, they will not be changed during polishing. Only integer values lying between the lower and upper bounds are used. If there are no integer values lying between the bounds then a `ValueError` is raised. .. versionadded:: 1.9.0 vectorized : bool, optional If ``vectorized is True``, `func` is sent an `x` array with ``x.shape == (N, S)``, and is expected to return an array of shape ``(S,)``, where `S` is the number of solution vectors to be calculated. If constraints are applied, each of the functions used to construct a `Constraint` object should accept an `x` array with ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where `M` is the number of constraint components. This option is an alternative to the parallelization offered by `workers`, and may help in optimization speed by reducing interpreter overhead from multiple function calls. This keyword is ignored if ``workers != 1``. This option will override the `updating` keyword to ``updating='deferred'``. See the notes section for further discussion on when to use ``'vectorized'``, and when to use ``'workers'``. .. versionadded:: 1.9.0 Returns ------- res : OptimizeResult The optimization result represented as a `OptimizeResult` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. If `polish` was employed, and a lower minimum was obtained by the polishing, then OptimizeResult also contains the ``jac`` attribute. If the eventual solution does not satisfy the applied constraints ``success`` will be `False`. Notes ----- Differential evolution is a stochastic population based method that is useful for global optimization problems. 
At each pass through the population the algorithm mutates each candidate solution by mixing with other candidate solutions to create a trial candidate. There are several strategies [3]_ for creating trial candidates, which suit some problems more than others. The 'best1bin' strategy is a good starting point for many systems. In this strategy two members of the population are randomly chosen. Their difference is used to mutate the best member (the 'best' in 'best1bin'), :math:`b_0`, so far: .. math:: b' = b_0 + mutation * (population[rand0] - population[rand1]) A trial vector is then constructed. Starting with a randomly chosen ith parameter the trial is sequentially filled (in modulo) with parameters from ``b'`` or the original candidate. The choice of whether to use ``b'`` or the original candidate is made with a binomial distribution (the 'bin' in 'best1bin') - a random number in [0, 1) is generated. If this number is less than the `recombination` constant then the parameter is loaded from ``b'``, otherwise it is loaded from the original candidate. The final parameter is always loaded from ``b'``. Once the trial candidate is built its fitness is assessed. If the trial is better than the original candidate then it takes its place. If it is also better than the best overall candidate it also replaces that. To improve your chances of finding a global minimum use higher `popsize` values, with higher `mutation` and (dithering), but lower `recombination` values. This has the effect of widening the search radius, but slowing convergence. By default the best solution vector is updated continuously within a single iteration (``updating='immediate'``). This is a modification [4]_ of the original differential evolution algorithm which can lead to faster convergence as trial vectors can immediately benefit from improved solutions. To use the original Storn and Price behaviour, updating the best solution once per iteration, set ``updating='deferred'``. The ``'deferred'`` approach is compatible with both parallelization and vectorization (``'workers'`` and ``'vectorized'`` keywords). These may improve minimization speed by using computer resources more efficiently. The ``'workers'`` distribute calculations over multiple processors. By default the Python `multiprocessing` module is used, but other approaches are also possible, such as the Message Passing Interface (MPI) used on clusters [6]_ [7]_. The overhead from these approaches (creating new Processes, etc) may be significant, meaning that computational speed doesn't necessarily scale with the number of processors used. Parallelization is best suited to computationally expensive objective functions. If the objective function is less expensive, then ``'vectorized'`` may aid by only calling the objective function once per iteration, rather than multiple times for all the population members; the interpreter overhead is reduced. .. versionadded:: 0.15.0 References ---------- .. [1] Differential evolution, Wikipedia, http://en.wikipedia.org/wiki/Differential_evolution .. [2] Storn, R and Price, K, Differential Evolution - a Simple and Efficient Heuristic for Global Optimization over Continuous Spaces, Journal of Global Optimization, 1997, 11, 341 - 359. .. [3] https://www.sciencedirect.com/science/article/pii/S111001682100613X#s0030 .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K., - Characterization of structures from X-ray scattering data using genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357, 2827-2848 .. 
[5] Lampinen, J., A constraint handling approach for the differential evolution algorithm. Proceedings of the 2002 Congress on Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. IEEE, 2002. .. [6] https://mpi4py.readthedocs.io/en/stable/ .. [7] https://schwimmbad.readthedocs.io/en/latest/ Examples -------- Let us consider the problem of minimizing the Rosenbrock function. This function is implemented in `rosen` in `scipy.optimize`. >>> import numpy as np >>> from scipy.optimize import rosen, differential_evolution >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] >>> result = differential_evolution(rosen, bounds) >>> result.x, result.fun (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19) Now repeat, but with parallelization. >>> result = differential_evolution(rosen, bounds, updating='deferred', ... workers=2) >>> result.x, result.fun (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19) Let's do a constrained minimization. >>> from scipy.optimize import LinearConstraint, Bounds We add the constraint that the sum of ``x[0]`` and ``x[1]`` must be less than or equal to 1.9. This is a linear constraint, which may be written ``A @ x <= 1.9``, where ``A = array([[1, 1]])``. This can be encoded as a `LinearConstraint` instance: >>> lc = LinearConstraint([[1, 1]], -np.inf, 1.9) Specify limits using a `Bounds` object. >>> bounds = Bounds([0., 0.], [2., 2.]) >>> result = differential_evolution(rosen, bounds, constraints=lc, ... seed=1) >>> result.x, result.fun (array([0.96632622, 0.93367155]), 0.0011352416852625719) Next find the minimum of the Ackley function (https://en.wikipedia.org/wiki/Test_functions_for_optimization). >>> def ackley(x): ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2)) ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1])) ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e >>> bounds = [(-5, 5), (-5, 5)] >>> result = differential_evolution(ackley, bounds, seed=1) >>> result.x, result.fun (array([0., 0.]), 4.440892098500626e-16) The Ackley function is written in a vectorized manner, so the ``'vectorized'`` keyword can be employed. Note the reduced number of function evaluations. >>> result = differential_evolution( ... ackley, bounds, vectorized=True, updating='deferred', seed=1 ... ) >>> result.x, result.fun (array([0., 0.]), 4.440892098500626e-16) """ # using a context manager means that any created Pool objects are # cleared up. with DifferentialEvolutionSolver(func, bounds, args=args, strategy=strategy, maxiter=maxiter, popsize=popsize, tol=tol, mutation=mutation, recombination=recombination, seed=seed, polish=polish, callback=callback, disp=disp, init=init, atol=atol, updating=updating, workers=workers, constraints=constraints, x0=x0, integrality=integrality, vectorized=vectorized) as solver: ret = solver.solve() return ret class DifferentialEvolutionSolver: """This class implements the differential evolution solver Parameters ---------- func : callable The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. The number of parameters, N, is equal to ``len(x)``. bounds : sequence or `Bounds` Bounds for variables. There are two ways to specify the bounds: 1. Instance of `Bounds` class. 2. ``(min, max)`` pairs for each element in ``x``, defining the finite lower and upper bounds for the optimizing argument of `func`. 
The total number of bounds is used to determine the number of parameters, N. If there are parameters whose bounds are equal the total number of free parameters is ``N - N_equal``. args : tuple, optional Any additional fixed parameters needed to completely specify the objective function. strategy : str, optional The differential evolution strategy to use. Should be one of: - 'best1bin' - 'best1exp' - 'rand1exp' - 'randtobest1exp' - 'currenttobest1exp' - 'best2exp' - 'rand2exp' - 'randtobest1bin' - 'currenttobest1bin' - 'best2bin' - 'rand2bin' - 'rand1bin' The default is 'best1bin' maxiter : int, optional The maximum number of generations over which the entire population is evolved. The maximum number of function evaluations (with no polishing) is: ``(maxiter + 1) * popsize * (N - N_equal)`` popsize : int, optional A multiplier for setting the total population size. The population has ``popsize * (N - N_equal)`` individuals. This keyword is overridden if an initial population is supplied via the `init` keyword. When using ``init='sobol'`` the population size is calculated as the next power of 2 after ``popsize * (N - N_equal)``. tol : float, optional Relative tolerance for convergence, the solving stops when ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, where and `atol` and `tol` are the absolute and relative tolerance respectively. mutation : float or tuple(float, float), optional The mutation constant. In the literature this is also known as differential weight, being denoted by F. If specified as a float it should be in the range [0, 2]. If specified as a tuple ``(min, max)`` dithering is employed. Dithering randomly changes the mutation constant on a generation by generation basis. The mutation constant for that generation is taken from U[min, max). Dithering can help speed convergence significantly. Increasing the mutation constant increases the search radius, but will slow down convergence. recombination : float, optional The recombination constant, should be in the range [0, 1]. In the literature this is also known as the crossover probability, being denoted by CR. Increasing this value allows a larger number of mutants to progress into the next generation, but at the risk of population stability. seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Specify `seed` for repeatable minimizations. disp : bool, optional Prints the evaluated `func` at every iteration. callback : callable, `callback(xk, convergence=val)`, optional A function to follow the progress of the minimization. ``xk`` is the current value of ``x0``. ``val`` represents the fractional value of the population convergence. When ``val`` is greater than one the function halts. If callback returns `True`, then the minimization is halted (any polishing is still carried out). polish : bool, optional If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B` method is used to polish the best population member at the end, which can improve the minimization slightly. If a constrained problem is being studied then the `trust-constr` method is used instead. For large problems with many constraints, polishing can take a long time due to the Jacobian computations. 
maxfun : int, optional Set the maximum number of function evaluations. However, it probably makes more sense to set `maxiter` instead. init : str or array-like, optional Specify which type of population initialization is performed. Should be one of: - 'latinhypercube' - 'sobol' - 'halton' - 'random' - array specifying the initial population. The array should have shape ``(S, N)``, where S is the total population size and N is the number of parameters. `init` is clipped to `bounds` before use. The default is 'latinhypercube'. Latin Hypercube sampling tries to maximize coverage of the available parameter space. 'sobol' and 'halton' are superior alternatives and maximize even more the parameter space. 'sobol' will enforce an initial population size which is calculated as the next power of 2 after ``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit less efficient. See `scipy.stats.qmc` for more details. 'random' initializes the population randomly - this has the drawback that clustering can occur, preventing the whole of parameter space being covered. Use of an array to specify a population could be used, for example, to create a tight bunch of initial guesses in an location where the solution is known to exist, thereby reducing time for convergence. atol : float, optional Absolute tolerance for convergence, the solving stops when ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``, where and `atol` and `tol` are the absolute and relative tolerance respectively. updating : {'immediate', 'deferred'}, optional If ``'immediate'``, the best solution vector is continuously updated within a single generation [4]_. This can lead to faster convergence as trial vectors can take advantage of continuous improvements in the best solution. With ``'deferred'``, the best solution vector is updated once per generation. Only ``'deferred'`` is compatible with parallelization or vectorization, and the `workers` and `vectorized` keywords can over-ride this option. workers : int or map-like callable, optional If `workers` is an int the population is subdivided into `workers` sections and evaluated in parallel (uses `multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores available to the Process. Alternatively supply a map-like callable, such as `multiprocessing.Pool.map` for evaluating the population in parallel. This evaluation is carried out as ``workers(func, iterable)``. This option will override the `updating` keyword to `updating='deferred'` if `workers != 1`. Requires that `func` be pickleable. constraints : {NonLinearConstraint, LinearConstraint, Bounds} Constraints on the solver, over and above those applied by the `bounds` kwd. Uses the approach by Lampinen. x0 : None or array-like, optional Provides an initial guess to the minimization. Once the population has been initialized this vector replaces the first (best) member. This replacement is done even if `init` is given an initial population. ``x0.shape == (N,)``. integrality : 1-D array, optional For each decision variable, a boolean value indicating whether the decision variable is constrained to integer values. The array is broadcast to ``(N,)``. If any decision variables are constrained to be integral, they will not be changed during polishing. Only integer values lying between the lower and upper bounds are used. If there are no integer values lying between the bounds then a `ValueError` is raised. 
vectorized : bool, optional If ``vectorized is True``, `func` is sent an `x` array with ``x.shape == (N, S)``, and is expected to return an array of shape ``(S,)``, where `S` is the number of solution vectors to be calculated. If constraints are applied, each of the functions used to construct a `Constraint` object should accept an `x` array with ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where `M` is the number of constraint components. This option is an alternative to the parallelization offered by `workers`, and may help in optimization speed. This keyword is ignored if ``workers != 1``. This option will override the `updating` keyword to ``updating='deferred'``. """ # Dispatch of mutation strategy method (binomial or exponential). _binomial = {'best1bin': '_best1', 'randtobest1bin': '_randtobest1', 'currenttobest1bin': '_currenttobest1', 'best2bin': '_best2', 'rand2bin': '_rand2', 'rand1bin': '_rand1'} _exponential = {'best1exp': '_best1', 'rand1exp': '_rand1', 'randtobest1exp': '_randtobest1', 'currenttobest1exp': '_currenttobest1', 'best2exp': '_best2', 'rand2exp': '_rand2'} __init_error_msg = ("The population initialization method must be one of " "'latinhypercube' or 'random', or an array of shape " "(S, N) where N is the number of parameters and S>5") def __init__(self, func, bounds, args=(), strategy='best1bin', maxiter=1000, popsize=15, tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None, maxfun=np.inf, callback=None, disp=False, polish=True, init='latinhypercube', atol=0, updating='immediate', workers=1, constraints=(), x0=None, *, integrality=None, vectorized=False): if strategy in self._binomial: self.mutation_func = getattr(self, self._binomial[strategy]) elif strategy in self._exponential: self.mutation_func = getattr(self, self._exponential[strategy]) else: raise ValueError("Please select a valid mutation strategy") self.strategy = strategy self.callback = callback self.polish = polish # set the updating / parallelisation options if updating in ['immediate', 'deferred']: self._updating = updating self.vectorized = vectorized # want to use parallelisation, but updating is immediate if workers != 1 and updating == 'immediate': warnings.warn("differential_evolution: the 'workers' keyword has" " overridden updating='immediate' to" " updating='deferred'", UserWarning, stacklevel=2) self._updating = 'deferred' if vectorized and workers != 1: warnings.warn("differential_evolution: the 'workers' keyword" " overrides the 'vectorized' keyword", stacklevel=2) self.vectorized = vectorized = False if vectorized and updating == 'immediate': warnings.warn("differential_evolution: the 'vectorized' keyword" " has overridden updating='immediate' to updating" "='deferred'", UserWarning, stacklevel=2) self._updating = 'deferred' # an object with a map method. if vectorized: def maplike_for_vectorized_func(func, x): # send an array (N, S) to the user func, # expect to receive (S,). Transposition is required because # internally the population is held as (S, N) return np.atleast_1d(func(x.T)) workers = maplike_for_vectorized_func self._mapwrapper = MapWrapper(workers) # relative and absolute tolerances for convergence self.tol, self.atol = tol, atol # Mutation constant should be in [0, 2). If specified as a sequence # then dithering is performed. 
self.scale = mutation if (not np.all(np.isfinite(mutation)) or np.any(np.array(mutation) >= 2) or np.any(np.array(mutation) < 0)): raise ValueError('The mutation constant must be a float in ' 'U[0, 2), or specified as a tuple(min, max)' ' where min < max and min, max are in U[0, 2).') self.dither = None if hasattr(mutation, '__iter__') and len(mutation) > 1: self.dither = [mutation[0], mutation[1]] self.dither.sort() self.cross_over_probability = recombination # we create a wrapped function to allow the use of map (and Pool.map # in the future) self.func = _FunctionWrapper(func, args) self.args = args # convert tuple of lower and upper bounds to limits # [(low_0, high_0), ..., (low_n, high_n] # -> [[low_0, ..., low_n], [high_0, ..., high_n]] if isinstance(bounds, Bounds): self.limits = np.array(new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)), dtype=float).T else: self.limits = np.array(bounds, dtype='float').T if (np.size(self.limits, 0) != 2 or not np.all(np.isfinite(self.limits))): raise ValueError('bounds should be a sequence containing ' 'real valued (min, max) pairs for each value' ' in x') if maxiter is None: # the default used to be None maxiter = 1000 self.maxiter = maxiter if maxfun is None: # the default used to be None maxfun = np.inf self.maxfun = maxfun # population is scaled to between [0, 1]. # We have to scale between parameter <-> population # save these arguments for _scale_parameter and # _unscale_parameter. This is an optimization self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1]) self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1]) with np.errstate(divide='ignore'): # if lb == ub then the following line will be 1/0, which is why # we ignore the divide by zero warning. The result from 1/0 is # inf, so replace those values by 0. self.__recip_scale_arg2 = 1 / self.__scale_arg2 self.__recip_scale_arg2[~np.isfinite(self.__recip_scale_arg2)] = 0 self.parameter_count = np.size(self.limits, 1) self.random_number_generator = check_random_state(seed) # Which parameters are going to be integers? if np.any(integrality): # # user has provided a truth value for integer constraints integrality = np.broadcast_to( integrality, self.parameter_count ) integrality = np.asarray(integrality, bool) # For integrality parameters change the limits to only allow # integer values lying between the limits. lb, ub = np.copy(self.limits) lb = np.ceil(lb) ub = np.floor(ub) if not (lb[integrality] <= ub[integrality]).all(): # there's a parameter that doesn't have an integer value # lying between the limits raise ValueError("One of the integrality constraints does not" " have any possible integer values between" " the lower/upper bounds.") nlb = np.nextafter(lb[integrality] - 0.5, np.inf) nub = np.nextafter(ub[integrality] + 0.5, -np.inf) self.integrality = integrality self.limits[0, self.integrality] = nlb self.limits[1, self.integrality] = nub else: self.integrality = False # check for equal bounds eb = self.limits[0] == self.limits[1] eb_count = np.count_nonzero(eb) # default population initialization is a latin hypercube design, but # there are other population initializations possible. # the minimum is 5 because 'best2bin' requires a population that's at # least 5 long # 202301 - reduced population size to account for parameters with # equal bounds. 
If there are no varying parameters set N to at least 1 self.num_population_members = max( 5, popsize * max(1, self.parameter_count - eb_count) ) self.population_shape = (self.num_population_members, self.parameter_count) self._nfev = 0 # check first str otherwise will fail to compare str with array if isinstance(init, str): if init == 'latinhypercube': self.init_population_lhs() elif init == 'sobol': # must be Ns = 2**m for Sobol' n_s = int(2 ** np.ceil(np.log2(self.num_population_members))) self.num_population_members = n_s self.population_shape = (self.num_population_members, self.parameter_count) self.init_population_qmc(qmc_engine='sobol') elif init == 'halton': self.init_population_qmc(qmc_engine='halton') elif init == 'random': self.init_population_random() else: raise ValueError(self.__init_error_msg) else: self.init_population_array(init) if x0 is not None: # scale to within unit interval and # ensure parameters are within bounds. x0_scaled = self._unscale_parameters(np.asarray(x0)) if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any(): raise ValueError( "Some entries in x0 lay outside the specified bounds" ) self.population[0] = x0_scaled # infrastructure for constraints self.constraints = constraints self._wrapped_constraints = [] if hasattr(constraints, '__len__'): # sequence of constraints, this will also deal with default # keyword parameter for c in constraints: self._wrapped_constraints.append( _ConstraintWrapper(c, self.x) ) else: self._wrapped_constraints = [ _ConstraintWrapper(constraints, self.x) ] self.total_constraints = np.sum( [c.num_constr for c in self._wrapped_constraints] ) self.constraint_violation = np.zeros((self.num_population_members, 1)) self.feasible = np.ones(self.num_population_members, bool) self.disp = disp def init_population_lhs(self): """ Initializes the population with Latin Hypercube Sampling. Latin Hypercube Sampling ensures that each parameter is uniformly sampled over its range. """ rng = self.random_number_generator # Each parameter range needs to be sampled uniformly. The scaled # parameter range ([0, 1)) needs to be split into # `self.num_population_members` segments, each of which has the following # size: segsize = 1.0 / self.num_population_members # Within each segment we sample from a uniform random distribution. # We need to do this sampling for each parameter. samples = (segsize * rng.uniform(size=self.population_shape) # Offset each segment to cover the entire parameter range [0, 1) + np.linspace(0., 1., self.num_population_members, endpoint=False)[:, np.newaxis]) # Create an array for population of candidate solutions. self.population = np.zeros_like(samples) # Initialize population of candidate solutions by permutation of the # random samples. for j in range(self.parameter_count): order = rng.permutation(range(self.num_population_members)) self.population[:, j] = samples[order, j] # reset population energies self.population_energies = np.full(self.num_population_members, np.inf) # reset number of function evaluations counter self._nfev = 0 def init_population_qmc(self, qmc_engine): """Initializes the population with a QMC method. QMC methods ensures that each parameter is uniformly sampled over its range. Parameters ---------- qmc_engine : str The QMC method to use for initialization. Can be one of ``latinhypercube``, ``sobol`` or ``halton``. """ from scipy.stats import qmc rng = self.random_number_generator # Create an array for population of candidate solutions. 
if qmc_engine == 'latinhypercube': sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng) elif qmc_engine == 'sobol': sampler = qmc.Sobol(d=self.parameter_count, seed=rng) elif qmc_engine == 'halton': sampler = qmc.Halton(d=self.parameter_count, seed=rng) else: raise ValueError(self.__init_error_msg) self.population = sampler.random(n=self.num_population_members) # reset population energies self.population_energies = np.full(self.num_population_members, np.inf) # reset number of function evaluations counter self._nfev = 0 def init_population_random(self): """ Initializes the population at random. This type of initialization can possess clustering, Latin Hypercube sampling is generally better. """ rng = self.random_number_generator self.population = rng.uniform(size=self.population_shape) # reset population energies self.population_energies = np.full(self.num_population_members, np.inf) # reset number of function evaluations counter self._nfev = 0 def init_population_array(self, init): """ Initializes the population with a user specified population. Parameters ---------- init : np.ndarray Array specifying subset of the initial population. The array should have shape (S, N), where N is the number of parameters. The population is clipped to the lower and upper bounds. """ # make sure you're using a float array popn = np.asfarray(init) if (np.size(popn, 0) < 5 or popn.shape[1] != self.parameter_count or len(popn.shape) != 2): raise ValueError("The population supplied needs to have shape" " (S, len(x)), where S > 4.") # scale values and clip to bounds, assigning to population self.population = np.clip(self._unscale_parameters(popn), 0, 1) self.num_population_members = np.size(self.population, 0) self.population_shape = (self.num_population_members, self.parameter_count) # reset population energies self.population_energies = np.full(self.num_population_members, np.inf) # reset number of function evaluations counter self._nfev = 0 @property def x(self): """ The best solution from the solver """ return self._scale_parameters(self.population[0]) @property def convergence(self): """ The standard deviation of the population energies divided by their mean. """ if np.any(np.isinf(self.population_energies)): return np.inf return (np.std(self.population_energies) / (np.abs(np.mean(self.population_energies)) + _MACHEPS)) def converged(self): """ Return True if the solver has converged. """ if np.any(np.isinf(self.population_energies)): return False return (np.std(self.population_energies) <= self.atol + self.tol * np.abs(np.mean(self.population_energies))) def solve(self): """ Runs the DifferentialEvolutionSolver. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. If `polish` was employed, and a lower minimum was obtained by the polishing, then OptimizeResult also contains the ``jac`` attribute. """ nit, warning_flag = 0, False status_message = _status_message['success'] # The population may have just been initialized (all entries are # np.inf). If it has you have to calculate the initial energies. # Although this is also done in the evolve generator it's possible # that someone can set maxiter=0, at which point we still want the # initial energies to be calculated (the following loop isn't run). 
if np.all(np.isinf(self.population_energies)): self.feasible, self.constraint_violation = ( self._calculate_population_feasibilities(self.population)) # only work out population energies for feasible solutions self.population_energies[self.feasible] = ( self._calculate_population_energies( self.population[self.feasible])) self._promote_lowest_energy() # do the optimization. for nit in range(1, self.maxiter + 1): # evolve the population by a generation try: next(self) except StopIteration: warning_flag = True if self._nfev > self.maxfun: status_message = _status_message['maxfev'] elif self._nfev == self.maxfun: status_message = ('Maximum number of function evaluations' ' has been reached.') break if self.disp: print("differential_evolution step %d: f(x)= %g" % (nit, self.population_energies[0])) if self.callback: c = self.tol / (self.convergence + _MACHEPS) warning_flag = bool(self.callback(self.x, convergence=c)) if warning_flag: status_message = ('callback function requested stop early' ' by returning True') # should the solver terminate? if warning_flag or self.converged(): break else: status_message = _status_message['maxiter'] warning_flag = True DE_result = OptimizeResult( x=self.x, fun=self.population_energies[0], nfev=self._nfev, nit=nit, message=status_message, success=(warning_flag is not True)) if self.polish and not np.all(self.integrality): # can't polish if all the parameters are integers if np.any(self.integrality): # set the lower/upper bounds equal so that any integrality # constraints work. limits, integrality = self.limits, self.integrality limits[0, integrality] = DE_result.x[integrality] limits[1, integrality] = DE_result.x[integrality] polish_method = 'L-BFGS-B' if self._wrapped_constraints: polish_method = 'trust-constr' constr_violation = self._constraint_violation_fn(DE_result.x) if np.any(constr_violation > 0.): warnings.warn("differential evolution didn't find a" " solution satisfying the constraints," " attempting to polish from the least" " infeasible solution", UserWarning) if self.disp: print(f"Polishing solution with '{polish_method}'") result = minimize(self.func, np.copy(DE_result.x), method=polish_method, bounds=self.limits.T, constraints=self.constraints) self._nfev += result.nfev DE_result.nfev = self._nfev # Polishing solution is only accepted if there is an improvement in # cost function, the polishing was successful and the solution lies # within the bounds. if (result.fun < DE_result.fun and result.success and np.all(result.x <= self.limits[1]) and np.all(self.limits[0] <= result.x)): DE_result.fun = result.fun DE_result.x = result.x DE_result.jac = result.jac # to keep internal state consistent self.population_energies[0] = result.fun self.population[0] = self._unscale_parameters(result.x) if self._wrapped_constraints: DE_result.constr = [c.violation(DE_result.x) for c in self._wrapped_constraints] DE_result.constr_violation = np.max( np.concatenate(DE_result.constr)) DE_result.maxcv = DE_result.constr_violation if DE_result.maxcv > 0: # if the result is infeasible then success must be False DE_result.success = False DE_result.message = ("The solution does not satisfy the " f"constraints, MAXCV = {DE_result.maxcv}") return DE_result def _calculate_population_energies(self, population): """ Calculate the energies of a population. Parameters ---------- population : ndarray An array of parameter vectors normalised to [0, 1] using lower and upper limits. Has shape ``(np.size(population, 0), N)``. 
Returns ------- energies : ndarray An array of energies corresponding to each population member. If maxfun will be exceeded during this call, then the number of function evaluations will be reduced and energies will be right-padded with np.inf. Has shape ``(np.size(population, 0),)`` """ num_members = np.size(population, 0) # S is the number of function evals left to stay under the # maxfun budget S = min(num_members, self.maxfun - self._nfev) energies = np.full(num_members, np.inf) parameters_pop = self._scale_parameters(population) try: calc_energies = list( self._mapwrapper(self.func, parameters_pop[0:S]) ) calc_energies = np.squeeze(calc_energies) except (TypeError, ValueError) as e: # wrong number of arguments for _mapwrapper # or wrong length returned from the mapper raise RuntimeError( "The map-like callable must be of the form f(func, iterable), " "returning a sequence of numbers the same length as 'iterable'" ) from e if calc_energies.size != S: if self.vectorized: raise RuntimeError("The vectorized function must return an" " array of shape (S,) when given an array" " of shape (len(x), S)") raise RuntimeError("func(x, *args) must return a scalar value") energies[0:S] = calc_energies if self.vectorized: self._nfev += 1 else: self._nfev += S return energies def _promote_lowest_energy(self): # swaps 'best solution' into first population entry idx = np.arange(self.num_population_members) feasible_solutions = idx[self.feasible] if feasible_solutions.size: # find the best feasible solution idx_t = np.argmin(self.population_energies[feasible_solutions]) l = feasible_solutions[idx_t] else: # no solution was feasible, use 'best' infeasible solution, which # will violate constraints the least l = np.argmin(np.sum(self.constraint_violation, axis=1)) self.population_energies[[0, l]] = self.population_energies[[l, 0]] self.population[[0, l], :] = self.population[[l, 0], :] self.feasible[[0, l]] = self.feasible[[l, 0]] self.constraint_violation[[0, l], :] = ( self.constraint_violation[[l, 0], :]) def _constraint_violation_fn(self, x): """ Calculates total constraint violation for all the constraints, for a set of solutions. Parameters ---------- x : ndarray Solution vector(s). Has shape (S, N), or (N,), where S is the number of solutions to investigate and N is the number of parameters. Returns ------- cv : ndarray Total violation of constraints. Has shape ``(S, M)``, where M is the total number of constraint components (which is not necessarily equal to len(self._wrapped_constraints)). """ # how many solution vectors you're calculating constraint violations # for S = np.size(x) // self.parameter_count _out = np.zeros((S, self.total_constraints)) offset = 0 for con in self._wrapped_constraints: # the input/output of the (vectorized) constraint function is # {(N, S), (N,)} --> (M, S) # The input to _constraint_violation_fn is (S, N) or (N,), so # transpose to pass it to the constraint. The output is transposed # from (M, S) to (S, M) for further use. c = con.violation(x.T).T # The shape of c should be (M,), (1, M), or (S, M). Check for # those shapes, as an incorrect shape indicates that the # user constraint function didn't return the right thing, and # the reshape operation will fail. Intercept the wrong shape # to give a reasonable error message. I'm not sure what failure # modes an inventive user will come up with. if c.shape[-1] != con.num_constr or (S > 1 and c.shape[0] != S): raise RuntimeError("An array returned from a Constraint has" " the wrong shape. 
If `vectorized is False`" " the Constraint should return an array of" " shape (M,). If `vectorized is True` then" " the Constraint must return an array of" " shape (M, S), where S is the number of" " solution vectors and M is the number of" " constraint components in a given" " Constraint object.") # the violation function may return a 1D array, but is it a # sequence of constraints for one solution (S=1, M>=1), or the # value of a single constraint for a sequence of solutions # (S>=1, M=1) c = np.reshape(c, (S, con.num_constr)) _out[:, offset:offset + con.num_constr] = c offset += con.num_constr return _out def _calculate_population_feasibilities(self, population): """ Calculate the feasibilities of a population. Parameters ---------- population : ndarray An array of parameter vectors normalised to [0, 1] using lower and upper limits. Has shape ``(np.size(population, 0), N)``. Returns ------- feasible, constraint_violation : ndarray, ndarray Boolean array of feasibility for each population member, and an array of the constraint violation for each population member. constraint_violation has shape ``(np.size(population, 0), M)``, where M is the number of constraints. """ num_members = np.size(population, 0) if not self._wrapped_constraints: # shortcut for no constraints return np.ones(num_members, bool), np.zeros((num_members, 1)) # (S, N) parameters_pop = self._scale_parameters(population) if self.vectorized: # (S, M) constraint_violation = np.array( self._constraint_violation_fn(parameters_pop) ) else: # (S, 1, M) constraint_violation = np.array([self._constraint_violation_fn(x) for x in parameters_pop]) # if you use the list comprehension in the line above it will # create an array of shape (S, 1, M), because each iteration # generates an array of (1, M). In comparison the vectorized # version returns (S, M). It's therefore necessary to remove axis 1 constraint_violation = constraint_violation[:, 0] feasible = ~(np.sum(constraint_violation, axis=1) > 0) return feasible, constraint_violation def __iter__(self): return self def __enter__(self): return self def __exit__(self, *args): return self._mapwrapper.__exit__(*args) def _accept_trial(self, energy_trial, feasible_trial, cv_trial, energy_orig, feasible_orig, cv_orig): """ Trial is accepted if: * it satisfies all constraints and provides a lower or equal objective function value, while both the compared solutions are feasible - or - * it is feasible while the original solution is infeasible, - or - * it is infeasible, but provides a lower or equal constraint violation for all constraint functions. This test corresponds to section III of Lampinen [1]_. Parameters ---------- energy_trial : float Energy of the trial solution feasible_trial : float Feasibility of trial solution cv_trial : array-like Excess constraint violation for the trial solution energy_orig : float Energy of the original solution feasible_orig : float Feasibility of original solution cv_orig : array-like Excess constraint violation for the original solution Returns ------- accepted : bool """ if feasible_orig and feasible_trial: return energy_trial <= energy_orig elif feasible_trial and not feasible_orig: return True elif not feasible_trial and (cv_trial <= cv_orig).all(): # cv_trial < cv_orig would imply that both trial and orig are not # feasible return True return False def __next__(self): """ Evolve the population by a single generation Returns ------- x : ndarray The best solution from the solver. 
fun : float Value of objective function obtained from the best solution. """ # the population may have just been initialized (all entries are # np.inf). If it has you have to calculate the initial energies if np.all(np.isinf(self.population_energies)): self.feasible, self.constraint_violation = ( self._calculate_population_feasibilities(self.population)) # only need to work out population energies for those that are # feasible self.population_energies[self.feasible] = ( self._calculate_population_energies( self.population[self.feasible])) self._promote_lowest_energy() if self.dither is not None: self.scale = self.random_number_generator.uniform(self.dither[0], self.dither[1]) if self._updating == 'immediate': # update best solution immediately for candidate in range(self.num_population_members): if self._nfev > self.maxfun: raise StopIteration # create a trial solution trial = self._mutate(candidate) # ensuring that it's in the range [0, 1) self._ensure_constraint(trial) # scale from [0, 1) to the actual parameter value parameters = self._scale_parameters(trial) # determine the energy of the objective function if self._wrapped_constraints: cv = self._constraint_violation_fn(parameters) feasible = False energy = np.inf if not np.sum(cv) > 0: # solution is feasible feasible = True energy = self.func(parameters) self._nfev += 1 else: feasible = True cv = np.atleast_2d([0.]) energy = self.func(parameters) self._nfev += 1 # compare trial and population member if self._accept_trial(energy, feasible, cv, self.population_energies[candidate], self.feasible[candidate], self.constraint_violation[candidate]): self.population[candidate] = trial self.population_energies[candidate] = np.squeeze(energy) self.feasible[candidate] = feasible self.constraint_violation[candidate] = cv # if the trial candidate is also better than the best # solution then promote it. if self._accept_trial(energy, feasible, cv, self.population_energies[0], self.feasible[0], self.constraint_violation[0]): self._promote_lowest_energy() elif self._updating == 'deferred': # update best solution once per generation if self._nfev >= self.maxfun: raise StopIteration # 'deferred' approach, vectorised form. # create trial solutions trial_pop = np.array( [self._mutate(i) for i in range(self.num_population_members)]) # enforce bounds self._ensure_constraint(trial_pop) # determine the energies of the objective function, but only for # feasible trials feasible, cv = self._calculate_population_feasibilities(trial_pop) trial_energies = np.full(self.num_population_members, np.inf) # only calculate for feasible entries trial_energies[feasible] = self._calculate_population_energies( trial_pop[feasible]) # which solutions are 'improved'? loc = [self._accept_trial(*val) for val in zip(trial_energies, feasible, cv, self.population_energies, self.feasible, self.constraint_violation)] loc = np.array(loc) self.population = np.where(loc[:, np.newaxis], trial_pop, self.population) self.population_energies = np.where(loc, trial_energies, self.population_energies) self.feasible = np.where(loc, feasible, self.feasible) self.constraint_violation = np.where(loc[:, np.newaxis], cv, self.constraint_violation) # make sure the best solution is updated if updating='deferred'. # put the lowest energy into the best solution position. 
self._promote_lowest_energy() return self.x, self.population_energies[0] def _scale_parameters(self, trial): """Scale from a number between 0 and 1 to parameters.""" # trial either has shape (N, ) or (L, N), where L is the number of # solutions being scaled scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2 if np.any(self.integrality): i = np.broadcast_to(self.integrality, scaled.shape) scaled[i] = np.round(scaled[i]) return scaled def _unscale_parameters(self, parameters): """Scale from parameters to a number between 0 and 1.""" return (parameters - self.__scale_arg1) * self.__recip_scale_arg2 + 0.5 def _ensure_constraint(self, trial): """Make sure the parameters lie between the limits.""" mask = np.where((trial > 1) | (trial < 0)) trial[mask] = self.random_number_generator.uniform(size=mask[0].shape) def _mutate(self, candidate): """Create a trial vector based on a mutation strategy.""" trial = np.copy(self.population[candidate]) rng = self.random_number_generator fill_point = rng.choice(self.parameter_count) if self.strategy in ['currenttobest1exp', 'currenttobest1bin']: bprime = self.mutation_func(candidate, self._select_samples(candidate, 5)) else: bprime = self.mutation_func(self._select_samples(candidate, 5)) if self.strategy in self._binomial: crossovers = rng.uniform(size=self.parameter_count) crossovers = crossovers < self.cross_over_probability # the last one is always from the bprime vector for binomial # If you fill in modulo with a loop you have to set the last one to # true. If you don't use a loop then you can have any random entry # be True. crossovers[fill_point] = True trial = np.where(crossovers, bprime, trial) return trial elif self.strategy in self._exponential: i = 0 crossovers = rng.uniform(size=self.parameter_count) crossovers = crossovers < self.cross_over_probability crossovers[0] = True while (i < self.parameter_count and crossovers[i]): trial[fill_point] = bprime[fill_point] fill_point = (fill_point + 1) % self.parameter_count i += 1 return trial def _best1(self, samples): """best1bin, best1exp""" r0, r1 = samples[:2] return (self.population[0] + self.scale * (self.population[r0] - self.population[r1])) def _rand1(self, samples): """rand1bin, rand1exp""" r0, r1, r2 = samples[:3] return (self.population[r0] + self.scale * (self.population[r1] - self.population[r2])) def _randtobest1(self, samples): """randtobest1bin, randtobest1exp""" r0, r1, r2 = samples[:3] bprime = np.copy(self.population[r0]) bprime += self.scale * (self.population[0] - bprime) bprime += self.scale * (self.population[r1] - self.population[r2]) return bprime def _currenttobest1(self, candidate, samples): """currenttobest1bin, currenttobest1exp""" r0, r1 = samples[:2] bprime = (self.population[candidate] + self.scale * (self.population[0] - self.population[candidate] + self.population[r0] - self.population[r1])) return bprime def _best2(self, samples): """best2bin, best2exp""" r0, r1, r2, r3 = samples[:4] bprime = (self.population[0] + self.scale * (self.population[r0] + self.population[r1] - self.population[r2] - self.population[r3])) return bprime def _rand2(self, samples): """rand2bin, rand2exp""" r0, r1, r2, r3, r4 = samples bprime = (self.population[r0] + self.scale * (self.population[r1] + self.population[r2] - self.population[r3] - self.population[r4])) return bprime def _select_samples(self, candidate, number_samples): """ obtain random integers from range(self.num_population_members), without replacement. You can't have the original candidate either. 
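
        For example (purely illustrative), with ``num_population_members=6``,
        ``candidate=2`` and ``number_samples=3`` a possible return value is
        ``[5, 0, 3]``; the actual draw depends on the state of
        ``self.random_number_generator``, but it never contains `candidate`
        itself and never repeats an index.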
""" pool = np.arange(self.num_population_members) self.random_number_generator.shuffle(pool) idxs = [] while len(idxs) < number_samples and len(pool) > 0: idx = pool[0] pool = pool[1:] if idx != candidate: idxs.append(idx) return idxs class _ConstraintWrapper: """Object to wrap/evaluate user defined constraints. Very similar in practice to `PreparedConstraint`, except that no evaluation of jac/hess is performed (explicit or implicit). If created successfully, it will contain the attributes listed below. Parameters ---------- constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`} Constraint to check and prepare. x0 : array_like Initial vector of independent variables, shape (N,) Attributes ---------- fun : callable Function defining the constraint wrapped by one of the convenience classes. bounds : 2-tuple Contains lower and upper bounds for the constraints --- lb and ub. These are converted to ndarray and have a size equal to the number of the constraints. """ def __init__(self, constraint, x0): self.constraint = constraint if isinstance(constraint, NonlinearConstraint): def fun(x): x = np.asarray(x) return np.atleast_1d(constraint.fun(x)) elif isinstance(constraint, LinearConstraint): def fun(x): if issparse(constraint.A): A = constraint.A else: A = np.atleast_2d(constraint.A) return A.dot(x) elif isinstance(constraint, Bounds): def fun(x): return np.asarray(x) else: raise ValueError("`constraint` of an unknown type is passed.") self.fun = fun lb = np.asarray(constraint.lb, dtype=float) ub = np.asarray(constraint.ub, dtype=float) x0 = np.asarray(x0) # find out the number of constraints f0 = fun(x0) self.num_constr = m = f0.size self.parameter_count = x0.size if lb.ndim == 0: lb = np.resize(lb, m) if ub.ndim == 0: ub = np.resize(ub, m) self.bounds = (lb, ub) def __call__(self, x): return np.atleast_1d(self.fun(x)) def violation(self, x): """How much the constraint is exceeded by. Parameters ---------- x : array-like Vector of independent variables, (N, S), where N is number of parameters and S is the number of solutions to be investigated. Returns ------- excess : array-like How much the constraint is exceeded by, for each of the constraints specified by `_ConstraintWrapper.fun`. Has shape (M, S) where M is the number of constraint components. """ # expect ev to have shape (num_constr, S) or (num_constr,) ev = self.fun(np.asarray(x)) try: excess_lb = np.maximum(self.bounds[0] - ev.T, 0) excess_ub = np.maximum(ev.T - self.bounds[1], 0) except ValueError as e: raise RuntimeError("An array returned from a Constraint has" " the wrong shape. If `vectorized is False`" " the Constraint should return an array of" " shape (M,). If `vectorized is True` then" " the Constraint must return an array of" " shape (M, S), where S is the number of" " solution vectors and M is the number of" " constraint components in a given" " Constraint object.") from e v = (excess_lb + excess_ub).T return v
74758
43.001766
86
py
scipy
scipy-main/scipy/optimize/_zeros_py.py
import warnings from collections import namedtuple import operator from . import _zeros from ._optimize import OptimizeResult, _call_callback_maybe_halt import numpy as np _iter = 100 _xtol = 2e-12 _rtol = 4 * np.finfo(float).eps __all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748', 'RootResults'] # Must agree with CONVERGED, SIGNERR, CONVERR, ... in zeros.h _ECONVERGED = 0 _ESIGNERR = -1 _ECONVERR = -2 _EVALUEERR = -3 _ECALLBACK = -4 _EINPROGRESS = 1 CONVERGED = 'converged' SIGNERR = 'sign error' CONVERR = 'convergence error' VALUEERR = 'value error' INPROGRESS = 'No error' flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR, _EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS} class RootResults(OptimizeResult): """Represents the root finding result. Attributes ---------- root : float Estimated root location. iterations : int Number of iterations needed to find the root. function_calls : int Number of times the function was called. converged : bool True if the routine converged. flag : str Description of the cause of termination. """ def __init__(self, root, iterations, function_calls, flag): self.root = root self.iterations = iterations self.function_calls = function_calls self.converged = flag == _ECONVERGED if flag in flag_map: self.flag = flag_map[flag] else: self.flag = flag def results_c(full_output, r): if full_output: x, funcalls, iterations, flag = r results = RootResults(root=x, iterations=iterations, function_calls=funcalls, flag=flag) return x, results else: return r def _results_select(full_output, r): """Select from a tuple of (root, funccalls, iterations, flag)""" x, funcalls, iterations, flag = r if full_output: results = RootResults(root=x, iterations=iterations, function_calls=funcalls, flag=flag) return x, results return x def _wrap_nan_raise(f): def f_raise(x, *args): fx = f(x, *args) f_raise._function_calls += 1 if np.isnan(fx): msg = (f'The function value at x={x} is NaN; ' 'solver cannot continue.') err = ValueError(msg) err._x = x err._function_calls = f_raise._function_calls raise err return fx f_raise._function_calls = 0 return f_raise def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50, fprime2=None, x1=None, rtol=0.0, full_output=False, disp=True): """ Find a root of a real or complex function using the Newton-Raphson (or secant or Halley's) method. Find a root of the scalar-valued function `func` given a nearby scalar starting point `x0`. The Newton-Raphson method is used if the derivative `fprime` of `func` is provided, otherwise the secant method is used. If the second order derivative `fprime2` of `func` is also provided, then Halley's method is used. If `x0` is a sequence with more than one item, `newton` returns an array: the roots of the function from each (scalar) starting point in `x0`. In this case, `func` must be vectorized to return a sequence or array of the same shape as its first argument. If `fprime` (`fprime2`) is given, then its return must also have the same shape: each element is the first (second) derivative of `func` with respect to its only variable evaluated at each element of its first argument. `newton` is for finding roots of a scalar-valued functions of a single variable. For problems involving several variables, see `root`. Parameters ---------- func : callable The function whose root is wanted. It must be a function of a single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...`` are extra arguments that can be passed in the `args` parameter. 
x0 : float, sequence, or ndarray An initial estimate of the root that should be somewhere near the actual root. If not scalar, then `func` must be vectorized and return a sequence or array of the same shape as its first argument. fprime : callable, optional The derivative of the function when available and convenient. If it is None (default), then the secant method is used. args : tuple, optional Extra arguments to be used in the function call. tol : float, optional The allowable error of the root's value. If `func` is complex-valued, a larger `tol` is recommended as both the real and imaginary parts of `x` contribute to ``|x - x0|``. maxiter : int, optional Maximum number of iterations. fprime2 : callable, optional The second order derivative of the function when available and convenient. If it is None (default), then the normal Newton-Raphson or the secant method is used. If it is not None, then Halley's method is used. x1 : float, optional Another estimate of the root that should be somewhere near the actual root. Used if `fprime` is not provided. rtol : float, optional Tolerance (relative) for termination. full_output : bool, optional If `full_output` is False (default), the root is returned. If True and `x0` is scalar, the return value is ``(x, r)``, where ``x`` is the root and ``r`` is a `RootResults` object. If True and `x0` is non-scalar, the return value is ``(x, converged, zero_der)`` (see Returns section for details). disp : bool, optional If True, raise a RuntimeError if the algorithm didn't converge, with the error message containing the number of iterations and current function value. Otherwise, the convergence status is recorded in a `RootResults` return object. Ignored if `x0` is not scalar. *Note: this has little to do with displaying, however, the `disp` keyword cannot be renamed for backwards compatibility.* Returns ------- root : float, sequence, or ndarray Estimated location where function is zero. r : `RootResults`, optional Present if ``full_output=True`` and `x0` is scalar. Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. converged : ndarray of bool, optional Present if ``full_output=True`` and `x0` is non-scalar. For vector functions, indicates which elements converged successfully. zero_der : ndarray of bool, optional Present if ``full_output=True`` and `x0` is non-scalar. For vector functions, indicates which elements had a zero derivative. See Also -------- root_scalar : interface to root solvers for scalar functions root : interface to root solvers for multi-input, multi-output functions Notes ----- The convergence rate of the Newton-Raphson method is quadratic, the Halley method is cubic, and the secant method is sub-quadratic. This means that if the function is well-behaved the actual error in the estimated root after the nth iteration is approximately the square (cube for Halley) of the error after the (n-1)th step. However, the stopping criterion used here is the step size and there is no guarantee that a root has been found. Consequently, the result should be verified. Safer algorithms are brentq, brenth, ridder, and bisect, but they all require that the root first be bracketed in an interval where the function changes sign. The brentq algorithm is recommended for general use in one dimensional problems when such an interval has been found. 
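
    For example, after ``root = newton(f, x0)`` it is good practice to check
    that ``abs(f(root))`` is acceptably small, or to confirm the result with a
    bracketing method such as `brentq`, before relying on it.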
When `newton` is used with arrays, it is best suited for the following types of problems: * The initial guesses, `x0`, are all relatively the same distance from the roots. * Some or all of the extra arguments, `args`, are also arrays so that a class of similar problems can be solved together. * The size of the initial guesses, `x0`, is larger than O(100) elements. Otherwise, a naive loop may perform as well or better than a vector. Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy import optimize >>> def f(x): ... return (x**3 - 1) # only one real root at x = 1 ``fprime`` is not provided, use the secant method: >>> root = optimize.newton(f, 1.5) >>> root 1.0000000000000016 >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x) >>> root 1.0000000000000016 Only ``fprime`` is provided, use the Newton-Raphson method: >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2) >>> root 1.0 Both ``fprime2`` and ``fprime`` are provided, use Halley's method: >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2, ... fprime2=lambda x: 6 * x) >>> root 1.0 When we want to find roots for a set of related starting values and/or function parameters, we can provide both of those as an array of inputs: >>> f = lambda x, a: x**3 - a >>> fder = lambda x, a: 3 * x**2 >>> rng = np.random.default_rng() >>> x = rng.standard_normal(100) >>> a = np.arange(-50, 50) >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200) The above is the equivalent of solving for each value in ``(x, a)`` separately in a for-loop, just faster: >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,), ... maxiter=200) ... for x0, a0 in zip(x, a)] >>> np.allclose(vec_res, loop_res) True Plot the results found for all values of ``a``: >>> analytical_result = np.sign(a) * np.abs(a)**(1/3) >>> fig, ax = plt.subplots() >>> ax.plot(a, analytical_result, 'o') >>> ax.plot(a, vec_res, '.') >>> ax.set_xlabel('$a$') >>> ax.set_ylabel('$x$ where $f(x, a)=0$') >>> plt.show() """ if tol <= 0: raise ValueError("tol too small (%g <= 0)" % tol) maxiter = operator.index(maxiter) if maxiter < 1: raise ValueError("maxiter must be greater than 0") if np.size(x0) > 1: return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output) # Convert to float (don't use float(x0); this works also for complex x0) x0 = np.asarray(x0)[()] p0 = x0 funcalls = 0 if fprime is not None: # Newton-Raphson method for itr in range(maxiter): # first evaluate fval fval = func(p0, *args) funcalls += 1 # If fval is 0, a root has been found, then terminate if fval == 0: return _results_select( full_output, (p0, funcalls, itr, _ECONVERGED)) fder = fprime(p0, *args) funcalls += 1 if fder == 0: msg = "Derivative was zero." if disp: msg += ( " Failed to converge after %d iterations, value is %s." % (itr + 1, p0)) raise RuntimeError(msg) warnings.warn(msg, RuntimeWarning) return _results_select( full_output, (p0, funcalls, itr + 1, _ECONVERR)) newton_step = fval / fder if fprime2: fder2 = fprime2(p0, *args) funcalls += 1 # Halley's method: # newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder) # Only do it if denominator stays close enough to 1 # Rationale: If 1-adj < 0, then Halley sends x in the # opposite direction to Newton. Doesn't happen if x is close # enough to root. 
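                    # (clarifying note) adj below equals
                    # fval * fder2 / (2 * fder**2); dividing the Newton step by
                    # (1 - adj) turns it into Halley's update, and adj == 0
                    # recovers the plain Newton step.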
adj = newton_step * fder2 / fder / 2 if np.abs(adj) < 1: newton_step /= 1.0 - adj p = p0 - newton_step if np.isclose(p, p0, rtol=rtol, atol=tol): return _results_select( full_output, (p, funcalls, itr + 1, _ECONVERGED)) p0 = p else: # Secant method if x1 is not None: if x1 == x0: raise ValueError("x1 and x0 must be different") p1 = x1 else: eps = 1e-4 p1 = x0 * (1 + eps) p1 += (eps if p1 >= 0 else -eps) q0 = func(p0, *args) funcalls += 1 q1 = func(p1, *args) funcalls += 1 if abs(q1) < abs(q0): p0, p1, q0, q1 = p1, p0, q1, q0 for itr in range(maxiter): if q1 == q0: if p1 != p0: msg = "Tolerance of %s reached." % (p1 - p0) if disp: msg += ( " Failed to converge after %d iterations, value is %s." % (itr + 1, p1)) raise RuntimeError(msg) warnings.warn(msg, RuntimeWarning) p = (p1 + p0) / 2.0 return _results_select( full_output, (p, funcalls, itr + 1, _ECONVERR)) else: if abs(q1) > abs(q0): p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1) else: p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0) if np.isclose(p, p1, rtol=rtol, atol=tol): return _results_select( full_output, (p, funcalls, itr + 1, _ECONVERGED)) p0, q0 = p1, q1 p1 = p q1 = func(p1, *args) funcalls += 1 if disp: msg = ("Failed to converge after %d iterations, value is %s." % (itr + 1, p)) raise RuntimeError(msg) return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR)) def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output): """ A vectorized version of Newton, Halley, and secant methods for arrays. Do not use this method directly. This method is called from `newton` when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`. """ # Explicitly copy `x0` as `p` will be modified inplace, but the # user's array should not be altered. p = np.array(x0, copy=True) failures = np.ones_like(p, dtype=bool) nz_der = np.ones_like(failures) if fprime is not None: # Newton-Raphson method for iteration in range(maxiter): # first evaluate fval fval = np.asarray(func(p, *args)) # If all fval are 0, all roots have been found, then terminate if not fval.any(): failures = fval.astype(bool) break fder = np.asarray(fprime(p, *args)) nz_der = (fder != 0) # stop iterating if all derivatives are zero if not nz_der.any(): break # Newton step dp = fval[nz_der] / fder[nz_der] if fprime2 is not None: fder2 = np.asarray(fprime2(p, *args)) dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der]) # only update nonzero derivatives p = np.asarray(p, dtype=np.result_type(p, dp, np.float64)) p[nz_der] -= dp failures[nz_der] = np.abs(dp) >= tol # items not yet converged # stop iterating if there aren't any failures, not incl zero der if not failures[nz_der].any(): break else: # Secant method dx = np.finfo(float).eps**0.33 p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx) q0 = np.asarray(func(p, *args)) q1 = np.asarray(func(p1, *args)) active = np.ones_like(p, dtype=bool) for iteration in range(maxiter): nz_der = (q1 != q0) # stop iterating if all derivatives are zero if not nz_der.any(): p = (p1 + p) / 2.0 break # Secant Step dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der] # only update nonzero derivatives p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64)) p[nz_der] = p1[nz_der] - dp active_zero_der = ~nz_der & active p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0 active &= nz_der # don't assign zero derivatives again failures[nz_der] = np.abs(dp) >= tol # not yet converged # stop iterating if there aren't any failures, not incl zero der if not failures[nz_der].any(): break p1, p = p, p1 q0 = q1 q1 = np.asarray(func(p1, *args)) 
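
    # After the iterations: warn about zero derivatives and about elements that
    # failed to converge, raising an error only if every element failed.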
zero_der = ~nz_der & failures # don't include converged with zero-ders if zero_der.any(): # Secant warnings if fprime is None: nonzero_dp = (p1 != p) # non-zero dp, but infinite newton step zero_der_nz_dp = (zero_der & nonzero_dp) if zero_der_nz_dp.any(): rms = np.sqrt( sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2) ) warnings.warn( f'RMS of {rms:g} reached', RuntimeWarning) # Newton or Halley warnings else: all_or_some = 'all' if zero_der.all() else 'some' msg = f'{all_or_some:s} derivatives were zero' warnings.warn(msg, RuntimeWarning) elif failures.any(): all_or_some = 'all' if failures.all() else 'some' msg = '{:s} failed to converge after {:d} iterations'.format( all_or_some, maxiter ) if failures.all(): raise RuntimeError(msg) warnings.warn(msg, RuntimeWarning) if full_output: result = namedtuple('result', ('root', 'converged', 'zero_der')) p = result(p, ~failures, zero_der) return p def bisect(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """ Find root of a function within an interval using bisection. Basic bisection routine to find a root of the function `f` between the arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs. Slow but sure. Parameters ---------- f : function Python function returning a number. `f` must be continuous, and f(a) and f(b) must have opposite signs. a : scalar One end of the bracketing interval [a,b]. b : scalar The other end of the bracketing interval [a,b]. xtol : number, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter must be positive. rtol : number, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter cannot be smaller than its default value of ``4*np.finfo(float).eps``. maxiter : int, optional If convergence is not achieved in `maxiter` iterations, an error is raised. Must be >= 0. args : tuple, optional Containing extra arguments for the function `f`. `f` is called by ``apply(f, (x)+args)``. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where x is the root, and r is a `RootResults` object. disp : bool, optional If True, raise RuntimeError if the algorithm didn't converge. Otherwise, the convergence status is recorded in a `RootResults` return object. Returns ------- root : float Root of `f` between `a` and `b`. r : `RootResults` (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. Examples -------- >>> def f(x): ... return (x**2 - 1) >>> from scipy import optimize >>> root = optimize.bisect(f, 0, 2) >>> root 1.0 >>> root = optimize.bisect(f, -2, 0) >>> root -1.0 See Also -------- brentq, brenth, bisect, newton fixed_point : scalar fixed-point finder fsolve : n-dimensional root-finding """ if not isinstance(args, tuple): args = (args,) maxiter = operator.index(maxiter) if xtol <= 0: raise ValueError("xtol too small (%g <= 0)" % xtol) if rtol < _rtol: raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") f = _wrap_nan_raise(f) r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp) return results_c(full_output, r) def ridder(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """ Find a root of a function in an interval using Ridder's method. 
Parameters ---------- f : function Python function returning a number. f must be continuous, and f(a) and f(b) must have opposite signs. a : scalar One end of the bracketing interval [a,b]. b : scalar The other end of the bracketing interval [a,b]. xtol : number, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter must be positive. rtol : number, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter cannot be smaller than its default value of ``4*np.finfo(float).eps``. maxiter : int, optional If convergence is not achieved in `maxiter` iterations, an error is raised. Must be >= 0. args : tuple, optional Containing extra arguments for the function `f`. `f` is called by ``apply(f, (x)+args)``. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a `RootResults` object. disp : bool, optional If True, raise RuntimeError if the algorithm didn't converge. Otherwise, the convergence status is recorded in any `RootResults` return object. Returns ------- root : float Root of `f` between `a` and `b`. r : `RootResults` (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. See Also -------- brentq, brenth, bisect, newton : 1-D root-finding fixed_point : scalar fixed-point finder Notes ----- Uses [Ridders1979]_ method to find a root of the function `f` between the arguments `a` and `b`. Ridders' method is faster than bisection, but not generally as fast as the Brent routines. [Ridders1979]_ provides the classic description and source of the algorithm. A description can also be found in any recent edition of Numerical Recipes. The routine used here diverges slightly from standard presentations in order to be a bit more careful of tolerance. References ---------- .. [Ridders1979] Ridders, C. F. J. "A New Algorithm for Computing a Single Root of a Real Continuous Function." IEEE Trans. Circuits Systems 26, 979-980, 1979. Examples -------- >>> def f(x): ... return (x**2 - 1) >>> from scipy import optimize >>> root = optimize.ridder(f, 0, 2) >>> root 1.0 >>> root = optimize.ridder(f, -2, 0) >>> root -1.0 """ if not isinstance(args, tuple): args = (args,) maxiter = operator.index(maxiter) if xtol <= 0: raise ValueError("xtol too small (%g <= 0)" % xtol) if rtol < _rtol: raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") f = _wrap_nan_raise(f) r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp) return results_c(full_output, r) def brentq(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """ Find a root of a function in a bracketing interval using Brent's method. Uses the classic Brent's method to find a root of the function `f` on the sign changing interval [a , b]. Generally considered the best of the rootfinding routines here. It is a safe version of the secant method that uses inverse quadratic extrapolation. Brent's method combines root bracketing, interval bisection, and inverse quadratic interpolation. It is sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973) claims convergence is guaranteed for functions computable within [a,b]. [Brent1973]_ provides the classic description of the algorithm. 
Another description can be found in a recent edition of Numerical Recipes, including [PressEtal1992]_. A third description is at http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to understand the algorithm just by reading our code. Our code diverges a bit from standard presentations: we choose a different formula for the extrapolation step. Parameters ---------- f : function Python function returning a number. The function :math:`f` must be continuous, and :math:`f(a)` and :math:`f(b)` must have opposite signs. a : scalar One end of the bracketing interval :math:`[a, b]`. b : scalar The other end of the bracketing interval :math:`[a, b]`. xtol : number, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter must be positive. For nice functions, Brent's method will often satisfy the above condition with ``xtol/2`` and ``rtol/2``. [Brent1973]_ rtol : number, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter cannot be smaller than its default value of ``4*np.finfo(float).eps``. For nice functions, Brent's method will often satisfy the above condition with ``xtol/2`` and ``rtol/2``. [Brent1973]_ maxiter : int, optional If convergence is not achieved in `maxiter` iterations, an error is raised. Must be >= 0. args : tuple, optional Containing extra arguments for the function `f`. `f` is called by ``apply(f, (x)+args)``. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a `RootResults` object. disp : bool, optional If True, raise RuntimeError if the algorithm didn't converge. Otherwise, the convergence status is recorded in any `RootResults` return object. Returns ------- root : float Root of `f` between `a` and `b`. r : `RootResults` (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. Notes ----- `f` must be continuous. f(a) and f(b) must have opposite signs. Related functions fall into several classes: multivariate local optimizers `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg` nonlinear least squares minimizer `leastsq` constrained multivariate optimizers `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla` global optimizers `basinhopping`, `brute`, `differential_evolution` local scalar minimizers `fminbound`, `brent`, `golden`, `bracket` N-D root-finding `fsolve` 1-D root-finding `brenth`, `ridder`, `bisect`, `newton` scalar fixed-point finder `fixed_point` References ---------- .. [Brent1973] Brent, R. P., *Algorithms for Minimization Without Derivatives*. Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4. .. [PressEtal1992] Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T. *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed. Cambridge, England: Cambridge University Press, pp. 352-355, 1992. Section 9.3: "Van Wijngaarden-Dekker-Brent Method." Examples -------- >>> def f(x): ... 
return (x**2 - 1) >>> from scipy import optimize >>> root = optimize.brentq(f, -2, 0) >>> root -1.0 >>> root = optimize.brentq(f, 0, 2) >>> root 1.0 """ if not isinstance(args, tuple): args = (args,) maxiter = operator.index(maxiter) if xtol <= 0: raise ValueError("xtol too small (%g <= 0)" % xtol) if rtol < _rtol: raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") f = _wrap_nan_raise(f) r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp) return results_c(full_output, r) def brenth(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """Find a root of a function in a bracketing interval using Brent's method with hyperbolic extrapolation. A variation on the classic Brent routine to find a root of the function f between the arguments a and b that uses hyperbolic extrapolation instead of inverse quadratic extrapolation. Bus & Dekker (1975) guarantee convergence for this method, claiming that the upper bound of function evaluations here is 4 or 5 times lesser than that for bisection. f(a) and f(b) cannot have the same signs. Generally, on a par with the brent routine, but not as heavily tested. It is a safe version of the secant method that uses hyperbolic extrapolation. The version here is by Chuck Harris, and implements Algorithm M of [BusAndDekker1975]_, where further details (convergence properties, additional remarks and such) can be found Parameters ---------- f : function Python function returning a number. f must be continuous, and f(a) and f(b) must have opposite signs. a : scalar One end of the bracketing interval [a,b]. b : scalar The other end of the bracketing interval [a,b]. xtol : number, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter must be positive. As with `brentq`, for nice functions the method will often satisfy the above condition with ``xtol/2`` and ``rtol/2``. rtol : number, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter cannot be smaller than its default value of ``4*np.finfo(float).eps``. As with `brentq`, for nice functions the method will often satisfy the above condition with ``xtol/2`` and ``rtol/2``. maxiter : int, optional If convergence is not achieved in `maxiter` iterations, an error is raised. Must be >= 0. args : tuple, optional Containing extra arguments for the function `f`. `f` is called by ``apply(f, (x)+args)``. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a `RootResults` object. disp : bool, optional If True, raise RuntimeError if the algorithm didn't converge. Otherwise, the convergence status is recorded in any `RootResults` return object. Returns ------- root : float Root of `f` between `a` and `b`. r : `RootResults` (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. 
See Also -------- fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers leastsq : nonlinear least squares minimizer fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers basinhopping, differential_evolution, brute : global optimizers fminbound, brent, golden, bracket : local scalar minimizers fsolve : N-D root-finding brentq, brenth, ridder, bisect, newton : 1-D root-finding fixed_point : scalar fixed-point finder References ---------- .. [BusAndDekker1975] Bus, J. C. P., Dekker, T. J., "Two Efficient Algorithms with Guaranteed Convergence for Finding a Zero of a Function", ACM Transactions on Mathematical Software, Vol. 1, Issue 4, Dec. 1975, pp. 330-345. Section 3: "Algorithm M". :doi:`10.1145/355656.355659` Examples -------- >>> def f(x): ... return (x**2 - 1) >>> from scipy import optimize >>> root = optimize.brenth(f, -2, 0) >>> root -1.0 >>> root = optimize.brenth(f, 0, 2) >>> root 1.0 """ if not isinstance(args, tuple): args = (args,) maxiter = operator.index(maxiter) if xtol <= 0: raise ValueError("xtol too small (%g <= 0)" % xtol) if rtol < _rtol: raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})") f = _wrap_nan_raise(f) r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp) return results_c(full_output, r) ################################ # TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by # Alefeld, G. E. and Potra, F. A. and Shi, Yixun, # See [1] def _notclose(fs, rtol=_rtol, atol=_xtol): # Ensure not None, not 0, all finite, and not very close to each other notclosefvals = ( all(fs) and all(np.isfinite(fs)) and not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol)) for i, _f in enumerate(fs[:-1]))) return notclosefvals def _secant(xvals, fvals): """Perform a secant step, taking a little care""" # Secant has many "mathematically" equivalent formulations # x2 = x0 - (x1 - x0)/(f1 - f0) * f0 # = x1 - (x1 - x0)/(f1 - f0) * f1 # = (-x1 * f0 + x0 * f1) / (f1 - f0) # = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) # = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) x0, x1 = xvals[:2] f0, f1 = fvals[:2] if f0 == f1: return np.nan if np.abs(f1) > np.abs(f0): x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1) else: x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0) return x2 def _update_bracket(ab, fab, c, fc): """Update a bracket given (c, fc), return the discarded endpoints.""" fa, fb = fab idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1) rx, rfx = ab[idx], fab[idx] fab[idx] = fc ab[idx] = c return rx, rfx def _compute_divided_differences(xvals, fvals, N=None, full=True, forward=True): """Return a matrix of divided differences for the xvals, fvals pairs DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i If full is False, just return the main diagonal(or last row): f[a], f[a, b] and f[a, b, c]. If forward is False, return f[c], f[b, c], f[a, b, c].""" if full: if forward: xvals = np.asarray(xvals) else: xvals = np.array(xvals)[::-1] M = len(xvals) N = M if N is None else min(N, M) DD = np.zeros([M, N]) DD[:, 0] = fvals[:] for i in range(1, N): DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) / (xvals[i:] - xvals[:M - i])) return DD xvals = np.asarray(xvals) dd = np.array(fvals) row = np.array(fvals) idx2Use = (0 if forward else -1) dd[0] = fvals[idx2Use] for i in range(1, len(xvals)): denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1] row = np.diff(row)[:] / denom dd[i] = row[idx2Use] return dd def _interpolated_poly(xvals, fvals, x): """Compute p(x) for the polynomial passing through the specified locations. 
Use Neville's algorithm to compute p(x) where p is the minimal degree polynomial passing through the points xvals, fvals""" xvals = np.asarray(xvals) N = len(xvals) Q = np.zeros([N, N]) D = np.zeros([N, N]) Q[:, 0] = fvals[:] D[:, 0] = fvals[:] for k in range(1, N): alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1] diffik = xvals[0:N - k] - xvals[k:N] Q[k:, k] = (xvals[k:] - x) / diffik * alpha D[k:, k] = (xvals[:N - k] - x) / diffik * alpha # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root return np.sum(Q[-1, 1:]) + Q[-1, 0] def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd): """Inverse cubic interpolation f-values -> x-values Given four points (fa, a), (fb, b), (fc, c), (fd, d) with fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points and compute x=IP(0). """ return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0) def _newton_quadratic(ab, fab, d, fd, k): """Apply Newton-Raphson like steps, using divided differences to approximate f' ab is a real interval [a, b] containing a root, fab holds the real values of f(a), f(b) d is a real number outside [ab, b] k is the number of steps to apply """ a, b = ab fa, fb = fab _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd], forward=True, full=False) # _P is the quadratic polynomial through the 3 points def _P(x): # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b) return (A * (x - b) + B) * (x - a) + fa if A == 0: r = a - fa / B else: r = (a if np.sign(A) * np.sign(fa) > 0 else b) # Apply k Newton-Raphson steps to _P(x), starting from x=r for i in range(k): r1 = r - _P(r) / (B + A * (2 * r - a - b)) if not (ab[0] < r1 < ab[1]): if (ab[0] < r < ab[1]): return r r = sum(ab) / 2.0 break r = r1 return r class TOMS748Solver: """Solve f(x, *args) == 0 using Algorithm748 of Alefeld, Potro & Shi. """ _MU = 0.5 _K_MIN = 1 _K_MAX = 100 # A very high value for real usage. Expect 1, 2, maybe 3. 
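
    # Typical driver usage (a sketch; the public `toms748` function below
    # constructs and runs this solver in exactly this way):
    #
    #     solver = TOMS748Solver()
    #     x, funcalls, iterations, flag = solver.solve(
    #         f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,
    #         maxiter=maxiter, disp=disp)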
def __init__(self): self.f = None self.args = None self.function_calls = 0 self.iterations = 0 self.k = 2 # ab=[a,b] is a global interval containing a root self.ab = [np.nan, np.nan] # fab is function values at a, b self.fab = [np.nan, np.nan] self.d = None self.fd = None self.e = None self.fe = None self.disp = False self.xtol = _xtol self.rtol = _rtol self.maxiter = _iter def configure(self, xtol, rtol, maxiter, disp, k): self.disp = disp self.xtol = xtol self.rtol = rtol self.maxiter = maxiter # Silently replace a low value of k with 1 self.k = max(k, self._K_MIN) # Noisily replace a high value of k with self._K_MAX if self.k > self._K_MAX: msg = "toms748: Overriding k: ->%d" % self._K_MAX warnings.warn(msg, RuntimeWarning) self.k = self._K_MAX def _callf(self, x, error=True): """Call the user-supplied function, update book-keeping""" fx = self.f(x, *self.args) self.function_calls += 1 if not np.isfinite(fx) and error: raise ValueError(f"Invalid function value: f({x:f}) -> {fx} ") return fx def get_result(self, x, flag=_ECONVERGED): r"""Package the result and statistics into a tuple.""" return (x, self.function_calls, self.iterations, flag) def _update_bracket(self, c, fc): return _update_bracket(self.ab, self.fab, c, fc) def start(self, f, a, b, args=()): r"""Prepare for the iterations.""" self.function_calls = 0 self.iterations = 0 self.f = f self.args = args self.ab[:] = [a, b] if not np.isfinite(a) or np.imag(a) != 0: raise ValueError("Invalid x value: %s " % (a)) if not np.isfinite(b) or np.imag(b) != 0: raise ValueError("Invalid x value: %s " % (b)) fa = self._callf(a) if not np.isfinite(fa) or np.imag(fa) != 0: raise ValueError(f"Invalid function value: f({a:f}) -> {fa} ") if fa == 0: return _ECONVERGED, a fb = self._callf(b) if not np.isfinite(fb) or np.imag(fb) != 0: raise ValueError(f"Invalid function value: f({b:f}) -> {fb} ") if fb == 0: return _ECONVERGED, b if np.sign(fb) * np.sign(fa) > 0: raise ValueError("f(a) and f(b) must have different signs, but " "f({:e})={:e}, f({:e})={:e} ".format(a, fa, b, fb)) self.fab[:] = [fa, fb] return _EINPROGRESS, sum(self.ab) / 2.0 def get_status(self): """Determine the current status.""" a, b = self.ab[:2] if np.isclose(a, b, rtol=self.rtol, atol=self.xtol): return _ECONVERGED, sum(self.ab) / 2.0 if self.iterations >= self.maxiter: return _ECONVERR, sum(self.ab) / 2.0 return _EINPROGRESS, sum(self.ab) / 2.0 def iterate(self): """Perform one step in the algorithm. Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995] """ self.iterations += 1 eps = np.finfo(float).eps d, fd, e, fe = self.d, self.fd, self.e, self.fe ab_width = self.ab[1] - self.ab[0] # Need the start width below c = None for nsteps in range(2, self.k+2): # If the f-values are sufficiently separated, perform an inverse # polynomial interpolation step. Otherwise, nsteps repeats of # an approximate Newton-Raphson step. 
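            # (note) `_notclose` with rtol=0, atol=32*eps below rejects f-values
            # that are zero, non-finite, or nearly equal, which would make the
            # inverse cubic interpolation ill-conditioned; in that case a
            # Newton-quadratic step is taken instead.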
if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps): c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e, self.fab[0], self.fab[1], fd, fe) if self.ab[0] < c0 < self.ab[1]: c = c0 if c is None: c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps) fc = self._callf(c) if fc == 0: return _ECONVERGED, c # re-bracket e, fe = d, fd d, fd = self._update_bracket(c, fc) # u is the endpoint with the smallest f-value uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1) u, fu = self.ab[uix], self.fab[uix] _, A = _compute_divided_differences(self.ab, self.fab, forward=(uix == 0), full=False) c = u - 2 * fu / A if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]): c = sum(self.ab) / 2.0 else: if np.isclose(c, u, rtol=eps, atol=0): # c didn't change (much). # Either because the f-values at the endpoints have vastly # differing magnitudes, or because the root is very close to # that endpoint frs = np.frexp(self.fab)[1] if frs[uix] < frs[1 - uix] - 50: # Differ by more than 2**50 c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32 else: # Make a bigger adjustment, about the # size of the requested tolerance. mm = (1 if uix == 0 else -1) adj = mm * np.abs(c) * self.rtol + mm * self.xtol c = u + adj if not self.ab[0] < c < self.ab[1]: c = sum(self.ab) / 2.0 fc = self._callf(c) if fc == 0: return _ECONVERGED, c e, fe = d, fd d, fd = self._update_bracket(c, fc) # If the width of the new interval did not decrease enough, bisect if self.ab[1] - self.ab[0] > self._MU * ab_width: e, fe = d, fd z = sum(self.ab) / 2.0 fz = self._callf(z) if fz == 0: return _ECONVERGED, z d, fd = self._update_bracket(z, fz) # Record d and e for next iteration self.d, self.fd = d, fd self.e, self.fe = e, fe status, xn = self.get_status() return status, xn def solve(self, f, a, b, args=(), xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True): r"""Solve f(x) = 0 given an interval containing a root.""" self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k) status, xn = self.start(f, a, b, args) if status == _ECONVERGED: return self.get_result(xn) # The first step only has two x-values. c = _secant(self.ab, self.fab) if not self.ab[0] < c < self.ab[1]: c = sum(self.ab) / 2.0 fc = self._callf(c) if fc == 0: return self.get_result(c) self.d, self.fd = self._update_bracket(c, fc) self.e, self.fe = None, None self.iterations += 1 while True: status, xn = self.iterate() if status == _ECONVERGED: return self.get_result(xn) if status == _ECONVERR: fmt = "Failed to converge after %d iterations, bracket is %s" if disp: msg = fmt % (self.iterations + 1, self.ab) raise RuntimeError(msg) return self.get_result(xn, _ECONVERR) def toms748(f, a, b, args=(), k=1, xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """ Find a root using TOMS Algorithm 748 method. Implements the Algorithm 748 method of Alefeld, Potro and Shi to find a root of the function `f` on the interval `[a , b]`, where `f(a)` and `f(b)` must have opposite signs. It uses a mixture of inverse cubic interpolation and "Newton-quadratic" steps. [APS1995]. Parameters ---------- f : function Python function returning a scalar. The function :math:`f` must be continuous, and :math:`f(a)` and :math:`f(b)` have opposite signs. a : scalar, lower boundary of the search interval b : scalar, upper boundary of the search interval args : tuple, optional containing extra arguments for the function `f`. `f` is called by ``f(x, *args)``. k : int, optional The number of Newton quadratic steps to perform each iteration. ``k>=1``. 
xtol : scalar, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The parameter must be positive. rtol : scalar, optional The computed root ``x0`` will satisfy ``np.allclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the exact root. maxiter : int, optional If convergence is not achieved in `maxiter` iterations, an error is raised. Must be >= 0. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a `RootResults` object. disp : bool, optional If True, raise RuntimeError if the algorithm didn't converge. Otherwise, the convergence status is recorded in the `RootResults` return object. Returns ------- root : float Approximate root of `f` r : `RootResults` (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. See Also -------- brentq, brenth, ridder, bisect, newton fsolve : find roots in N dimensions. Notes ----- `f` must be continuous. Algorithm 748 with ``k=2`` is asymptotically the most efficient algorithm known for finding roots of a four times continuously differentiable function. In contrast with Brent's algorithm, which may only decrease the length of the enclosing bracket on the last step, Algorithm 748 decreases it each iteration with the same asymptotic efficiency as it finds the root. For easy statement of efficiency indices, assume that `f` has 4 continuouous deriviatives. For ``k=1``, the convergence order is at least 2.7, and with about asymptotically 2 function evaluations per iteration, the efficiency index is approximately 1.65. For ``k=2``, the order is about 4.6 with asymptotically 3 function evaluations per iteration, and the efficiency index 1.66. For higher values of `k`, the efficiency index approaches the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are usually appropriate. References ---------- .. [APS1995] Alefeld, G. E. and Potra, F. A. and Shi, Yixun, *Algorithm 748: Enclosing Zeros of Continuous Functions*, ACM Trans. Math. Softw. Volume 221(1995) doi = {10.1145/210089.210111} Examples -------- >>> def f(x): ... return (x**3 - 1) # only one real root at x = 1 >>> from scipy import optimize >>> root, results = optimize.toms748(f, 0, 2, full_output=True) >>> root 1.0 >>> results converged: True flag: converged function_calls: 11 iterations: 5 root: 1.0 """ if xtol <= 0: raise ValueError("xtol too small (%g <= 0)" % xtol) if rtol < _rtol / 4: raise ValueError(f"rtol too small ({rtol:g} < {_rtol/4:g})") maxiter = operator.index(maxiter) if maxiter < 1: raise ValueError("maxiter must be greater than 0") if not np.isfinite(a): raise ValueError("a is not finite %s" % a) if not np.isfinite(b): raise ValueError("b is not finite %s" % b) if a >= b: raise ValueError(f"a and b are not an interval [{a}, {b}]") if not k >= 1: raise ValueError("k too small (%s < 1)" % k) if not isinstance(args, tuple): args = (args,) f = _wrap_nan_raise(f) solver = TOMS748Solver() result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp) x, function_calls, iterations, flag = result return _results_select(full_output, (x, function_calls, iterations, flag)) def _chandrupatla(func, a, b, *, args=(), xatol=_xtol, xrtol=_rtol, fatol=None, frtol=0, maxiter=_iter, callback=None): """Find the root of an elementwise function using Chandrupatla's algorithm. 
For each element of the output of `func`, `chandrupatla` seeks the scalar root that makes the element 0. This function allows for `a`, `b`, and the output of `func` to be of any broadcastable shapes. Parameters ---------- func : callable The function whose root is desired. The signature must be:: func(x: ndarray, *args) -> ndarray where each element of ``x`` is a finite real and ``args`` is a tuple, which may contain an arbitrary number of components of any type(s). ``func`` must be an elementwise function: each element ``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``. `_chandrupatla` seeks an array ``x`` such that ``func(x)`` is an array of zeros. a, b : array_like The lower and upper bounds of the root of the function. Must be broadcastable with one another. args : tuple, optional Additional positional arguments to be passed to `func`. xatol, xrtol, fatol, frtol : float, optional Absolute and relative tolerances on the root and function value. See Notes for details. maxiter : int, optional The maximum number of iterations of the algorithm to perform. callback : callable, optional An optional user-supplied function to be called before the first iteration and after each iteration. Called as ``callback(res)``, where ``res`` is an ``OptimizeResult`` similar to that returned by `_chandrupatla` (but containing the current iterate's values of all variables). If `callback` raises a ``StopIteration``, the algorithm will terminate immediately and `_chandrupatla` will return a result. Returns ------- res : OptimizeResult An instance of `scipy.optimize.OptimizeResult` with the following attributes. The descriptions are written as though the values will be scalars; however, if `func` returns an array, the outputs will be arrays of the same shape. x : float The root of the function, if the algorithm terminated successfully. nfev : int The number of times the function was called to find the root. nit : int The number of iterations of Chandrupatla's algorithm performed. status : int An integer representing the exit status of the algorithm. ``0`` : The algorithm converged to the specified tolerances. ``-1`` : The algorithm encountered an invalid bracket. ``-2`` : The maximum number of iterations was reached. ``-3`` : A non-finite value was encountered. ``-4`` : Iteration was terminated by `callback`. ``1`` : The algorithm is proceeding normally (in `callback` only). success : bool ``True`` when the algorithm terminated successfully (status ``0``). fun : float The value of `func` evaluated at `x`. xl, xr : float The lower and upper ends of the bracket. fl, fr : float The function value at the lower and upper ends of the bracket. Notes ----- Implemented based on Chandrupatla's original paper [1]_. If ``xl`` and ``xr`` are the left and right ends of the bracket, ``xmin = xl if abs(func(xl)) <= abs(func(xr)) else xr``, and ``fmin0 = min(func(a), func(b))``, then the algorithm is considered to have converged when ``abs(xr - xl) < xatol + abs(xmin) * xrtol`` or ``fun(xmin) <= fatol + abs(fmin0) * frtol``. This is equivalent to the termination condition described in [1]_ with ``xrtol = 4e-10``, ``xatol = 1e-5``, and ``fatol = frtol = 0``. The default values are ``xatol = 2e-12``, ``xrtol = 4 * np.finfo(float).eps``, ``frtol = 0``, and ``fatol`` is the smallest normal number of the ``dtype`` returned by ``func``. References ---------- .. [1] Chandrupatla, Tirupathi R. "A new hybrid quadratic/bisection algorithm for finding the zero of a nonlinear function without using derivatives". 
Advances in Engineering Software, 28(3), 145-149. https://doi.org/10.1016/s0965-9978(96)00051-8 See Also -------- brentq, brenth, ridder, bisect, newton Examples -------- >>> from scipy import optimize >>> def f(x, c): ... return x**3 - 2*x - c >>> c = 5 >>> res = optimize._zeros_py._chandrupatla(f, 0, 3, args=(c,)) >>> res.x 2.0945514818937463 >>> c = [3, 4, 5] >>> res = optimize._zeros_py._chandrupatla(f, 0, 3, args=(c,)) >>> res.x array([1.8932892 , 2. , 2.09455148]) """ res = _chandrupatla_iv(func, a, b, args, xatol, xrtol, fatol, frtol, maxiter, callback) func, a, b, args, xatol, xrtol, fatol, frtol, maxiter, callback = res # Initialization xs, fs, args, shape, dtype = _scalar_optimization_initialize(func, (a, b), args) x1, x2 = xs f1, f2 = fs status = np.full_like(x1, _EINPROGRESS, dtype=int) # in progress nit, nfev = 0, 2 # two function evaluations performed above fatol = np.finfo(dtype).tiny if fatol is None else fatol frtol = frtol * np.minimum(np.abs(f1), np.abs(f2)) work = OptimizeResult(x1=x1, f1=f1, x2=x2, f2=f2, x3=None, f3=None, t=0.5, xatol=xatol, xrtol=xrtol, fatol=fatol, frtol=frtol, nit=nit, nfev=nfev, status=status) res_work_pairs = [('status', 'status'), ('x', 'xmin'), ('fun', 'fmin'), ('nit', 'nit'), ('nfev', 'nfev'), ('xl', 'x1'), ('fl', 'f1'), ('xr', 'x2'), ('fr', 'f2')] def pre_func_eval(work): # [1] Figure 1 (first box) x = work.x1 + work.t * (work.x2 - work.x1) return x def post_func_eval(x, f, work): # [1] Figure 1 (first diamond and boxes) # Note: y/n are reversed in figure; compare to BASIC in appendix work.x3, work.f3 = work.x2.copy(), work.f2.copy() j = np.sign(f) == np.sign(work.f1) nj = ~j work.x3[j], work.f3[j] = work.x1[j], work.f1[j] work.x2[nj], work.f2[nj] = work.x1[nj], work.f1[nj] work.x1, work.f1 = x, f def check_termination(work): # [1] Figure 1 (second diamond) # Check for all terminal conditions and record statuses. # See [1] Section 4 (first two sentences) i = np.abs(work.f1) < np.abs(work.f2) work.xmin = np.choose(i, (work.x2, work.x1)) work.fmin = np.choose(i, (work.f2, work.f1)) stop = np.zeros_like(work.x1, dtype=bool) # termination condition met # This is the convergence criterion used in bisect. Chandrupatla's # criterion is equivalent to this except with a factor of 4 on `xrtol`. work.dx = abs(work.x2 - work.x1) work.tol = abs(work.xmin) * work.xrtol + work.xatol i = work.dx < work.tol # Modify in place to incorporate tolerance on function value. Note that # `frtol` has been redefined as `frtol = frtol * np.minimum(f1, f2)`, # where `f1` and `f2` are the function evaluated at the original ends of # the bracket. 
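        # In other words: stop when
        # |f(xmin)| <= fatol + frtol_orig * min(|f(a)|, |f(b)|),
        # where frtol_orig is the tolerance originally passed by the caller.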
i |= np.abs(work.fmin) <= work.fatol + work.frtol work.status[i] = _ECONVERGED stop[i] = True i = (np.sign(work.f1) == np.sign(work.f2)) & ~stop work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, _ESIGNERR stop[i] = True i = ~((np.isfinite(work.x1) & np.isfinite(work.x2) & np.isfinite(work.f1) & np.isfinite(work.f2)) | stop) work.xmin[i], work.fmin[i], work.status[i] = np.nan, np.nan, _EVALUEERR stop[i] = True return stop def post_termination_check(work): # [1] Figure 1 (third diamond and boxes / Equation 1) xi1 = (work.x1 - work.x2) / (work.x3 - work.x2) phi1 = (work.f1 - work.f2) / (work.f3 - work.f2) alpha = (work.x3 - work.x1) / (work.x2 - work.x1) j = ((1 - np.sqrt(1 - xi1)) < phi1) & (phi1 < np.sqrt(xi1)) f1j, f2j, f3j, alphaj = work.f1[j], work.f2[j], work.f3[j], alpha[j] t = np.full_like(alpha, 0.5) t[j] = (f1j / (f1j - f2j) * f3j / (f3j - f2j) - alphaj * f1j / (f3j - f1j) * f2j / (f2j - f3j)) # [1] Figure 1 (last box; see also BASIC in appendix with comment # "Adjust T Away from the Interval Boundary") tl = 0.5 * work.tol / work.dx work.t = np.clip(t, tl, 1 - tl) def customize_result(res): xl, xr, fl, fr = res['xl'], res['xr'], res['fl'], res['fr'] i = res['xl'] < res['xr'] res['xl'] = np.choose(i, (xr, xl)) res['xr'] = np.choose(i, (xl, xr)) res['fl'] = np.choose(i, (fr, fl)) res['fr'] = np.choose(i, (fl, fr)) return _scalar_optimization_loop(work, callback, shape, maxiter, func, args, dtype, pre_func_eval, post_func_eval, check_termination, post_termination_check, customize_result, res_work_pairs) def _scalar_optimization_loop(work, callback, shape, maxiter, func, args, dtype, pre_func_eval, post_func_eval, check_termination, post_termination_check, customize_result, res_work_pairs): """Main loop of a vectorized scalar optimization algorithm Parameters ---------- work : OptimizeResult All variables that need to be retained between iterations. Must contain attributes `nit`, `nfev`, and `success` callback : callable User-specified callback function shape : tuple of ints The shape of all output arrays maxiter : Maximum number of iterations of the algorithm func : callable The user-specified callable that is being optimized or solved args : tuple Additional positional arguments to be passed to `func`. dtype : NumPy dtype The common dtype of all abscissae and function values pre_func_eval : callable A function that accepts `work` and returns `x`, the active elements of `x` at which `func` will be evaluated. May modify attributes of `work` with any algorithmic steps that need to happen at the beginning of an iteration, before `func` is evaluated, post_func_eval : callable A function that accepts `x`, `func(x)`, and `work`. May modify attributes of `work` with any algorithmic steps that need to happen in the middle of an iteration, after `func` is evaluated but before the termination check. check_termination : callable A function that accepts `work` and returns `stop`, a boolean array indicating which of the active elements have met a termination condition. post_termination_check : callable A function that accepts `work`. May modify `work` with any algorithmic steps that need to happen after the termination check and before the end of the iteration. customize_result : callable A function that accepts `res`. May modify `res` according to preferences (e.g. rearrange elements between attributes). 
res_work_pairs : list of (str, str) Identifies correspondence between attributes of `res` and attributes of `work`; i.e., attributes of active elements of `work` will be copied to the appropriate indices of `res` when appropriate. The order determines the order in which OptimizeResult attributes will be pretty-printed. Returns ------- res : OptimizeResult The final result object Notes ----- Besides providing structure, this framework provides several important services for a vectorized optimization algorithm. - It handles common tasks involving iteration count, function evaluation count, a user-specified callback, and associated termination conditions. - It compresses the attributes of `work` to eliminate unnecessary computation on elements that have already converged. """ cb_terminate = False # Initialize the result object and active element index array n_elements = int(np.prod(shape)) or 1 active = np.arange(n_elements) # in-progress element indices res_dict = {i: np.zeros(n_elements, dtype=dtype) for i, j in res_work_pairs} res_dict['success'] = np.zeros(n_elements, dtype=bool) res_dict['status'] = np.full(n_elements, _EINPROGRESS) res_dict['nit'] = res_dict['nit'].astype(int) res_dict['nfev'] = res_dict['nfev'].astype(int) res = OptimizeResult(res_dict) work.args = args active = _scalar_optimization_check_termination( work, res, res_work_pairs, active, check_termination) if callback is not None: temp = _scalar_optimization_prepare_result( work, res, res_work_pairs, active, shape, customize_result) if _call_callback_maybe_halt(callback, temp): cb_terminate = True while work.nit < maxiter and active.size and not cb_terminate: x = pre_func_eval(work) f = func(x, *work.args) f = np.asarray(f, dtype=dtype).ravel() work.nfev += 1 post_func_eval(x, f, work) work.nit += 1 active = _scalar_optimization_check_termination( work, res, res_work_pairs, active, check_termination) if callback is not None: temp = _scalar_optimization_prepare_result( work, res, res_work_pairs, active, shape, customize_result) if _call_callback_maybe_halt(callback, temp): cb_terminate = True break if active.size == 0: break post_termination_check(work) work.status[:] = _ECALLBACK if cb_terminate else _ECONVERR return _scalar_optimization_prepare_result( work, res, res_work_pairs, active, shape, customize_result) def _chandrupatla_iv(func, a, b, args, xatol, xrtol, fatol, frtol, maxiter, callback): # Input validation for `_chandrupatla` if not callable(func): raise ValueError('`func` must be callable.') # a and b have more complex IV that is taken care of during initialization if not np.iterable(args): args = (args,) tols = np.asarray([xatol, xrtol, fatol if fatol is not None else 1, frtol]) if (not np.issubdtype(tols.dtype, np.number) or np.any(tols < 0) or tols.shape != (4,)): raise ValueError('Tolerances must be non-negative scalars.') maxiter_int = int(maxiter) if maxiter != maxiter_int or maxiter < 0: raise ValueError('`maxiter` must be a non-negative integer.') if callback is not None and not callable(callback): raise ValueError('`callback` must be callable.') return func, a, b, args, xatol, xrtol, fatol, frtol, maxiter, callback def _scalar_optimization_initialize(func, xs, args): """Initialize abscissa, function, and args arrays for elementwise function Parameters ---------- func : callable An elementwise function with signature func(x: ndarray, *args) -> ndarray where each element of ``x`` is a finite real and ``args`` is a tuple, which may contain an arbitrary number of arrays that are broadcastable with ``x``. 
xs : tuple of arrays Finite real abscissa arrays. Must be broadcastable. args : tuple, optional Additional positional arguments to be passed to `func`. Returns ------- xs, fs, args : tuple of arrays Broadcasted, writeable, 1D abscissa and function value arrays (or NumPy floats, if appropriate). The dtypes of the `xs` and `fs` are `xfat`; the dtype of the `args` are unchanged. shape : tuple of ints Original shape of broadcasted arrays. xfat : NumPy dtype Result dtype of abscissae, function values, and args determined using `np.result_type`, except integer types are promoted to `np.float64`. Raises ------ ValueError If the result dtype is not that of a real scalar Notes ----- Useful for initializing the input of SciPy functions that accept an elementwise callable, abscissae, and arguments; e.g. `scipy.optimize._chandrupatla`. """ nx = len(xs) # Try to preserve `dtype`, but we need to ensure that the arguments are at # least floats before passing them into the function; integers can overflow # and cause failure. # There might be benefit to combining the `xs` into a single array and # calling `func` once on the combined array. For now, keep them separate. xas = np.broadcast_arrays(*xs, *args) # broadcast and rename xat = np.result_type(*[xa.dtype for xa in xas]) xat = np.float64 if np.issubdtype(xat, np.integer) else xat xs, args = xas[:nx], xas[nx:] xs = [x.astype(xat, copy=False)[()] for x in xs] fs = [np.asarray(func(x, *args)) for x in xs] shape = xs[0].shape # These algorithms tend to mix the dtypes of the abscissae and function # values, so figure out what the result will be and convert them all to # that time from the outset. xfat = np.result_type(*([f.dtype for f in fs] + [xat])) if not np.issubdtype(xfat, np.floating): raise ValueError("Abscissae and function output must be real numbers.") xs = [x.astype(xfat, copy=True)[()] for x in xs] fs = [f.astype(xfat, copy=True)[()] for f in fs] # To ensure that we can do indexing, we'll work with at least 1d arrays, # but remember the appropriate shape of the output. xs = [x.ravel() for x in xs] fs = [f.ravel() for f in fs] args = [arg.flatten() for arg in args] return xs, fs, args, shape, xfat def _scalar_optimization_check_termination(work, res, res_work_pairs, active, check_termination): # Checks termination conditions, updates elements of `res` with # corresponding elements of `work`, and compresses `work`. stop = check_termination(work) if np.any(stop): # update the active elements of the result object with the active # elements for which a termination condition has been met _scalar_optimization_update_active(work, res, res_work_pairs, active, stop) # compress the arrays to avoid unnecessary computation proceed = ~stop active = active[proceed] for key, val in work.items(): work[key] = val[proceed] if isinstance(val, np.ndarray) else val work.args = [arg[proceed] for arg in work.args] return active def _scalar_optimization_update_active(work, res, res_work_pairs, active, mask=None): # Update `active` indices of the arrays in result object `res` with the # contents of the scalars and arrays in `update_dict`. When provided, # `mask` is a boolean array applied both to the arrays in `update_dict` # that are to be used and to the arrays in `res` that are to be updated. 
update_dict = {key1: work[key2] for key1, key2 in res_work_pairs} update_dict['success'] = work.status == 0 if mask is not None: active_mask = active[mask] for key, val in update_dict.items(): res[key][active_mask] = val[mask] if np.size(val) > 1 else val else: for key, val in update_dict.items(): res[key][active] = val def _scalar_optimization_prepare_result(work, res, res_work_pairs, active, shape, customize_result): # Prepare the result object `res` by creating a copy, copying the latest # data from work, running the provided result customization function, # and reshaping the data to the original shapes. res = res.copy() _scalar_optimization_update_active(work, res, res_work_pairs, active) customize_result(res) for key, val in res.items(): res[key] = np.reshape(val, shape)[()] res['_order_keys'] = ['success'] + [i for i, j in res_work_pairs] return OptimizeResult(**res)
72,277
37.323436
83
py
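A minimal usage sketch for the vectorized `_chandrupatla` root finder shown in the record above. This mirrors the docstring example and then inspects the extra result attributes defined by `res_work_pairs` / `customize_result`; note that `scipy.optimize._zeros_py` is a private module, so the attribute names (`xl`, `xr`, `nit`, `nfev`, `status`) are assumptions tied to this source and may change between SciPy versions.

import numpy as np
from scipy import optimize

def f(x, c):
    # simple cubic; `c` broadcasts elementwise against the bracket
    return x**3 - 2*x - c

c = np.array([3, 4, 5])
res = optimize._zeros_py._chandrupatla(f, 0, 3, args=(c,))

print(res.x)                    # one root per element of c
print(res.success, res.status)  # elementwise convergence flags
print(res.nit, res.nfev)        # iteration / function-evaluation counts
print(res.xl, res.xr)           # final brackets, ordered so that xl <= xr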
scipy
scipy-main/scipy/optimize/lbfgsb.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.optimize` namespace for importing the functions # included below. import warnings from . import _lbfgsb_py __all__ = [ # noqa: F822 'LbfgsInvHessProduct', 'LinearOperator', 'MemoizeJac', 'OptimizeResult', 'array', 'asarray', 'float64', 'fmin_l_bfgs_b', 'old_bound_to_new', 'zeros', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.optimize.lbfgsb is deprecated and has no attribute " f"{name}. Try looking in scipy.optimize instead.") warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, " "the `scipy.optimize.lbfgsb` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_lbfgsb_py, name)
929
23.473684
78
py
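The `lbfgsb.py` shim above implements the module-level `__getattr__` deprecation pattern (PEP 562): old imports keep working but emit a `DeprecationWarning`. A small sketch of how that behaves, assuming a SciPy version in which the deprecated namespace still exists:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # deprecated path: attribute lookup goes through the module __getattr__
    from scipy.optimize.lbfgsb import fmin_l_bfgs_b
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# preferred path: import from the public namespace instead
from scipy.optimize import fmin_l_bfgs_b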
scipy
scipy-main/scipy/optimize/_trustregion_dogleg.py
"""Dog-leg trust-region optimization.""" import numpy as np import scipy.linalg from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) __all__ = [] def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None, **trust_region_options): """ Minimization of scalar function of one or more variables using the dog-leg trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `gtol` before successful termination. """ if jac is None: raise ValueError('Jacobian is required for dogleg minimization') if not callable(hess): raise ValueError('Hessian is required for dogleg minimization') return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, subproblem=DoglegSubproblem, **trust_region_options) class DoglegSubproblem(BaseQuadraticSubproblem): """Quadratic subproblem solved by the dogleg method""" def cauchy_point(self): """ The Cauchy point is minimal along the direction of steepest descent. """ if self._cauchy_point is None: g = self.jac Bg = self.hessp(g) self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g return self._cauchy_point def newton_point(self): """ The Newton point is a global minimum of the approximate function. """ if self._newton_point is None: g = self.jac B = self.hess cho_info = scipy.linalg.cho_factor(B) self._newton_point = -scipy.linalg.cho_solve(cho_info, g) return self._newton_point def solve(self, trust_radius): """ Minimize a function using the dog-leg trust-region algorithm. This algorithm requires function values and first and second derivatives. It also performs a costly Hessian decomposition for most iterations, and the Hessian is required to be positive definite. Parameters ---------- trust_radius : float We are allowed to wander only this far away from the origin. Returns ------- p : ndarray The proposed step. hits_boundary : bool True if the proposed step is on the boundary of the trust region. Notes ----- The Hessian is required to be positive definite. References ---------- .. [1] Jorge Nocedal and Stephen Wright, Numerical Optimization, second edition, Springer-Verlag, 2006, page 73. """ # Compute the Newton point. # This is the optimum for the quadratic model function. # If it is inside the trust radius then return this point. p_best = self.newton_point() if scipy.linalg.norm(p_best) < trust_radius: hits_boundary = False return p_best, hits_boundary # Compute the Cauchy point. # This is the predicted optimum along the direction of steepest descent. p_u = self.cauchy_point() # If the Cauchy point is outside the trust region, # then return the point where the path intersects the boundary. p_u_norm = scipy.linalg.norm(p_u) if p_u_norm >= trust_radius: p_boundary = p_u * (trust_radius / p_u_norm) hits_boundary = True return p_boundary, hits_boundary # Compute the intersection of the trust region boundary # and the line segment connecting the Cauchy and Newton points. # This requires solving a quadratic equation. # ||p_u + t*(p_best - p_u)||**2 == trust_radius**2 # Solve this for positive time t using the quadratic formula. _, tb = self.get_boundaries_intersections(p_u, p_best - p_u, trust_radius) p_boundary = p_u + tb * (p_best - p_u) hits_boundary = True return p_boundary, hits_boundary
4,389
34.691057
81
py
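`_minimize_dogleg` above is private, but it is reachable through the public `scipy.optimize.minimize` interface with `method='dogleg'`, which requires both a gradient and a Hessian that stays positive definite. A minimal sketch on a convex quadratic (chosen here because its constant SPD Hessian guarantees the Cholesky factorization in `newton_point` succeeds); the problem data are illustrative, not taken from the file:

import numpy as np
from scipy.optimize import minimize

# f(x) = 0.5 x^T A x - b^T x with A symmetric positive definite
A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([1.0, -1.0])

fun = lambda x: 0.5 * x @ A @ x - b @ x   # objective
jac = lambda x: A @ x - b                 # gradient
hess = lambda x: A                        # constant SPD Hessian

res = minimize(fun, x0=np.zeros(2), method='dogleg', jac=jac, hess=hess)
print(res.x)                   # minimizer, i.e. the solution of A x = b
print(np.linalg.solve(A, b))   # reference solution for comparison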
scipy
scipy-main/scipy/optimize/_qap.py
import numpy as np import operator from . import (linear_sum_assignment, OptimizeResult) from ._optimize import _check_unknown_options from scipy._lib._util import check_random_state import itertools QUADRATIC_ASSIGNMENT_METHODS = ['faq', '2opt'] def quadratic_assignment(A, B, method="faq", options=None): r""" Approximates solution to the quadratic assignment problem and the graph matching problem. Quadratic assignment solves problems of the following form: .. math:: \min_P & \ {\ \text{trace}(A^T P B P^T)}\\ \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\ where :math:`\mathcal{P}` is the set of all permutation matrices, and :math:`A` and :math:`B` are square matrices. Graph matching tries to *maximize* the same objective function. This algorithm can be thought of as finding the alignment of the nodes of two graphs that minimizes the number of induced edge disagreements, or, in the case of weighted graphs, the sum of squared edge weight differences. Note that the quadratic assignment problem is NP-hard. The results given here are approximations and are not guaranteed to be optimal. Parameters ---------- A : 2-D array, square The square matrix :math:`A` in the objective function above. B : 2-D array, square The square matrix :math:`B` in the objective function above. method : str in {'faq', '2opt'} (default: 'faq') The algorithm used to solve the problem. :ref:`'faq' <optimize.qap-faq>` (default) and :ref:`'2opt' <optimize.qap-2opt>` are available. options : dict, optional A dictionary of solver options. All solvers support the following: maximize : bool (default: False) Maximizes the objective function if ``True``. partial_match : 2-D array of integers, optional (default: None) Fixes part of the matching. Also known as a "seed" [2]_. Each row of `partial_match` specifies a pair of matched nodes: node ``partial_match[i, 0]`` of `A` is matched to node ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, where ``m`` is not greater than the number of nodes, :math:`n`. rng : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. For method-specific options, see :func:`show_options('quadratic_assignment') <show_options>`. Returns ------- res : OptimizeResult `OptimizeResult` containing the following fields. col_ind : 1-D array Column indices corresponding to the best permutation found of the nodes of `B`. fun : float The objective value of the solution. nit : int The number of iterations performed during optimization. Notes ----- The default method :ref:`'faq' <optimize.qap-faq>` uses the Fast Approximate QAP algorithm [1]_; it typically offers the best combination of speed and accuracy. Method :ref:`'2opt' <optimize.qap-2opt>` can be computationally expensive, but may be a useful alternative, or it can be used to refine the solution returned by another method. References ---------- .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik, S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and C.E. Priebe, "Fast approximate quadratic programming for graph matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015, :doi:`10.1371/journal.pone.0121002` .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski, C. Priebe, "Seeded graph matching", Pattern Recognit. 
87 (2019): 203-215, :doi:`10.1016/j.patcog.2018.09.014` .. [3] "2-opt," Wikipedia. https://en.wikipedia.org/wiki/2-opt Examples -------- >>> import numpy as np >>> from scipy.optimize import quadratic_assignment >>> A = np.array([[0, 80, 150, 170], [80, 0, 130, 100], ... [150, 130, 0, 120], [170, 100, 120, 0]]) >>> B = np.array([[0, 5, 2, 7], [0, 0, 3, 8], ... [0, 0, 0, 3], [0, 0, 0, 0]]) >>> res = quadratic_assignment(A, B) >>> print(res) fun: 3260 col_ind: [0 3 2 1] nit: 9 The see the relationship between the returned ``col_ind`` and ``fun``, use ``col_ind`` to form the best permutation matrix found, then evaluate the objective function :math:`f(P) = trace(A^T P B P^T )`. >>> perm = res['col_ind'] >>> P = np.eye(len(A), dtype=int)[perm] >>> fun = np.trace(A.T @ P @ B @ P.T) >>> print(fun) 3260 Alternatively, to avoid constructing the permutation matrix explicitly, directly permute the rows and columns of the distance matrix. >>> fun = np.trace(A.T @ B[perm][:, perm]) >>> print(fun) 3260 Although not guaranteed in general, ``quadratic_assignment`` happens to have found the globally optimal solution. >>> from itertools import permutations >>> perm_opt, fun_opt = None, np.inf >>> for perm in permutations([0, 1, 2, 3]): ... perm = np.array(perm) ... fun = np.trace(A.T @ B[perm][:, perm]) ... if fun < fun_opt: ... fun_opt, perm_opt = fun, perm >>> print(np.array_equal(perm_opt, res['col_ind'])) True Here is an example for which the default method, :ref:`'faq' <optimize.qap-faq>`, does not find the global optimum. >>> A = np.array([[0, 5, 8, 6], [5, 0, 5, 1], ... [8, 5, 0, 2], [6, 1, 2, 0]]) >>> B = np.array([[0, 1, 8, 4], [1, 0, 5, 2], ... [8, 5, 0, 5], [4, 2, 5, 0]]) >>> res = quadratic_assignment(A, B) >>> print(res) fun: 178 col_ind: [1 0 3 2] nit: 13 If accuracy is important, consider using :ref:`'2opt' <optimize.qap-2opt>` to refine the solution. >>> guess = np.array([np.arange(len(A)), res.col_ind]).T >>> res = quadratic_assignment(A, B, method="2opt", ... 
options = {'partial_guess': guess}) >>> print(res) fun: 176 col_ind: [1 2 3 0] nit: 17 """ if options is None: options = {} method = method.lower() methods = {"faq": _quadratic_assignment_faq, "2opt": _quadratic_assignment_2opt} if method not in methods: raise ValueError(f"method {method} must be in {methods}.") res = methods[method](A, B, **options) return res def _calc_score(A, B, perm): # equivalent to objective function but avoids matmul return np.sum(A * B[perm][:, perm]) def _common_input_validation(A, B, partial_match): A = np.atleast_2d(A) B = np.atleast_2d(B) if partial_match is None: partial_match = np.array([[], []]).T partial_match = np.atleast_2d(partial_match).astype(int) msg = None if A.shape[0] != A.shape[1]: msg = "`A` must be square" elif B.shape[0] != B.shape[1]: msg = "`B` must be square" elif A.ndim != 2 or B.ndim != 2: msg = "`A` and `B` must have exactly two dimensions" elif A.shape != B.shape: msg = "`A` and `B` matrices must be of equal size" elif partial_match.shape[0] > A.shape[0]: msg = "`partial_match` can have only as many seeds as there are nodes" elif partial_match.shape[1] != 2: msg = "`partial_match` must have two columns" elif partial_match.ndim != 2: msg = "`partial_match` must have exactly two dimensions" elif (partial_match < 0).any(): msg = "`partial_match` must contain only positive indices" elif (partial_match >= len(A)).any(): msg = "`partial_match` entries must be less than number of nodes" elif (not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or not len(set(partial_match[:, 1])) == len(partial_match[:, 1])): msg = "`partial_match` column entries must be unique" if msg is not None: raise ValueError(msg) return A, B, partial_match def _quadratic_assignment_faq(A, B, maximize=False, partial_match=None, rng=None, P0="barycenter", shuffle_input=False, maxiter=30, tol=0.03, **unknown_options): r"""Solve the quadratic assignment problem (approximately). This function solves the Quadratic Assignment Problem (QAP) and the Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm (FAQ) [1]_. Quadratic assignment solves problems of the following form: .. math:: \min_P & \ {\ \text{trace}(A^T P B P^T)}\\ \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\ where :math:`\mathcal{P}` is the set of all permutation matrices, and :math:`A` and :math:`B` are square matrices. Graph matching tries to *maximize* the same objective function. This algorithm can be thought of as finding the alignment of the nodes of two graphs that minimizes the number of induced edge disagreements, or, in the case of weighted graphs, the sum of squared edge weight differences. Note that the quadratic assignment problem is NP-hard. The results given here are approximations and are not guaranteed to be optimal. Parameters ---------- A : 2-D array, square The square matrix :math:`A` in the objective function above. B : 2-D array, square The square matrix :math:`B` in the objective function above. method : str in {'faq', '2opt'} (default: 'faq') The algorithm used to solve the problem. This is the method-specific documentation for 'faq'. :ref:`'2opt' <optimize.qap-2opt>` is also available. Options ------- maximize : bool (default: False) Maximizes the objective function if ``True``. partial_match : 2-D array of integers, optional (default: None) Fixes part of the matching. Also known as a "seed" [2]_. Each row of `partial_match` specifies a pair of matched nodes: node ``partial_match[i, 0]`` of `A` is matched to node ``partial_match[i, 1]`` of `B`. 
The array has shape ``(m, 2)``, where ``m`` is not greater than the number of nodes, :math:`n`. rng : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. P0 : 2-D array, "barycenter", or "randomized" (default: "barycenter") Initial position. Must be a doubly-stochastic matrix [3]_. If the initial position is an array, it must be a doubly stochastic matrix of size :math:`m' \times m'` where :math:`m' = n - m`. If ``"barycenter"`` (default), the initial position is the barycenter of the Birkhoff polytope (the space of doubly stochastic matrices). This is a :math:`m' \times m'` matrix with all entries equal to :math:`1 / m'`. If ``"randomized"`` the initial search position is :math:`P_0 = (J + K) / 2`, where :math:`J` is the barycenter and :math:`K` is a random doubly stochastic matrix. shuffle_input : bool (default: False) Set to `True` to resolve degenerate gradients randomly. For non-degenerate gradients this option has no effect. maxiter : int, positive (default: 30) Integer specifying the max number of Frank-Wolfe iterations performed. tol : float (default: 0.03) Tolerance for termination. Frank-Wolfe iteration terminates when :math:`\frac{||P_{i}-P_{i+1}||_F}{\sqrt{m')}} \leq tol`, where :math:`i` is the iteration number. Returns ------- res : OptimizeResult `OptimizeResult` containing the following fields. col_ind : 1-D array Column indices corresponding to the best permutation found of the nodes of `B`. fun : float The objective value of the solution. nit : int The number of Frank-Wolfe iterations performed. Notes ----- The algorithm may be sensitive to the initial permutation matrix (or search "position") due to the possibility of several local minima within the feasible region. A barycenter initialization is more likely to result in a better solution than a single random initialization. However, calling ``quadratic_assignment`` several times with different random initializations may result in a better optimum at the cost of longer total execution time. Examples -------- As mentioned above, a barycenter initialization often results in a better solution than a single random initialization. >>> from numpy.random import default_rng >>> rng = default_rng() >>> n = 15 >>> A = rng.random((n, n)) >>> B = rng.random((n, n)) >>> res = quadratic_assignment(A, B) # FAQ is default method >>> print(res.fun) 46.871483385480545 # may vary >>> options = {"P0": "randomized"} # use randomized initialization >>> res = quadratic_assignment(A, B, options=options) >>> print(res.fun) 47.224831071310625 # may vary However, consider running from several randomized initializations and keeping the best result. >>> res = min([quadratic_assignment(A, B, options=options) ... for i in range(30)], key=lambda x: x.fun) >>> print(res.fun) 46.671852533681516 # may vary The '2-opt' method can be used to further refine the results. >>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T} >>> res = quadratic_assignment(A, B, method="2opt", options=options) >>> print(res.fun) 46.47160735721583 # may vary References ---------- .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik, S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and C.E. 
Priebe, "Fast approximate quadratic programming for graph matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015, :doi:`10.1371/journal.pone.0121002` .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski, C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019): 203-215, :doi:`10.1016/j.patcog.2018.09.014` .. [3] "Doubly stochastic Matrix," Wikipedia. https://en.wikipedia.org/wiki/Doubly_stochastic_matrix """ _check_unknown_options(unknown_options) maxiter = operator.index(maxiter) # ValueError check A, B, partial_match = _common_input_validation(A, B, partial_match) msg = None if isinstance(P0, str) and P0 not in {'barycenter', 'randomized'}: msg = "Invalid 'P0' parameter string" elif maxiter <= 0: msg = "'maxiter' must be a positive integer" elif tol <= 0: msg = "'tol' must be a positive float" if msg is not None: raise ValueError(msg) rng = check_random_state(rng) n = len(A) # number of vertices in graphs n_seeds = len(partial_match) # number of seeds n_unseed = n - n_seeds # [1] Algorithm 1 Line 1 - choose initialization if not isinstance(P0, str): P0 = np.atleast_2d(P0) if P0.shape != (n_unseed, n_unseed): msg = "`P0` matrix must have shape m' x m', where m'=n-m" elif ((P0 < 0).any() or not np.allclose(np.sum(P0, axis=0), 1) or not np.allclose(np.sum(P0, axis=1), 1)): msg = "`P0` matrix must be doubly stochastic" if msg is not None: raise ValueError(msg) elif P0 == 'barycenter': P0 = np.ones((n_unseed, n_unseed)) / n_unseed elif P0 == 'randomized': J = np.ones((n_unseed, n_unseed)) / n_unseed # generate a nxn matrix where each entry is a random number [0, 1] # would use rand, but Generators don't have it # would use random, but old mtrand.RandomStates don't have it K = _doubly_stochastic(rng.uniform(size=(n_unseed, n_unseed))) P0 = (J + K) / 2 # check trivial cases if n == 0 or n_seeds == n: score = _calc_score(A, B, partial_match[:, 1]) res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0} return OptimizeResult(res) obj_func_scalar = 1 if maximize: obj_func_scalar = -1 nonseed_B = np.setdiff1d(range(n), partial_match[:, 1]) if shuffle_input: nonseed_B = rng.permutation(nonseed_B) nonseed_A = np.setdiff1d(range(n), partial_match[:, 0]) perm_A = np.concatenate([partial_match[:, 0], nonseed_A]) perm_B = np.concatenate([partial_match[:, 1], nonseed_B]) # definitions according to Seeded Graph Matching [2]. A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds) B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds) const_sum = A21 @ B21.T + A12.T @ B12 P = P0 # [1] Algorithm 1 Line 2 - loop while stopping criteria not met for n_iter in range(1, maxiter+1): # [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t) grad_fp = (const_sum + A22 @ P @ B22.T + A22.T @ P @ B22) # [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8 _, cols = linear_sum_assignment(grad_fp, maximize=maximize) Q = np.eye(n_unseed)[cols] # [1] Algorithm 1 Line 5 - compute the step size # Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect # terms as ax**2 + bx + c. c does not affect location of minimum # and can be ignored. Also, note that trace(A@B) = (A.T*B).sum(); # apply where possible for efficiency. 
R = P - Q b21 = ((R.T @ A21) * B21).sum() b12 = ((R.T @ A12.T) * B12.T).sum() AR22 = A22.T @ R BR22 = B22 @ R.T b22a = (AR22 * B22.T[cols]).sum() b22b = (A22 * BR22[cols]).sum() a = (AR22.T * BR22).sum() b = b21 + b12 + b22a + b22b # critical point of ax^2 + bx + c is at x = -d/(2*e) # if a * obj_func_scalar > 0, it is a minimum # if minimum is not in [0, 1], only endpoints need to be considered if a*obj_func_scalar > 0 and 0 <= -b/(2*a) <= 1: alpha = -b/(2*a) else: alpha = np.argmin([0, (b + a)*obj_func_scalar]) # [1] Algorithm 1 Line 6 - Update P P_i1 = alpha * P + (1 - alpha) * Q if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol: P = P_i1 break P = P_i1 # [1] Algorithm 1 Line 7 - end main loop # [1] Algorithm 1 Line 8 - project onto the set of permutation matrices _, col = linear_sum_assignment(P, maximize=True) perm = np.concatenate((np.arange(n_seeds), col + n_seeds)) unshuffled_perm = np.zeros(n, dtype=int) unshuffled_perm[perm_A] = perm_B[perm] score = _calc_score(A, B, unshuffled_perm) res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter} return OptimizeResult(res) def _split_matrix(X, n): # definitions according to Seeded Graph Matching [2]. upper, lower = X[:n], X[n:] return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:] def _doubly_stochastic(P, tol=1e-3): # Adapted from @btaba implementation # https://github.com/btaba/sinkhorn_knopp # of Sinkhorn-Knopp algorithm # https://projecteuclid.org/euclid.pjm/1102992505 max_iter = 1000 c = 1 / P.sum(axis=0) r = 1 / (P @ c) P_eps = P for it in range(max_iter): if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and (np.abs(P_eps.sum(axis=0) - 1) < tol).all()): # All column/row sums ~= 1 within threshold break c = 1 / (r @ P) r = 1 / (P @ c) P_eps = r[:, None] * P * c return P_eps def _quadratic_assignment_2opt(A, B, maximize=False, rng=None, partial_match=None, partial_guess=None, **unknown_options): r"""Solve the quadratic assignment problem (approximately). This function solves the Quadratic Assignment Problem (QAP) and the Graph Matching Problem (GMP) using the 2-opt algorithm [1]_. Quadratic assignment solves problems of the following form: .. math:: \min_P & \ {\ \text{trace}(A^T P B P^T)}\\ \mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\ where :math:`\mathcal{P}` is the set of all permutation matrices, and :math:`A` and :math:`B` are square matrices. Graph matching tries to *maximize* the same objective function. This algorithm can be thought of as finding the alignment of the nodes of two graphs that minimizes the number of induced edge disagreements, or, in the case of weighted graphs, the sum of squared edge weight differences. Note that the quadratic assignment problem is NP-hard. The results given here are approximations and are not guaranteed to be optimal. Parameters ---------- A : 2-D array, square The square matrix :math:`A` in the objective function above. B : 2-D array, square The square matrix :math:`B` in the objective function above. method : str in {'faq', '2opt'} (default: 'faq') The algorithm used to solve the problem. This is the method-specific documentation for '2opt'. :ref:`'faq' <optimize.qap-faq>` is also available. Options ------- maximize : bool (default: False) Maximizes the objective function if ``True``. rng : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. 
If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. partial_match : 2-D array of integers, optional (default: None) Fixes part of the matching. Also known as a "seed" [2]_. Each row of `partial_match` specifies a pair of matched nodes: node ``partial_match[i, 0]`` of `A` is matched to node ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, where ``m`` is not greater than the number of nodes, :math:`n`. partial_guess : 2-D array of integers, optional (default: None) A guess for the matching between the two matrices. Unlike `partial_match`, `partial_guess` does not fix the indices; they are still free to be optimized. Each row of `partial_guess` specifies a pair of matched nodes: node ``partial_guess[i, 0]`` of `A` is matched to node ``partial_guess[i, 1]`` of `B`. The array has shape ``(m, 2)``, where ``m`` is not greater than the number of nodes, :math:`n`. Returns ------- res : OptimizeResult `OptimizeResult` containing the following fields. col_ind : 1-D array Column indices corresponding to the best permutation found of the nodes of `B`. fun : float The objective value of the solution. nit : int The number of iterations performed during optimization. Notes ----- This is a greedy algorithm that works similarly to bubble sort: beginning with an initial permutation, it iteratively swaps pairs of indices to improve the objective function until no such improvements are possible. References ---------- .. [1] "2-opt," Wikipedia. https://en.wikipedia.org/wiki/2-opt .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski, C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019): 203-215, https://doi.org/10.1016/j.patcog.2018.09.014 """ _check_unknown_options(unknown_options) rng = check_random_state(rng) A, B, partial_match = _common_input_validation(A, B, partial_match) N = len(A) # check trivial cases if N == 0 or partial_match.shape[0] == N: score = _calc_score(A, B, partial_match[:, 1]) res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0} return OptimizeResult(res) if partial_guess is None: partial_guess = np.array([[], []]).T partial_guess = np.atleast_2d(partial_guess).astype(int) msg = None if partial_guess.shape[0] > A.shape[0]: msg = ("`partial_guess` can have only as " "many entries as there are nodes") elif partial_guess.shape[1] != 2: msg = "`partial_guess` must have two columns" elif partial_guess.ndim != 2: msg = "`partial_guess` must have exactly two dimensions" elif (partial_guess < 0).any(): msg = "`partial_guess` must contain only positive indices" elif (partial_guess >= len(A)).any(): msg = "`partial_guess` entries must be less than number of nodes" elif (not len(set(partial_guess[:, 0])) == len(partial_guess[:, 0]) or not len(set(partial_guess[:, 1])) == len(partial_guess[:, 1])): msg = "`partial_guess` column entries must be unique" if msg is not None: raise ValueError(msg) fixed_rows = None if partial_match.size or partial_guess.size: # use partial_match and partial_guess for initial permutation, # but randomly permute the rest. 
guess_rows = np.zeros(N, dtype=bool) guess_cols = np.zeros(N, dtype=bool) fixed_rows = np.zeros(N, dtype=bool) fixed_cols = np.zeros(N, dtype=bool) perm = np.zeros(N, dtype=int) rg, cg = partial_guess.T guess_rows[rg] = True guess_cols[cg] = True perm[guess_rows] = cg # match overrides guess rf, cf = partial_match.T fixed_rows[rf] = True fixed_cols[cf] = True perm[fixed_rows] = cf random_rows = ~fixed_rows & ~guess_rows random_cols = ~fixed_cols & ~guess_cols perm[random_rows] = rng.permutation(np.arange(N)[random_cols]) else: perm = rng.permutation(np.arange(N)) best_score = _calc_score(A, B, perm) i_free = np.arange(N) if fixed_rows is not None: i_free = i_free[~fixed_rows] better = operator.gt if maximize else operator.lt n_iter = 0 done = False while not done: # equivalent to nested for loops i in range(N), j in range(i, N) for i, j in itertools.combinations_with_replacement(i_free, 2): n_iter += 1 perm[i], perm[j] = perm[j], perm[i] score = _calc_score(A, B, perm) if better(score, best_score): best_score = score break # faster to swap back than to create a new list every time perm[i], perm[j] = perm[j], perm[i] else: # no swaps made done = True res = {"col_ind": perm, "fun": best_score, "nit": n_iter} return OptimizeResult(res)
27,658
37.150345
81
py
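A short sketch of the seeded-matching (`partial_match`) option documented in `_qap.py` above, which the docstring examples do not exercise: fixing one node pair before running the default FAQ solver. The seed pair used here is an illustrative value, not taken from the file.

import numpy as np
from scipy.optimize import quadratic_assignment

A = np.array([[0, 80, 150, 170], [80, 0, 130, 100],
              [150, 130, 0, 120], [170, 100, 120, 0]])
B = np.array([[0, 5, 2, 7], [0, 0, 3, 8],
              [0, 0, 0, 3], [0, 0, 0, 0]])

# force node 0 of A to be matched to node 0 of B; the other nodes stay free
seed = np.array([[0, 0]])
res = quadratic_assignment(A, B, options={'partial_match': seed})

print(res.col_ind)   # permutation respecting the seed (col_ind[0] == 0)
print(res.fun)       # objective value of the seeded solution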
scipy
scipy-main/scipy/optimize/_nonlin.py
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi> # Distributed under the same license as SciPy. import sys import numpy as np from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError from numpy import asarray, dot, vdot import scipy.sparse.linalg import scipy.sparse from scipy.linalg import get_blas_funcs import inspect from scipy._lib._util import getfullargspec_no_self as _getfullargspec from ._linesearch import scalar_search_wolfe1, scalar_search_armijo __all__ = [ 'broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'newton_krylov', 'BroydenFirst', 'KrylovJacobian', 'InverseJacobian'] #------------------------------------------------------------------------------ # Utility functions #------------------------------------------------------------------------------ class NoConvergence(Exception): pass def maxnorm(x): return np.absolute(x).max() def _as_inexact(x): """Return `x` as an array, of either floats or complex floats""" x = asarray(x) if not np.issubdtype(x.dtype, np.inexact): return asarray(x, dtype=np.float_) return x def _array_like(x, x0): """Return ndarray `x` as same array subclass and shape as `x0`""" x = np.reshape(x, np.shape(x0)) wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) return wrap(x) def _safe_norm(v): if not np.isfinite(v).all(): return np.array(np.inf) return norm(v) #------------------------------------------------------------------------------ # Generic nonlinear solver machinery #------------------------------------------------------------------------------ _doc_parts = dict( params_basic=""" F : function(x) -> f Function whose root to find; should take and return an array-like object. xin : array_like Initial guess for the solution """.strip(), params_extra=""" iter : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. verbose : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. f_tol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. f_rtol : float, optional Relative tolerance for the residual. If omitted, not used. x_tol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. x_rtol : float, optional Relative minimum step size. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. callback : function, optional Optional callback function. It is called on every iteration as ``callback(x, f)`` where `x` is the current solution and `f` the corresponding residual. Returns ------- sol : ndarray An array (of similar array type as `x0`) containing the final solution. Raises ------ NoConvergence When a solution was not found. 
""".strip() ) def _set_doc(obj): if obj.__doc__: obj.__doc__ = obj.__doc__ % _doc_parts def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, full_output=False, raise_exception=True): """ Find a root of a function, in a way suitable for large-scale problems. Parameters ---------- %(params_basic)s jacobian : Jacobian A Jacobian approximation: `Jacobian` object or something that `asjacobian` can transform to one. Alternatively, a string specifying which of the builtin Jacobian approximations to use: krylov, broyden1, broyden2, anderson diagbroyden, linearmixing, excitingmixing %(params_extra)s full_output : bool If true, returns a dictionary `info` containing convergence information. raise_exception : bool If True, a `NoConvergence` exception is raise if no solution is found. See Also -------- asjacobian, Jacobian Notes ----- This algorithm implements the inexact Newton method, with backtracking or full line searches. Several Jacobian approximations are available, including Krylov and Quasi-Newton methods. References ---------- .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear Equations\". Society for Industrial and Applied Mathematics. (1995) https://archive.siam.org/books/kelley/fr16/ """ # Can't use default parameters because it's being explicitly passed as None # from the calling function, so we need to set it here. tol_norm = maxnorm if tol_norm is None else tol_norm condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, x_tol=x_tol, x_rtol=x_rtol, iter=iter, norm=tol_norm) x0 = _as_inexact(x0) def func(z): return _as_inexact(F(_array_like(z, x0))).flatten() x = x0.flatten() dx = np.full_like(x, np.inf) Fx = func(x) Fx_norm = norm(Fx) jacobian = asjacobian(jacobian) jacobian.setup(x.copy(), Fx, func) if maxiter is None: if iter is not None: maxiter = iter + 1 else: maxiter = 100*(x.size+1) if line_search is True: line_search = 'armijo' elif line_search is False: line_search = None if line_search not in (None, 'armijo', 'wolfe'): raise ValueError("Invalid line search") # Solver tolerance selection gamma = 0.9 eta_max = 0.9999 eta_treshold = 0.1 eta = 1e-3 for n in range(maxiter): status = condition.check(Fx, x, dx) if status: break # The tolerance, as computed for scipy.sparse.linalg.* routines tol = min(eta, eta*Fx_norm) dx = -jacobian.solve(Fx, tol=tol) if norm(dx) == 0: raise ValueError("Jacobian inversion yielded zero vector. " "This indicates a bug in the Jacobian " "approximation.") # Line search, or Newton step if line_search: s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, line_search) else: s = 1.0 x = x + dx Fx = func(x) Fx_norm_new = norm(Fx) jacobian.update(x.copy(), Fx) if callback: callback(x, Fx) # Adjust forcing parameters for inexact methods eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 if gamma * eta**2 < eta_treshold: eta = min(eta_max, eta_A) else: eta = min(eta_max, max(eta_A, gamma*eta**2)) Fx_norm = Fx_norm_new # Print status if verbose: sys.stdout.write("%d: |F(x)| = %g; step %g\n" % ( n, tol_norm(Fx), s)) sys.stdout.flush() else: if raise_exception: raise NoConvergence(_array_like(x, x0)) else: status = 2 if full_output: info = {'nit': condition.iteration, 'fun': Fx, 'status': status, 'success': status == 1, 'message': {1: 'A solution was found at the specified ' 'tolerance.', 2: 'The maximum number of iterations allowed ' 'has been reached.' 
}[status] } return _array_like(x, x0), info else: return _array_like(x, x0) _set_doc(nonlin_solve) def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, smin=1e-2): tmp_s = [0] tmp_Fx = [Fx] tmp_phi = [norm(Fx)**2] s_norm = norm(x) / norm(dx) def phi(s, store=True): if s == tmp_s[0]: return tmp_phi[0] xt = x + s*dx v = func(xt) p = _safe_norm(v)**2 if store: tmp_s[0] = s tmp_phi[0] = p tmp_Fx[0] = v return p def derphi(s): ds = (abs(s) + s_norm + 1) * rdiff return (phi(s+ds, store=False) - phi(s)) / ds if search_type == 'wolfe': s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], xtol=1e-2, amin=smin) elif search_type == 'armijo': s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], amin=smin) if s is None: # XXX: No suitable step length found. Take the full Newton step, # and hope for the best. s = 1.0 x = x + s*dx if s == tmp_s[0]: Fx = tmp_Fx[0] else: Fx = func(x) Fx_norm = norm(Fx) return s, x, Fx, Fx_norm class TerminationCondition: """ Termination condition for an iteration. It is terminated if - |F| < f_rtol*|F_0|, AND - |F| < f_tol AND - |dx| < x_rtol*|x|, AND - |dx| < x_tol """ def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, iter=None, norm=maxnorm): if f_tol is None: f_tol = np.finfo(np.float_).eps ** (1./3) if f_rtol is None: f_rtol = np.inf if x_tol is None: x_tol = np.inf if x_rtol is None: x_rtol = np.inf self.x_tol = x_tol self.x_rtol = x_rtol self.f_tol = f_tol self.f_rtol = f_rtol self.norm = norm self.iter = iter self.f0_norm = None self.iteration = 0 def check(self, f, x, dx): self.iteration += 1 f_norm = self.norm(f) x_norm = self.norm(x) dx_norm = self.norm(dx) if self.f0_norm is None: self.f0_norm = f_norm if f_norm == 0: return 1 if self.iter is not None: # backwards compatibility with SciPy 0.6.0 return 2 * (self.iteration > self.iter) # NB: condition must succeed for rtol=inf even if norm == 0 return int((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm) and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm)) #------------------------------------------------------------------------------ # Generic Jacobian approximation #------------------------------------------------------------------------------ class Jacobian: """ Common interface for Jacobians or Jacobian approximations. The optional methods come useful when implementing trust region etc., algorithms that often require evaluating transposes of the Jacobian. Methods ------- solve Returns J^-1 * v update Updates Jacobian to point `x` (where the function has residual `Fx`) matvec : optional Returns J * v rmatvec : optional Returns A^H * v rsolve : optional Returns A^-H * v matmat : optional Returns A * V, where V is a dense matrix with dimensions (N,K). todense : optional Form the dense Jacobian matrix. Necessary for dense trust region algorithms, and useful for testing. Attributes ---------- shape Matrix dimensions (M, N) dtype Data type of the matrix. 
func : callable, optional Function the Jacobian corresponds to """ def __init__(self, **kw): names = ["solve", "update", "matvec", "rmatvec", "rsolve", "matmat", "todense", "shape", "dtype"] for name, value in kw.items(): if name not in names: raise ValueError("Unknown keyword argument %s" % name) if value is not None: setattr(self, name, kw[name]) if hasattr(self, 'todense'): self.__array__ = lambda: self.todense() def aspreconditioner(self): return InverseJacobian(self) def solve(self, v, tol=0): raise NotImplementedError def update(self, x, F): pass def setup(self, x, F, func): self.func = func self.shape = (F.size, x.size) self.dtype = F.dtype if self.__class__.setup is Jacobian.setup: # Call on the first point unless overridden self.update(x, F) class InverseJacobian: def __init__(self, jacobian): self.jacobian = jacobian self.matvec = jacobian.solve self.update = jacobian.update if hasattr(jacobian, 'setup'): self.setup = jacobian.setup if hasattr(jacobian, 'rsolve'): self.rmatvec = jacobian.rsolve @property def shape(self): return self.jacobian.shape @property def dtype(self): return self.jacobian.dtype def asjacobian(J): """ Convert given object to one suitable for use as a Jacobian. """ spsolve = scipy.sparse.linalg.spsolve if isinstance(J, Jacobian): return J elif inspect.isclass(J) and issubclass(J, Jacobian): return J() elif isinstance(J, np.ndarray): if J.ndim > 2: raise ValueError('array must have rank <= 2') J = np.atleast_2d(np.asarray(J)) if J.shape[0] != J.shape[1]: raise ValueError('array must be square') return Jacobian(matvec=lambda v: dot(J, v), rmatvec=lambda v: dot(J.conj().T, v), solve=lambda v: solve(J, v), rsolve=lambda v: solve(J.conj().T, v), dtype=J.dtype, shape=J.shape) elif scipy.sparse.isspmatrix(J): if J.shape[0] != J.shape[1]: raise ValueError('matrix must be square') return Jacobian(matvec=lambda v: J*v, rmatvec=lambda v: J.conj().T * v, solve=lambda v: spsolve(J, v), rsolve=lambda v: spsolve(J.conj().T, v), dtype=J.dtype, shape=J.shape) elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): return Jacobian(matvec=getattr(J, 'matvec'), rmatvec=getattr(J, 'rmatvec'), solve=J.solve, rsolve=getattr(J, 'rsolve'), update=getattr(J, 'update'), setup=getattr(J, 'setup'), dtype=J.dtype, shape=J.shape) elif callable(J): # Assume it's a function J(x) that returns the Jacobian class Jac(Jacobian): def update(self, x, F): self.x = x def solve(self, v, tol=0): m = J(self.x) if isinstance(m, np.ndarray): return solve(m, v) elif scipy.sparse.isspmatrix(m): return spsolve(m, v) else: raise ValueError("Unknown matrix type") def matvec(self, v): m = J(self.x) if isinstance(m, np.ndarray): return dot(m, v) elif scipy.sparse.isspmatrix(m): return m*v else: raise ValueError("Unknown matrix type") def rsolve(self, v, tol=0): m = J(self.x) if isinstance(m, np.ndarray): return solve(m.conj().T, v) elif scipy.sparse.isspmatrix(m): return spsolve(m.conj().T, v) else: raise ValueError("Unknown matrix type") def rmatvec(self, v): m = J(self.x) if isinstance(m, np.ndarray): return dot(m.conj().T, v) elif scipy.sparse.isspmatrix(m): return m.conj().T * v else: raise ValueError("Unknown matrix type") return Jac() elif isinstance(J, str): return dict(broyden1=BroydenFirst, broyden2=BroydenSecond, anderson=Anderson, diagbroyden=DiagBroyden, linearmixing=LinearMixing, excitingmixing=ExcitingMixing, krylov=KrylovJacobian)[J]() else: raise TypeError('Cannot convert object to a Jacobian') #------------------------------------------------------------------------------ # 
Broyden #------------------------------------------------------------------------------ class GenericBroyden(Jacobian): def setup(self, x0, f0, func): Jacobian.setup(self, x0, f0, func) self.last_f = f0 self.last_x = x0 if hasattr(self, 'alpha') and self.alpha is None: # Autoscale the initial Jacobian parameter # unless we have already guessed the solution. normf0 = norm(f0) if normf0: self.alpha = 0.5*max(norm(x0), 1) / normf0 else: self.alpha = 1.0 def _update(self, x, f, dx, df, dx_norm, df_norm): raise NotImplementedError def update(self, x, f): df = f - self.last_f dx = x - self.last_x self._update(x, f, dx, df, norm(dx), norm(df)) self.last_f = f self.last_x = x class LowRankMatrix: r""" A matrix represented as .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger However, if the rank of the matrix reaches the dimension of the vectors, full matrix representation will be used thereon. """ def __init__(self, alpha, n, dtype): self.alpha = alpha self.cs = [] self.ds = [] self.n = n self.dtype = dtype self.collapsed = None @staticmethod def _matvec(v, alpha, cs, ds): axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'], cs[:1] + [v]) w = alpha * v for c, d in zip(cs, ds): a = dotc(d, v) w = axpy(c, w, w.size, a) return w @staticmethod def _solve(v, alpha, cs, ds): """Evaluate w = M^-1 v""" if len(cs) == 0: return v/alpha # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) c0 = cs[0] A = alpha * np.identity(len(cs), dtype=c0.dtype) for i, d in enumerate(ds): for j, c in enumerate(cs): A[i,j] += dotc(d, c) q = np.zeros(len(cs), dtype=c0.dtype) for j, d in enumerate(ds): q[j] = dotc(d, v) q /= alpha q = solve(A, q) w = v/alpha for c, qc in zip(cs, q): w = axpy(c, w, w.size, -qc) return w def matvec(self, v): """Evaluate w = M v""" if self.collapsed is not None: return np.dot(self.collapsed, v) return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) def rmatvec(self, v): """Evaluate w = M^H v""" if self.collapsed is not None: return np.dot(self.collapsed.T.conj(), v) return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) def solve(self, v, tol=0): """Evaluate w = M^-1 v""" if self.collapsed is not None: return solve(self.collapsed, v) return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) def rsolve(self, v, tol=0): """Evaluate w = M^-H v""" if self.collapsed is not None: return solve(self.collapsed.T.conj(), v) return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) def append(self, c, d): if self.collapsed is not None: self.collapsed += c[:,None] * d[None,:].conj() return self.cs.append(c) self.ds.append(d) if len(self.cs) > c.size: self.collapse() def __array__(self): if self.collapsed is not None: return self.collapsed Gm = self.alpha*np.identity(self.n, dtype=self.dtype) for c, d in zip(self.cs, self.ds): Gm += c[:,None]*d[None,:].conj() return Gm def collapse(self): """Collapse the low-rank matrix to a full-rank one.""" self.collapsed = np.array(self) self.cs = None self.ds = None self.alpha = None def restart_reduce(self, rank): """ Reduce the rank of the matrix by dropping all vectors. """ if self.collapsed is not None: return assert rank > 0 if len(self.cs) > rank: del self.cs[:] del self.ds[:] def simple_reduce(self, rank): """ Reduce the rank of the matrix by dropping oldest vectors. 
""" if self.collapsed is not None: return assert rank > 0 while len(self.cs) > rank: del self.cs[0] del self.ds[0] def svd_reduce(self, max_rank, to_retain=None): """ Reduce the rank of the matrix by retaining some SVD components. This corresponds to the \"Broyden Rank Reduction Inverse\" algorithm described in [1]_. Note that the SVD decomposition can be done by solving only a problem whose size is the effective rank of this matrix, which is viable even for large problems. Parameters ---------- max_rank : int Maximum rank of this matrix after reduction. to_retain : int, optional Number of SVD components to retain when reduction is done (ie. rank > max_rank). Default is ``max_rank - 2``. References ---------- .. [1] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ if self.collapsed is not None: return p = max_rank if to_retain is not None: q = to_retain else: q = p - 2 if self.cs: p = min(p, len(self.cs[0])) q = max(0, min(q, p-1)) m = len(self.cs) if m < p: # nothing to do return C = np.array(self.cs).T D = np.array(self.ds).T D, R = qr(D, mode='economic') C = dot(C, R.T.conj()) U, S, WH = svd(C, full_matrices=False) C = dot(C, inv(WH)) D = dot(D, WH.T.conj()) for k in range(q): self.cs[k] = C[:,k].copy() self.ds[k] = D[:,k].copy() del self.cs[q:] del self.ds[q:] _doc_parts['broyden_params'] = """ alpha : float, optional Initial guess for the Jacobian is ``(-1/alpha)``. reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart``: drop all matrix columns. Has no extra parameters. - ``simple``: drop oldest matrix column. Has no extra parameters. - ``svd``: keep only the most significant SVD components. Takes an extra parameter, ``to_retain``, which determines the number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (i.e., no rank reduction). """.strip() class BroydenFirst(GenericBroyden): r""" Find a root of a function, using Broyden's first Jacobian approximation. This method is also known as \"Broyden's good method\". Parameters ---------- %(params_basic)s %(broyden_params)s %(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See ``method='broyden1'`` in particular. Notes ----- This algorithm implements the inverse Jacobian Quasi-Newton update .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df) which corresponds to Broyden's first Jacobian update .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx References ---------- .. [1] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf Examples -------- The following functions define a system of nonlinear equations >>> def fun(x): ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, ... 
0.5 * (x[1] - x[0])**3 + x[1]] A solution can be obtained as follows. >>> from scipy import optimize >>> sol = optimize.broyden1(fun, [0, 0]) >>> sol array([0.84116396, 0.15883641]) """ def __init__(self, alpha=None, reduction_method='restart', max_rank=None): GenericBroyden.__init__(self) self.alpha = alpha self.Gm = None if max_rank is None: max_rank = np.inf self.max_rank = max_rank if isinstance(reduction_method, str): reduce_params = () else: reduce_params = reduction_method[1:] reduction_method = reduction_method[0] reduce_params = (max_rank - 1,) + reduce_params if reduction_method == 'svd': self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) elif reduction_method == 'simple': self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) elif reduction_method == 'restart': self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) else: raise ValueError("Unknown rank reduction method '%s'" % reduction_method) def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) def todense(self): return inv(self.Gm) def solve(self, f, tol=0): r = self.Gm.matvec(f) if not np.isfinite(r).all(): # singular; reset the Jacobian approximation self.setup(self.last_x, self.last_f, self.func) return self.Gm.matvec(f) return r def matvec(self, f): return self.Gm.solve(f) def rsolve(self, f, tol=0): return self.Gm.rmatvec(f) def rmatvec(self, f): return self.Gm.rsolve(f) def _update(self, x, f, dx, df, dx_norm, df_norm): self._reduce() # reduce first to preserve secant condition v = self.Gm.rmatvec(dx) c = dx - self.Gm.matvec(df) d = v / vdot(df, v) self.Gm.append(c, d) class BroydenSecond(BroydenFirst): """ Find a root of a function, using Broyden\'s second Jacobian approximation. This method is also known as \"Broyden's bad method\". Parameters ---------- %(params_basic)s %(broyden_params)s %(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See ``method='broyden2'`` in particular. Notes ----- This algorithm implements the inverse Jacobian Quasi-Newton update .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df) corresponding to Broyden's second method. References ---------- .. [1] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf Examples -------- The following functions define a system of nonlinear equations >>> def fun(x): ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, ... 0.5 * (x[1] - x[0])**3 + x[1]] A solution can be obtained as follows. >>> from scipy import optimize >>> sol = optimize.broyden2(fun, [0, 0]) >>> sol array([0.84116365, 0.15883529]) """ def _update(self, x, f, dx, df, dx_norm, df_norm): self._reduce() # reduce first to preserve secant condition v = df c = dx - self.Gm.matvec(df) d = v / df_norm**2 self.Gm.append(c, d) #------------------------------------------------------------------------------ # Broyden-like (restricted memory) #------------------------------------------------------------------------------ class Anderson(GenericBroyden): """ Find a root of a function, using (extended) Anderson mixing. The Jacobian is formed by for a 'best' solution in the space spanned by last `M` vectors. As a result, only a MxM matrix inversions and MxN multiplications are required. 
[Ey]_ Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). M : float, optional Number of previous vectors to retain. Defaults to 5. w0 : float, optional Regularization parameter for numerical stability. Compared to unity, good values of the order of 0.01. %(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See ``method='anderson'`` in particular. References ---------- .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). Examples -------- The following functions define a system of nonlinear equations >>> def fun(x): ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, ... 0.5 * (x[1] - x[0])**3 + x[1]] A solution can be obtained as follows. >>> from scipy import optimize >>> sol = optimize.anderson(fun, [0, 0]) >>> sol array([0.84116588, 0.15883789]) """ # Note: # # Anderson method maintains a rank M approximation of the inverse Jacobian, # # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v # A = W + dF^H dF # W = w0^2 diag(dF^H dF) # # so that for w0 = 0 the secant condition applies for last M iterates, i.e., # # J^-1 df_j = dx_j # # for all j = 0 ... M-1. # # Moreover, (from Sherman-Morrison-Woodbury formula) # # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v # C = (dX + alpha dF) A^-1 # b = -1/alpha # # and after simplification # # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v # def __init__(self, alpha=None, w0=0.01, M=5): GenericBroyden.__init__(self) self.alpha = alpha self.M = M self.dx = [] self.df = [] self.gamma = None self.w0 = w0 def solve(self, f, tol=0): dx = -self.alpha*f n = len(self.dx) if n == 0: return dx df_f = np.empty(n, dtype=f.dtype) for k in range(n): df_f[k] = vdot(self.df[k], f) try: gamma = solve(self.a, df_f) except LinAlgError: # singular; reset the Jacobian approximation del self.dx[:] del self.df[:] return dx for m in range(n): dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) return dx def matvec(self, f): dx = -f/self.alpha n = len(self.dx) if n == 0: return dx df_f = np.empty(n, dtype=f.dtype) for k in range(n): df_f[k] = vdot(self.df[k], f) b = np.empty((n, n), dtype=f.dtype) for i in range(n): for j in range(n): b[i,j] = vdot(self.df[i], self.dx[j]) if i == j and self.w0 != 0: b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha gamma = solve(b, df_f) for m in range(n): dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) return dx def _update(self, x, f, dx, df, dx_norm, df_norm): if self.M == 0: return self.dx.append(dx) self.df.append(df) while len(self.dx) > self.M: self.dx.pop(0) self.df.pop(0) n = len(self.dx) a = np.zeros((n, n), dtype=f.dtype) for i in range(n): for j in range(i, n): if i == j: wd = self.w0**2 else: wd = 0 a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) a += np.triu(a, 1).T.conj() self.a = a #------------------------------------------------------------------------------ # Simple iterations #------------------------------------------------------------------------------ class DiagBroyden(GenericBroyden): """ Find a root of a function, using diagonal Broyden Jacobian approximation. The Jacobian approximation is derived from previous iterations, by retaining only the diagonal of Broyden matrices. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). 
%(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See ``method='diagbroyden'`` in particular. Examples -------- The following functions define a system of nonlinear equations >>> def fun(x): ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, ... 0.5 * (x[1] - x[0])**3 + x[1]] A solution can be obtained as follows. >>> from scipy import optimize >>> sol = optimize.diagbroyden(fun, [0, 0]) >>> sol array([0.84116403, 0.15883384]) """ def __init__(self, alpha=None): GenericBroyden.__init__(self) self.alpha = alpha def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype) def solve(self, f, tol=0): return -f / self.d def matvec(self, f): return -f * self.d def rsolve(self, f, tol=0): return -f / self.d.conj() def rmatvec(self, f): return -f * self.d.conj() def todense(self): return np.diag(-self.d) def _update(self, x, f, dx, df, dx_norm, df_norm): self.d -= (df + self.d*dx)*dx/dx_norm**2 class LinearMixing(GenericBroyden): """ Find a root of a function, using a scalar Jacobian approximation. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional The Jacobian approximation is (-1/alpha). %(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See ``method='linearmixing'`` in particular. """ def __init__(self, alpha=None): GenericBroyden.__init__(self) self.alpha = alpha def solve(self, f, tol=0): return -f*self.alpha def matvec(self, f): return -f/self.alpha def rsolve(self, f, tol=0): return -f*np.conj(self.alpha) def rmatvec(self, f): return -f/np.conj(self.alpha) def todense(self): return np.diag(np.full(self.shape[0], -1/self.alpha)) def _update(self, x, f, dx, df, dx_norm, df_norm): pass class ExcitingMixing(GenericBroyden): """ Find a root of a function, using a tuned diagonal Jacobian approximation. The Jacobian matrix is diagonal and is tuned on each iteration. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. See Also -------- root : Interface to root finding algorithms for multivariate functions. See ``method='excitingmixing'`` in particular. Parameters ---------- %(params_basic)s alpha : float, optional Initial Jacobian approximation is (-1/alpha). alphamax : float, optional The entries of the diagonal Jacobian are kept in the range ``[alpha, alphamax]``. 
%(params_extra)s """ def __init__(self, alpha=None, alphamax=1.0): GenericBroyden.__init__(self) self.alpha = alpha self.alphamax = alphamax self.beta = None def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype) def solve(self, f, tol=0): return -f*self.beta def matvec(self, f): return -f/self.beta def rsolve(self, f, tol=0): return -f*self.beta.conj() def rmatvec(self, f): return -f/self.beta.conj() def todense(self): return np.diag(-1/self.beta) def _update(self, x, f, dx, df, dx_norm, df_norm): incr = f*self.last_f > 0 self.beta[incr] += self.alpha self.beta[~incr] = self.alpha np.clip(self.beta, 0, self.alphamax, out=self.beta) #------------------------------------------------------------------------------ # Iterative/Krylov approximated Jacobians #------------------------------------------------------------------------------ class KrylovJacobian(Jacobian): r""" Find a root of a function, using Krylov approximation for inverse Jacobian. This method is suitable for solving large-scale problems. Parameters ---------- %(params_basic)s rdiff : float, optional Relative step size to use in numerical differentiation. method : str or callable, optional Krylov method to use to approximate the Jacobian. Can be a string, or a function implementing the same interface as the iterative solvers in `scipy.sparse.linalg`. If a string, needs to be one of: ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``, ``'tfqmr'``. The default is `scipy.sparse.linalg.lgmres`. inner_maxiter : int, optional Parameter to pass to the "inner" Krylov solver: maximum number of iterations. Iteration will stop after maxiter steps even if the specified tolerance has not been achieved. inner_M : LinearOperator or InverseJacobian Preconditioner for the inner Krylov iteration. Note that you can use also inverse Jacobians as (adaptive) preconditioners. For example, >>> from scipy.optimize import BroydenFirst, KrylovJacobian >>> from scipy.optimize import InverseJacobian >>> jac = BroydenFirst() >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac)) If the preconditioner has a method named 'update', it will be called as ``update(x, f)`` after each nonlinear step, with ``x`` giving the current point, and ``f`` the current function value. outer_k : int, optional Size of the subspace kept across LGMRES nonlinear iterations. See `scipy.sparse.linalg.lgmres` for details. inner_kwargs : kwargs Keyword parameters for the "inner" Krylov solver (defined with `method`). Parameter names must start with the `inner_` prefix which will be stripped before passing on the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details. %(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See ``method='krylov'`` in particular. scipy.sparse.linalg.gmres scipy.sparse.linalg.lgmres Notes ----- This function implements a Newton-Krylov solver. The basic idea is to compute the inverse of the Jacobian with an iterative Krylov method. These methods require only evaluating the Jacobian-vector products, which are conveniently approximated by a finite difference: .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega Due to the use of iterative matrix inverses, these methods can deal with large nonlinear problems. SciPy's `scipy.sparse.linalg` module offers a selection of Krylov solvers to choose from. 
The default here is `lgmres`, which is a variant of restarted GMRES iteration that reuses some of the information obtained in the previous Newton steps to invert Jacobians in subsequent steps. For a review on Newton-Krylov methods, see for example [1]_, and for the LGMRES sparse inverse method, see [2]_. References ---------- .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method, SIAM, pp.57-83, 2003. :doi:`10.1137/1.9780898718898.ch3` .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004). :doi:`10.1016/j.jcp.2003.08.010` .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel, SIAM J. Matrix Anal. Appl. 26, 962 (2005). :doi:`10.1137/S0895479803422014` Examples -------- The following functions define a system of nonlinear equations >>> def fun(x): ... return [x[0] + 0.5 * x[1] - 1.0, ... 0.5 * (x[1] - x[0]) ** 2] A solution can be obtained as follows. >>> from scipy import optimize >>> sol = optimize.newton_krylov(fun, [0, 0]) >>> sol array([0.66731771, 0.66536458]) """ def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, inner_M=None, outer_k=10, **kw): self.preconditioner = inner_M self.rdiff = rdiff # Note that this retrieves one of the named functions, or otherwise # uses `method` as is (i.e., for a user-provided callable). self.method = dict( bicgstab=scipy.sparse.linalg.bicgstab, gmres=scipy.sparse.linalg.gmres, lgmres=scipy.sparse.linalg.lgmres, cgs=scipy.sparse.linalg.cgs, minres=scipy.sparse.linalg.minres, tfqmr=scipy.sparse.linalg.tfqmr, ).get(method, method) self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) if self.method is scipy.sparse.linalg.gmres: # Replace GMRES's outer iteration with Newton steps self.method_kw['restart'] = inner_maxiter self.method_kw['maxiter'] = 1 self.method_kw.setdefault('atol', 0) elif self.method in (scipy.sparse.linalg.gcrotmk, scipy.sparse.linalg.bicgstab, scipy.sparse.linalg.cgs): self.method_kw.setdefault('atol', 0) elif self.method is scipy.sparse.linalg.lgmres: self.method_kw['outer_k'] = outer_k # Replace LGMRES's outer iteration with Newton steps self.method_kw['maxiter'] = 1 # Carry LGMRES's `outer_v` vectors across nonlinear iterations self.method_kw.setdefault('outer_v', []) self.method_kw.setdefault('prepend_outer_v', True) # But don't carry the corresponding Jacobian*v products, in case # the Jacobian changes a lot in the nonlinear step # # XXX: some trust-region inspired ideas might be more efficient... # See e.g., Brown & Saad. But needs to be implemented separately # since it's not an inexact Newton method. 
self.method_kw.setdefault('store_outer_Av', False) self.method_kw.setdefault('atol', 0) for key, value in kw.items(): if not key.startswith('inner_'): raise ValueError("Unknown parameter %s" % key) self.method_kw[key[6:]] = value def _update_diff_step(self): mx = abs(self.x0).max() mf = abs(self.f0).max() self.omega = self.rdiff * max(1, mx) / max(1, mf) def matvec(self, v): nv = norm(v) if nv == 0: return 0*v sc = self.omega / nv r = (self.func(self.x0 + sc*v) - self.f0) / sc if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): raise ValueError('Function returned non-finite results') return r def solve(self, rhs, tol=0): if 'tol' in self.method_kw: sol, info = self.method(self.op, rhs, **self.method_kw) else: sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw) return sol def update(self, x, f): self.x0 = x self.f0 = f self._update_diff_step() # Update also the preconditioner, if possible if self.preconditioner is not None: if hasattr(self.preconditioner, 'update'): self.preconditioner.update(x, f) def setup(self, x, f, func): Jacobian.setup(self, x, f, func) self.x0 = x self.f0 = f self.op = scipy.sparse.linalg.aslinearoperator(self) if self.rdiff is None: self.rdiff = np.finfo(x.dtype).eps ** (1./2) self._update_diff_step() # Setup also the preconditioner, if possible if self.preconditioner is not None: if hasattr(self.preconditioner, 'setup'): self.preconditioner.setup(x, f, func) #------------------------------------------------------------------------------ # Wrapper functions #------------------------------------------------------------------------------ def _nonlin_wrapper(name, jac): """ Construct a solver wrapper with given name and Jacobian approx. It inspects the keyword arguments of ``jac.__init__``, and allows to use the same arguments in the wrapper function, in addition to the keyword arguments of `nonlin_solve` """ signature = _getfullargspec(jac.__init__) args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature kwargs = list(zip(args[-len(defaults):], defaults)) kw_str = ", ".join([f"{k}={v!r}" for k, v in kwargs]) if kw_str: kw_str = ", " + kw_str kwkw_str = ", ".join([f"{k}={k}" for k, v in kwargs]) if kwkw_str: kwkw_str = kwkw_str + ", " if kwonlyargs: raise ValueError('Unexpected signature %s' % signature) # Construct the wrapper function so that its keyword arguments # are visible in pydoc.help etc. wrapper = """ def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw): jac = %(jac)s(%(kwkw)s **kw) return nonlin_solve(F, xin, jac, iter, verbose, maxiter, f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, callback) """ wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, kwkw=kwkw_str) ns = {} ns.update(globals()) exec(wrapper, ns) func = ns[name] func.__doc__ = jac.__doc__ _set_doc(func) return func broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) anderson = _nonlin_wrapper('anderson', Anderson) linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
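
# Editorial sketch (not part of the original module): the wrappers generated
# above accept the keyword arguments of the corresponding Jacobian class in
# addition to those of `nonlin_solve`.  A minimal, hedged usage example,
# mirroring the docstring examples; the parameter choices are illustrative only.
def _example_broyden1_usage():  # illustrative only; never called by the module
    import numpy as np
    from scipy import optimize

    def fun(x):
        return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                0.5 * (x[1] - x[0])**3 + x[1]]

    # `reduction_method` and `max_rank` are forwarded to BroydenFirst.__init__;
    # `f_tol` is forwarded to nonlin_solve.
    sol = optimize.broyden1(fun, [0, 0], reduction_method='svd',
                            max_rank=10, f_tol=1e-10)
    return np.asarray(sol)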
49031
30.270408
104
py
scipy
scipy-main/scipy/optimize/_minpack_py.py
import warnings from . import _minpack import numpy as np from numpy import (atleast_1d, triu, shape, transpose, zeros, prod, greater, asarray, inf, finfo, inexact, issubdtype, dtype) from scipy import linalg from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError from scipy._lib._util import _asarray_validated, _lazywhere, _contains_nan from scipy._lib._util import getfullargspec_no_self as _getfullargspec from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning from ._lsq import least_squares # from ._lsq.common import make_strictly_feasible from ._lsq.least_squares import prepare_bounds from scipy.optimize._minimize import Bounds error = _minpack.error __all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit'] def _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape=None): res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) if (output_shape is not None) and (shape(res) != output_shape): if (output_shape[0] != 1): if len(output_shape) > 1: if output_shape[1] == 1: return shape(res) msg = "{}: there is a mismatch between the input and output " \ "shape of the '{}' argument".format(checker, argname) func_name = getattr(thefunc, '__name__', None) if func_name: msg += " '%s'." % func_name else: msg += "." msg += f'Shape should be {output_shape} but it is {shape(res)}.' raise TypeError(msg) if issubdtype(res.dtype, inexact): dt = res.dtype else: dt = dtype(float) return shape(res), dt def fsolve(func, x0, args=(), fprime=None, full_output=0, col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None, epsfcn=None, factor=100, diag=None): """ Find the roots of a function. Return the roots of the (non-linear) equations defined by ``func(x) = 0`` given a starting estimate. Parameters ---------- func : callable ``f(x, *args)`` A function that takes at least one (possibly vector) argument, and returns a value of the same length. x0 : ndarray The starting estimate for the roots of ``func(x) = 0``. args : tuple, optional Any extra arguments to `func`. fprime : callable ``f(x, *args)``, optional A function to compute the Jacobian of `func` with derivatives across the rows. By default, the Jacobian will be estimated. full_output : bool, optional If True, return optional outputs. col_deriv : bool, optional Specify whether the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). xtol : float, optional The calculation will terminate if the relative error between two consecutive iterates is at most `xtol`. maxfev : int, optional The maximum number of calls to the function. If zero, then ``100*(N+1)`` is the maximum where N is the number of elements in `x0`. band : tuple, optional If set to a two-sequence containing the number of sub- and super-diagonals within the band of the Jacobi matrix, the Jacobi matrix is considered banded (only for ``fprime=None``). epsfcn : float, optional A suitable step length for the forward-difference approximation of the Jacobian (for ``fprime=None``). If `epsfcn` is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``. diag : sequence, optional N positive entries that serve as a scale factors for the variables. Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). 
infodict : dict A dictionary of optional outputs with the keys: ``nfev`` number of function calls ``njev`` number of Jacobian calls ``fvec`` function evaluated at the output ``fjac`` the orthogonal matrix, q, produced by the QR factorization of the final approximate Jacobian matrix, stored column wise ``r`` upper triangular matrix produced by QR factorization of the same matrix ``qtf`` the vector ``(transpose(q) * fvec)`` ier : int An integer flag. Set to 1 if a solution was found, otherwise refer to `mesg` for more information. mesg : str If no solution is found, `mesg` details the cause of failure. See Also -------- root : Interface to root finding algorithms for multivariate functions. See the ``method='hybr'`` in particular. Notes ----- ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms. Examples -------- Find a solution to the system of equations: ``x0*cos(x1) = 4, x1*x0 - x1 = 5``. >>> import numpy as np >>> from scipy.optimize import fsolve >>> def func(x): ... return [x[0] * np.cos(x[1]) - 4, ... x[1] * x[0] - x[1] - 5] >>> root = fsolve(func, [1, 1]) >>> root array([6.50409711, 0.90841421]) >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0. array([ True, True]) """ options = {'col_deriv': col_deriv, 'xtol': xtol, 'maxfev': maxfev, 'band': band, 'eps': epsfcn, 'factor': factor, 'diag': diag} res = _root_hybr(func, x0, args, jac=fprime, **options) if full_output: x = res['x'] info = {k: res.get(k) for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res} info['fvec'] = res['fun'] return x, info, res['status'], res['message'] else: status = res['status'] msg = res['message'] if status == 0: raise TypeError(msg) elif status == 1: pass elif status in [2, 3, 4, 5]: warnings.warn(msg, RuntimeWarning) else: raise TypeError(msg) return res['x'] def _root_hybr(func, x0, args=(), jac=None, col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None, factor=100, diag=None, **unknown_options): """ Find the roots of a multivariate function using MINPACK's hybrd and hybrj routines (modified Powell method). Options ------- col_deriv : bool Specify whether the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). xtol : float The calculation will terminate if the relative error between two consecutive iterates is at most `xtol`. maxfev : int The maximum number of calls to the function. If zero, then ``100*(N+1)`` is the maximum where N is the number of elements in `x0`. band : tuple If set to a two-sequence containing the number of sub- and super-diagonals within the band of the Jacobi matrix, the Jacobi matrix is considered banded (only for ``fprime=None``). eps : float A suitable step length for the forward-difference approximation of the Jacobian (for ``fprime=None``). If `eps` is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float A parameter determining the initial step bound (``factor * || diag * x||``). Should be in the interval ``(0.1, 100)``. diag : sequence N positive entries that serve as a scale factors for the variables. 
""" _check_unknown_options(unknown_options) epsfcn = eps x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,)) if epsfcn is None: epsfcn = finfo(dtype).eps Dfun = jac if Dfun is None: if band is None: ml, mu = -10, -10 else: ml, mu = band[:2] if maxfev == 0: maxfev = 200 * (n + 1) retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev, ml, mu, epsfcn, factor, diag) else: _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n)) if (maxfev == 0): maxfev = 100 * (n + 1) retval = _minpack._hybrj(func, Dfun, x0, args, 1, col_deriv, xtol, maxfev, factor, diag) x, status = retval[0], retval[-1] errors = {0: "Improper input parameters were entered.", 1: "The solution converged.", 2: "The number of calls to function has " "reached maxfev = %d." % maxfev, 3: "xtol=%f is too small, no further improvement " "in the approximate\n solution " "is possible." % xtol, 4: "The iteration is not making good progress, as measured " "by the \n improvement from the last five " "Jacobian evaluations.", 5: "The iteration is not making good progress, " "as measured by the \n improvement from the last " "ten iterations.", 'unknown': "An error occurred."} info = retval[1] info['fun'] = info.pop('fvec') sol = OptimizeResult(x=x, success=(status == 1), status=status) sol.update(info) try: sol['message'] = errors[status] except KeyError: sol['message'] = errors['unknown'] return sol LEASTSQ_SUCCESS = [1, 2, 3, 4] LEASTSQ_FAILURE = [5, 6, 7, 8] def leastsq(func, x0, args=(), Dfun=None, full_output=False, col_deriv=False, ftol=1.49012e-8, xtol=1.49012e-8, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): """ Minimize the sum of squares of a set of equations. :: x = arg min(sum(func(y)**2,axis=0)) y Parameters ---------- func : callable Should take at least one (possibly length ``N`` vector) argument and returns ``M`` floating point numbers. It must not return NaNs or fitting might fail. ``M`` must be greater than or equal to ``N``. x0 : ndarray The starting estimate for the minimization. args : tuple, optional Any extra arguments to func are placed in this tuple. Dfun : callable, optional A function or method to compute the Jacobian of func with derivatives across the rows. If this is None, the Jacobian will be estimated. full_output : bool, optional If ``True``, return all optional outputs (not just `x` and `ier`). col_deriv : bool, optional If ``True``, specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float, optional Relative error desired in the sum of squares. xtol : float, optional Relative error desired in the approximate solution. gtol : float, optional Orthogonality desired between the function vector and the columns of the Jacobian. maxfev : int, optional The maximum number of calls to the function. If `Dfun` is provided, then the default `maxfev` is 100*(N+1) where N is the number of elements in x0, otherwise the default `maxfev` is 200*(N+1). epsfcn : float, optional A variable used in determining a suitable step length for the forward- difference approximation of the Jacobian (for Dfun=None). Normally the actual step length will be sqrt(epsfcn)*x If epsfcn is less than the machine precision, it is assumed that the relative errors are of the order of the machine precision. factor : float, optional A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. 
diag : sequence, optional N positive entries that serve as a scale factors for the variables. Returns ------- x : ndarray The solution (or the result of the last iteration for an unsuccessful call). cov_x : ndarray The inverse of the Hessian. `fjac` and `ipvt` are used to construct an estimate of the Hessian. A value of None indicates a singular matrix, which means the curvature in parameters `x` is numerically flat. To obtain the covariance matrix of the parameters `x`, `cov_x` must be multiplied by the variance of the residuals -- see curve_fit. Only returned if `full_output` is ``True``. infodict : dict a dictionary of optional outputs with the keys: ``nfev`` The number of function calls ``fvec`` The function evaluated at the output ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. ``qtf`` The vector (transpose(q) * fvec). Only returned if `full_output` is ``True``. mesg : str A string message giving information about the cause of failure. Only returned if `full_output` is ``True``. ier : int An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable 'mesg' gives more information. See Also -------- least_squares : Newer interface to solve nonlinear least-squares problems with bounds on the variables. See ``method='lm'`` in particular. Notes ----- "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. cov_x is a Jacobian approximation to the Hessian of the least squares objective function. This approximation assumes that the objective function is based on the difference between some observed target data (ydata) and a (non-linear) function of the parameters `f(xdata, params)` :: func(params) = ydata - f(xdata, params) so that the objective function is :: min sum((ydata - f(xdata, params))**2, axis=0) params The solution, `x`, is always a 1-D array, regardless of the shape of `x0`, or whether `x0` is a scalar. Examples -------- >>> from scipy.optimize import leastsq >>> def func(x): ... 
return 2*(x-3)**2+1 >>> leastsq(func, 0) (array([2.99999999]), 1) """ x0 = asarray(x0).flatten() n = len(x0) if not isinstance(args, tuple): args = (args,) shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) m = shape[0] if n > m: raise TypeError(f"Improper input: func input vector length N={n} must" f" not exceed func output vector length M={m}") if epsfcn is None: epsfcn = finfo(dtype).eps if Dfun is None: if maxfev == 0: maxfev = 200*(n + 1) retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol, gtol, maxfev, epsfcn, factor, diag) else: if col_deriv: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) else: _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) if maxfev == 0: maxfev = 100 * (n + 1) retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv, ftol, xtol, gtol, maxfev, factor, diag) errors = {0: ["Improper input parameters.", TypeError], 1: ["Both actual and predicted relative reductions " "in the sum of squares\n are at most %f" % ftol, None], 2: ["The relative error between two consecutive " "iterates is at most %f" % xtol, None], 3: ["Both actual and predicted relative reductions in " "the sum of squares\n are at most {:f} and the " "relative error between two consecutive " "iterates is at \n most {:f}".format(ftol, xtol), None], 4: ["The cosine of the angle between func(x) and any " "column of the\n Jacobian is at most %f in " "absolute value" % gtol, None], 5: ["Number of calls to function has reached " "maxfev = %d." % maxfev, ValueError], 6: ["ftol=%f is too small, no further reduction " "in the sum of squares\n is possible." % ftol, ValueError], 7: ["xtol=%f is too small, no further improvement in " "the approximate\n solution is possible." % xtol, ValueError], 8: ["gtol=%f is too small, func(x) is orthogonal to the " "columns of\n the Jacobian to machine " "precision." % gtol, ValueError]} # The FORTRAN return value (possible return values are >= 0 and <= 8) info = retval[-1] if full_output: cov_x = None if info in LEASTSQ_SUCCESS: # This was # perm = take(eye(n), retval[1]['ipvt'] - 1, 0) # r = triu(transpose(retval[1]['fjac'])[:n, :]) # R = dot(r, perm) # cov_x = inv(dot(transpose(R), R)) # but the explicit dot product was not necessary and sometimes # the result was not symmetric positive definite. See gh-4555. 
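            # In short: cov_x approximates inv(J^T J) for the final Jacobian J.
            # MINPACK returns the upper triangular factor of a column-pivoted
            # QR of J (pivot indices in `ipvt`), so inverting that triangular
            # factor with LAPACK ``trtri`` and then undoing the pivoting via
            # `perm` reproduces inv(R^T R) without forming R^T R explicitly.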
perm = retval[1]['ipvt'] - 1 n = len(perm) r = triu(transpose(retval[1]['fjac'])[:n, :]) inv_triu = linalg.get_lapack_funcs('trtri', (r,)) try: # inverse of permuted matrix is a permuation of matrix inverse invR, trtri_info = inv_triu(r) # default: upper, non-unit diag if trtri_info != 0: # explicit comparison for readability raise LinAlgError(f'trtri returned info {trtri_info}') invR[perm] = invR.copy() cov_x = invR @ invR.T except (LinAlgError, ValueError): pass return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info) else: if info in LEASTSQ_FAILURE: warnings.warn(errors[info][0], RuntimeWarning) elif info == 0: raise errors[info][1](errors[info][0]) return retval[0], info def _lightweight_memoizer(f): # very shallow memoization - only remember the first set of parameters # and corresponding function value to address gh-13670 def _memoized_func(params): if np.all(_memoized_func.last_params == params): return _memoized_func.last_val val = f(params) if _memoized_func.last_params is None: _memoized_func.last_params = np.copy(params) _memoized_func.last_val = val return val _memoized_func.last_params = None _memoized_func.last_val = None return _memoized_func def _wrap_func(func, xdata, ydata, transform): if transform is None: def func_wrapped(params): return func(xdata, *params) - ydata elif transform.ndim == 1: def func_wrapped(params): return transform * (func(xdata, *params) - ydata) else: # Chisq = (y - yd)^T C^{-1} (y-yd) # transform = L such that C = L L^T # C^{-1} = L^{-T} L^{-1} # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd) # Define (y-yd)' = L^{-1} (y-yd) # by solving # L (y-yd)' = (y-yd) # and minimize (y-yd)'^T (y-yd)' def func_wrapped(params): return solve_triangular(transform, func(xdata, *params) - ydata, lower=True) return func_wrapped def _wrap_jac(jac, xdata, transform): if transform is None: def jac_wrapped(params): return jac(xdata, *params) elif transform.ndim == 1: def jac_wrapped(params): return transform[:, np.newaxis] * np.asarray(jac(xdata, *params)) else: def jac_wrapped(params): return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True) return jac_wrapped def _initialize_feasible(lb, ub): p0 = np.ones_like(lb) lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) mask = lb_finite & ub_finite p0[mask] = 0.5 * (lb[mask] + ub[mask]) mask = lb_finite & ~ub_finite p0[mask] = lb[mask] + 1 mask = ~lb_finite & ub_finite p0[mask] = ub[mask] - 1 return p0 def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, check_finite=None, bounds=(-np.inf, np.inf), method=None, jac=None, *, full_output=False, nan_policy=None, **kwargs): """ Use non-linear least squares to fit a function, f, to data. Assumes ``ydata = f(xdata, *params) + eps``. Parameters ---------- f : callable The model function, f(x, ...). It must take the independent variable as the first argument and the parameters to fit as separate remaining arguments. xdata : array_like The independent variable where the data is measured. Should usually be an M-length sequence or an (k,M)-shaped array for functions with k predictors, and each element should be float convertible if it is an array like object. ydata : array_like The dependent data, a length M array - nominally ``f(xdata, ...)``. p0 : array_like, optional Initial guess for the parameters (length N). If None, then the initial values will all be 1 (if the number of parameters for the function can be determined using introspection, otherwise a ValueError is raised). 
sigma : None or M-length sequence or MxM array, optional Determines the uncertainty in `ydata`. If we define residuals as ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma` depends on its number of dimensions: - A 1-D `sigma` should contain values of standard deviations of errors in `ydata`. In this case, the optimized function is ``chisq = sum((r / sigma) ** 2)``. - A 2-D `sigma` should contain the covariance matrix of errors in `ydata`. In this case, the optimized function is ``chisq = r.T @ inv(sigma) @ r``. .. versionadded:: 0.19 None (default) is equivalent of 1-D `sigma` filled with ones. absolute_sigma : bool, optional If True, `sigma` is used in an absolute sense and the estimated parameter covariance `pcov` reflects these absolute values. If False (default), only the relative magnitudes of the `sigma` values matter. The returned parameter covariance matrix `pcov` is based on scaling `sigma` by a constant factor. This constant is set by demanding that the reduced `chisq` for the optimal parameters `popt` when using the *scaled* `sigma` equals unity. In other words, `sigma` is scaled to match the sample variance of the residuals after the fit. Default is False. Mathematically, ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)`` check_finite : bool, optional If True, check that the input arrays do not contain nans of infs, and raise a ValueError if they do. Setting this parameter to False may silently produce nonsensical results if the input arrays do contain nans. Default is True if `nan_policy` is not specified explicitly and False otherwise. bounds : 2-tuple of array_like or `Bounds`, optional Lower and upper bounds on parameters. Defaults to no bounds. There are two ways to specify the bounds: - Instance of `Bounds` class. - 2-tuple of array_like: Each element of the tuple must be either an array with the length equal to the number of parameters, or a scalar (in which case the bound is taken to be the same for all parameters). Use ``np.inf`` with an appropriate sign to disable bounds on all or some parameters. method : {'lm', 'trf', 'dogbox'}, optional Method to use for optimization. See `least_squares` for more details. Default is 'lm' for unconstrained problems and 'trf' if `bounds` are provided. The method 'lm' won't work when the number of observations is less than the number of variables, use 'trf' or 'dogbox' in this case. .. versionadded:: 0.17 jac : callable, string or None, optional Function with signature ``jac(x, ...)`` which computes the Jacobian matrix of the model function with respect to parameters as a dense array_like structure. It will be scaled according to provided `sigma`. If None (default), the Jacobian will be estimated numerically. String keywords for 'trf' and 'dogbox' methods can be used to select a finite difference scheme, see `least_squares`. .. versionadded:: 0.18 full_output : boolean, optional If True, this function returns additioal information: `infodict`, `mesg`, and `ier`. .. versionadded:: 1.9 nan_policy : {'raise', 'omit', None}, optional Defines how to handle when input contains nan. The following options are available (default is None): * 'raise': throws an error * 'omit': performs the calculations ignoring nan values * None: no special handling of NaNs is performed (except what is done by check_finite); the behavior when NaNs are present is implementation-dependent and may change. Note that if this value is specified explicitly (not None), `check_finite` will be set as False. .. 
versionadded:: 1.11 **kwargs Keyword arguments passed to `leastsq` for ``method='lm'`` or `least_squares` otherwise. Returns ------- popt : array Optimal values for the parameters so that the sum of the squared residuals of ``f(xdata, *popt) - ydata`` is minimized. pcov : 2-D array The estimated approximate covariance of popt. The diagonals provide the variance of the parameter estimate. To compute one standard deviation errors on the parameters, use ``perr = np.sqrt(np.diag(pcov))``. Note that the relationship between `cov` and parameter error estimates is derived based on a linear approximation to the model function around the optimum [1]. When this approximation becomes inaccurate, `cov` may not provide an accurate measure of uncertainty. How the `sigma` parameter affects the estimated covariance depends on `absolute_sigma` argument, as described above. If the Jacobian matrix at the solution doesn't have a full rank, then 'lm' method returns a matrix filled with ``np.inf``, on the other hand 'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute the covariance matrix. Covariance matrices with large condition numbers (e.g. computed with `numpy.linalg.cond`) may indicate that results are unreliable. infodict : dict (returned only if `full_output` is True) a dictionary of optional outputs with the keys: ``nfev`` The number of function calls. Methods 'trf' and 'dogbox' do not count function calls for numerical Jacobian approximation, as opposed to 'lm' method. ``fvec`` The residual values evaluated at the solution, for a 1-D `sigma` this is ``(f(x, *popt) - ydata)/sigma``. ``fjac`` A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. Method 'lm' only provides this information. ``ipvt`` An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. Method 'lm' only provides this information. ``qtf`` The vector (transpose(q) * fvec). Method 'lm' only provides this information. .. versionadded:: 1.9 mesg : str (returned only if `full_output` is True) A string message giving information about the solution. .. versionadded:: 1.9 ier : int (returnned only if `full_output` is True) An integer flag. If it is equal to 1, 2, 3 or 4, the solution was found. Otherwise, the solution was not found. In either case, the optional output variable `mesg` gives more information. .. versionadded:: 1.9 Raises ------ ValueError if either `ydata` or `xdata` contain NaNs, or if incompatible options are used. RuntimeError if the least-squares minimization fails. OptimizeWarning if covariance of the parameters can not be estimated. See Also -------- least_squares : Minimize the sum of squares of nonlinear functions. scipy.stats.linregress : Calculate a linear least squares regression for two sets of measurements. Notes ----- Users should ensure that inputs `xdata`, `ydata`, and the output of `f` are ``float64``, or else the optimization may return incorrect results. With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm through `leastsq`. Note that this algorithm can only deal with unconstrained problems. Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to the docstring of `least_squares` for more information. References ---------- [1] K. Vugrin et al. 
Confidence region estimation techniques for nonlinear regression in groundwater flow: Three case studies. Water Resources Research, Vol. 43, W03423, :doi:`10.1029/2005WR004804` Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.optimize import curve_fit >>> def func(x, a, b, c): ... return a * np.exp(-b * x) + c Define the data to be fit with some noise: >>> xdata = np.linspace(0, 4, 50) >>> y = func(xdata, 2.5, 1.3, 0.5) >>> rng = np.random.default_rng() >>> y_noise = 0.2 * rng.normal(size=xdata.size) >>> ydata = y + y_noise >>> plt.plot(xdata, ydata, 'b-', label='data') Fit for the parameters a, b, c of the function `func`: >>> popt, pcov = curve_fit(func, xdata, ydata) >>> popt array([2.56274217, 1.37268521, 0.47427475]) >>> plt.plot(xdata, func(xdata, *popt), 'r-', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) Constrain the optimization to the region of ``0 <= a <= 3``, ``0 <= b <= 1`` and ``0 <= c <= 0.5``: >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5])) >>> popt array([2.43736712, 1. , 0.34463856]) >>> plt.plot(xdata, func(xdata, *popt), 'g--', ... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt)) >>> plt.xlabel('x') >>> plt.ylabel('y') >>> plt.legend() >>> plt.show() For reliable results, the model `func` should not be overparametrized; redundant parameters can cause unreliable covariance matrices and, in some cases, poorer quality fits. As a quick check of whether the model may be overparameterized, calculate the condition number of the covariance matrix: >>> np.linalg.cond(pcov) 34.571092161547405 # may vary The value is small, so it does not raise much concern. If, however, we were to add a fourth parameter ``d`` to `func` with the same effect as ``a``: >>> def func(x, a, b, c, d): ... return a * d * np.exp(-b * x) + c # a and d are redundant >>> popt, pcov = curve_fit(func, xdata, ydata) >>> np.linalg.cond(pcov) 1.13250718925596e+32 # may vary Such a large value is cause for concern. The diagonal elements of the covariance matrix, which is related to uncertainty of the fit, gives more information: >>> np.diag(pcov) array([1.48814742e+29, 3.78596560e-02, 5.39253738e-03, 2.76417220e+28]) # may vary Note that the first and last terms are much larger than the other elements, suggesting that the optimal values of these parameters are ambiguous and that only one of these parameters is needed in the model. """ # noqa if p0 is None: # determine number of parameters by inspecting the function sig = _getfullargspec(f) args = sig.args if len(args) < 2: raise ValueError("Unable to determine number of fit parameters.") n = len(args) - 1 else: p0 = np.atleast_1d(p0) n = p0.size if isinstance(bounds, Bounds): lb, ub = bounds.lb, bounds.ub else: lb, ub = prepare_bounds(bounds, n) if p0 is None: p0 = _initialize_feasible(lb, ub) bounded_problem = np.any((lb > -np.inf) | (ub < np.inf)) if method is None: if bounded_problem: method = 'trf' else: method = 'lm' if method == 'lm' and bounded_problem: raise ValueError("Method 'lm' only works for unconstrained problems. " "Use 'trf' or 'dogbox' instead.") if check_finite is None: check_finite = True if nan_policy is None else False # optimization may produce garbage for float32 inputs, cast them to float64 if check_finite: ydata = np.asarray_chkfinite(ydata, float) else: ydata = np.asarray(ydata, float) if isinstance(xdata, (list, tuple, np.ndarray)): # `xdata` is passed straight to the user-defined `f`, so allow # non-array_like `xdata`. 
if check_finite: xdata = np.asarray_chkfinite(xdata, float) else: xdata = np.asarray(xdata, float) if ydata.size == 0: raise ValueError("`ydata` must not be empty!") # nan handling is needed only if check_finite is False because if True, # the x-y data are already checked, and they don't contain nans. if not check_finite and nan_policy is not None: if nan_policy == "propagate": raise ValueError("`nan_policy='propagate'` is not supported " "by this function.") policies = [None, 'raise', 'omit'] x_contains_nan, nan_policy = _contains_nan(xdata, nan_policy, policies=policies) y_contains_nan, nan_policy = _contains_nan(ydata, nan_policy, policies=policies) if (x_contains_nan or y_contains_nan) and nan_policy == 'omit': # ignore NaNs for N dimensional arrays has_nan = np.isnan(xdata) has_nan = has_nan.any(axis=tuple(range(has_nan.ndim-1))) has_nan |= np.isnan(ydata) xdata = xdata[..., ~has_nan] ydata = ydata[~has_nan] # Determine type of sigma if sigma is not None: sigma = np.asarray(sigma) # if 1-D, sigma are errors, define transform = 1/sigma if sigma.shape == (ydata.size, ): transform = 1.0 / sigma # if 2-D, sigma is the covariance matrix, # define transform = L such that L L^T = C elif sigma.shape == (ydata.size, ydata.size): try: # scipy.linalg.cholesky requires lower=True to return L L^T = A transform = cholesky(sigma, lower=True) except LinAlgError as e: raise ValueError("`sigma` must be positive definite.") from e else: raise ValueError("`sigma` has incorrect shape.") else: transform = None func = _lightweight_memoizer(_wrap_func(f, xdata, ydata, transform)) if callable(jac): jac = _lightweight_memoizer(_wrap_jac(jac, xdata, transform)) elif jac is None and method != 'lm': jac = '2-point' if 'args' in kwargs: # The specification for the model function `f` does not support # additional arguments. Refer to the `curve_fit` docstring for # acceptable call signatures of `f`. raise ValueError("'args' is not a supported keyword argument.") if method == 'lm': # if ydata.size == 1, this might be used for broadcast. if ydata.size != 1 and n > ydata.size: raise TypeError(f"The number of func parameters={n} must not" f" exceed the number of data points={ydata.size}") res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs) popt, pcov, infodict, errmsg, ier = res ysize = len(infodict['fvec']) cost = np.sum(infodict['fvec'] ** 2) if ier not in [1, 2, 3, 4]: raise RuntimeError("Optimal parameters not found: " + errmsg) else: # Rename maxfev (leastsq) to max_nfev (least_squares), if specified. if 'max_nfev' not in kwargs: kwargs['max_nfev'] = kwargs.pop('maxfev', None) res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, **kwargs) if not res.success: raise RuntimeError("Optimal parameters not found: " + res.message) infodict = dict(nfev=res.nfev, fvec=res.fun) ier = res.status errmsg = res.message ysize = len(res.fun) cost = 2 * res.cost # res.cost is half sum of squares! popt = res.x # Do Moore-Penrose inverse discarding zero singular values. 
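        # With J = U @ diag(s) @ VT from the SVD below, the pseudoinverse of
        # J^T J is VT.T @ diag(1/s**2) @ VT; singular values below the
        # numerical-rank threshold are discarded before forming the product.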
_, s, VT = svd(res.jac, full_matrices=False) threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] s = s[s > threshold] VT = VT[:s.size] pcov = np.dot(VT.T / s**2, VT) warn_cov = False if pcov is None or np.isnan(pcov).any(): # indeterminate covariance pcov = zeros((len(popt), len(popt)), dtype=float) pcov.fill(inf) warn_cov = True elif not absolute_sigma: if ysize > p0.size: s_sq = cost / (ysize - p0.size) pcov = pcov * s_sq else: pcov.fill(inf) warn_cov = True if warn_cov: warnings.warn('Covariance of the parameters could not be estimated', category=OptimizeWarning) if full_output: return popt, pcov, infodict, errmsg, ier else: return popt, pcov def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0): """Perform a simple check on the gradient for correctness. """ x = atleast_1d(x0) n = len(x) x = x.reshape((n,)) fvec = atleast_1d(fcn(x, *args)) m = len(fvec) fvec = fvec.reshape((m,)) ldfjac = m fjac = atleast_1d(Dfcn(x, *args)) fjac = fjac.reshape((m, n)) if col_deriv == 0: fjac = transpose(fjac) xp = zeros((n,), float) err = zeros((m,), float) fvecp = None _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err) fvecp = atleast_1d(fcn(xp, *args)) fvecp = fvecp.reshape((m,)) _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err) good = (prod(greater(err, 0.5), axis=0)) return (good, err) def _del2(p0, p1, d): return p0 - np.square(p1 - p0) / d def _relerr(actual, desired): return (actual - desired) / desired def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel): p0 = x0 for i in range(maxiter): p1 = func(p0, *args) if use_accel: p2 = func(p1, *args) d = p2 - 2.0 * p1 + p0 p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2) else: p = p1 relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p) if np.all(np.abs(relerr) < xtol): return p p0 = p msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) raise RuntimeError(msg) def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'): """ Find a fixed point of the function. Given a function of one or more variables and a starting point, find a fixed point of the function: i.e., where ``func(x0) == x0``. Parameters ---------- func : function Function to evaluate. x0 : array_like Fixed point of function. args : tuple, optional Extra arguments to `func`. xtol : float, optional Convergence tolerance, defaults to 1e-08. maxiter : int, optional Maximum number of iterations, defaults to 500. method : {"del2", "iteration"}, optional Method of finding the fixed-point, defaults to "del2", which uses Steffensen's Method with Aitken's ``Del^2`` convergence acceleration [1]_. The "iteration" method simply iterates the function until convergence is detected, without attempting to accelerate the convergence. References ---------- .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80 Examples -------- >>> import numpy as np >>> from scipy import optimize >>> def func(x, c1, c2): ... return np.sqrt(c1/(x+c2)) >>> c1 = np.array([10,12.]) >>> c2 = np.array([3, 5.]) >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2)) array([ 1.4920333 , 1.37228132]) """ use_accel = {'del2': True, 'iteration': False}[method] x0 = _asarray_validated(x0, as_inexact=True) return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
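
# Editorial sketch (not part of the original module): `fixed_point` with
# method='del2' uses Steffensen's iteration with Aitken's Del^2 acceleration,
# i.e. the update implemented by `_del2` above.  One accelerated step written
# out by hand for an illustrative cosine map (the function choice is only an
# example):
def _example_del2_step():  # illustrative only; never called by the module
    import numpy as np

    def g(x):
        return np.cos(x)  # fixed point near 0.7390851

    p0 = 1.0
    p1 = g(p0)
    p2 = g(p1)
    d = p2 - 2.0 * p1 + p0
    p_accel = p0 - (p1 - p0) ** 2 / d  # same formula as `_del2`
    # `p_accel` (about 0.728) is already much closer to the fixed point than
    # the plain iterate `p2` (about 0.858).
    return p_accel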
43032
37.388046
91
py
scipy
scipy-main/scipy/optimize/_optimize.py
#__docformat__ = "restructuredtext en" # ******NOTICE*************** # optimize.py module by Travis E. Oliphant # # You may copy and use this module as you see fit with no # guarantee implied provided you keep this notice in all copies. # *****END NOTICE************ # A collection of optimization algorithms. Version 0.5 # CHANGES # Added fminbound (July 2001) # Added brute (Aug. 2002) # Finished line search satisfying strong Wolfe conditions (Mar. 2004) # Updated strong Wolfe conditions line search to use # cubic-interpolation (Mar. 2004) # Minimization routines __all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg', 'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der', 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', 'line_search', 'check_grad', 'OptimizeResult', 'show_options', 'OptimizeWarning'] __docformat__ = "restructuredtext en" import warnings import sys import inspect from numpy import (atleast_1d, eye, argmin, zeros, shape, squeeze, asarray, sqrt) import numpy as np from scipy.sparse.linalg import LinearOperator from ._linesearch import (line_search_wolfe1, line_search_wolfe2, line_search_wolfe2 as line_search, LineSearchWarning) from ._numdiff import approx_derivative from ._hessian_update_strategy import HessianUpdateStrategy from scipy._lib._util import getfullargspec_no_self as _getfullargspec from scipy._lib._util import MapWrapper, check_random_state from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS # standard status messages of optimizers _status_message = {'success': 'Optimization terminated successfully.', 'maxfev': 'Maximum number of function evaluations has ' 'been exceeded.', 'maxiter': 'Maximum number of iterations has been ' 'exceeded.', 'pr_loss': 'Desired error not necessarily achieved due ' 'to precision loss.', 'nan': 'NaN result encountered.', 'out_of_bounds': 'The result is outside of the provided ' 'bounds.'} class MemoizeJac: """ Decorator that caches the return values of a function returning `(fun, grad)` each time it is called. """ def __init__(self, fun): self.fun = fun self.jac = None self._value = None self.x = None def _compute_if_needed(self, x, *args): if not np.all(x == self.x) or self._value is None or self.jac is None: self.x = np.asarray(x).copy() fg = self.fun(x, *args) self.jac = fg[1] self._value = fg[0] def __call__(self, x, *args): """ returns the function value """ self._compute_if_needed(x, *args) return self._value def derivative(self, x, *args): self._compute_if_needed(x, *args) return self.jac def _indenter(s, n=0): """ Ensures that lines after the first are indented by the specified amount """ split = s.split("\n") indent = " "*n return ("\n" + indent).join(split) def _float_formatter_10(x): """ Returns a string representation of a float with exactly ten characters """ if np.isposinf(x): return " inf" elif np.isneginf(x): return " -inf" elif np.isnan(x): return " nan" return np.format_float_scientific(x, precision=3, pad_left=2, unique=False) def _dict_formatter(d, n=0, mplus=1, sorter=None): """ Pretty printer for dictionaries `n` keeps track of the starting indentation; lines are indented by this much after a line break. `mplus` is additional left padding applied to keys """ if isinstance(d, dict): m = max(map(len, list(d.keys()))) + mplus # width to print keys s = '\n'.join([k.rjust(m) + ': ' + # right justified, width m _indenter(_dict_formatter(v, m+n+2, 0, sorter), m+2) for k, v in sorter(d)]) # +2 for ': ' else: # By default, NumPy arrays print with linewidth=76. 
`n` is # the indent at which a line begins printing, so it is subtracted # from the default to avoid exceeding 76 characters total. # `edgeitems` is the number of elements to include before and after # ellipses when arrays are not shown in full. # `threshold` is the maximum number of elements for which an # array is shown in full. # These values tend to work well for use with OptimizeResult. with np.printoptions(linewidth=76-n, edgeitems=2, threshold=12, formatter={'float_kind': _float_formatter_10}): s = str(d) return s def _wrap_callback(callback, method=None): """Wrap a user-provided callback so that attributes can be attached.""" if callback is None or method in {'tnc', 'slsqp', 'cobyla'}: return callback # don't wrap sig = inspect.signature(callback) if set(sig.parameters) == {'intermediate_result'}: def wrapped_callback(res): return callback(intermediate_result=res) elif method == 'trust-constr': def wrapped_callback(res): return callback(np.copy(res.x), res) else: def wrapped_callback(res): return callback(np.copy(res.x)) wrapped_callback.stop_iteration = False return wrapped_callback def _call_callback_maybe_halt(callback, res): """Call wrapped callback; return True if minimization should stop. Parameters ---------- callback : callable or None A user-provided callback wrapped with `_wrap_callback` res : OptimizeResult Information about the current iterate Returns ------- halt : bool True if minimization should stop """ if callback is None: return False try: callback(res) return False except StopIteration: callback.stop_iteration = True # make `minimize` override status/msg return True class OptimizeResult(dict): """ Represents the optimization result. Attributes ---------- x : ndarray The solution of the optimization. success : bool Whether or not the optimizer exited successfully. status : int Termination status of the optimizer. Its value depends on the underlying solver. Refer to `message` for details. message : str Description of the cause of the termination. fun, jac, hess: ndarray Values of objective function, its Jacobian and its Hessian (if available). The Hessians may be approximations, see the documentation of the function in question. hess_inv : object Inverse of the objective function's Hessian; may be an approximation. Not available for all solvers. The type of this attribute may be either np.ndarray or scipy.sparse.linalg.LinearOperator. nfev, njev, nhev : int Number of evaluations of the objective functions and of its Jacobian and Hessian. nit : int Number of iterations performed by the optimizer. maxcv : float The maximum constraint violation. Notes ----- Depending on the specific solver being used, `OptimizeResult` may not have all attributes listed here, and they may have additional attributes not listed here. Since this class is essentially a subclass of dict with attribute accessors, one can see which attributes are available using the `OptimizeResult.keys` method. 
""" def __getattr__(self, name): try: return self[name] except KeyError as e: raise AttributeError(name) from e __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ def __repr__(self): order_keys = ['message', 'success', 'status', 'fun', 'funl', 'x', 'xl', 'col_ind', 'nit', 'lower', 'upper', 'eqlin', 'ineqlin', 'converged', 'flag', 'function_calls', 'iterations', 'root'] order_keys = getattr(self, '_order_keys', order_keys) # 'slack', 'con' are redundant with residuals # 'crossover_nit' is probably not interesting to most users omit_keys = {'slack', 'con', 'crossover_nit', '_order_keys'} def key(item): try: return order_keys.index(item[0].lower()) except ValueError: # item not in list return np.inf def omit_redundant(items): for item in items: if item[0] in omit_keys: continue yield item def item_sorter(d): return sorted(omit_redundant(d.items()), key=key) if self.keys(): return _dict_formatter(self, sorter=item_sorter) else: return self.__class__.__name__ + "()" def __dir__(self): return list(self.keys()) class OptimizeWarning(UserWarning): pass def _check_unknown_options(unknown_options): if unknown_options: msg = ", ".join(map(str, unknown_options.keys())) # Stack level 4: this is called from _minimize_*, which is # called from another function in SciPy. Level 4 is the first # level in user code. warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4) def is_finite_scalar(x): """Test whether `x` is either a finite scalar or a finite array scalar. """ return np.size(x) == 1 and np.isfinite(x) _epsilon = sqrt(np.finfo(float).eps) def vecnorm(x, ord=2): if ord == np.inf: return np.amax(np.abs(x)) elif ord == -np.inf: return np.amin(np.abs(x)) else: return np.sum(np.abs(x)**ord, axis=0)**(1.0 / ord) def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None, epsilon=None, finite_diff_rel_step=None, hess=None): """ Creates a ScalarFunction object for use with scalar minimizers (BFGS/LBFGSB/SLSQP/TNC/CG/etc). Parameters ---------- fun : callable The objective function to be minimized. ``fun(x, *args) -> float`` where ``x`` is an 1-D array with shape (n,) and ``args`` is a tuple of the fixed parameters needed to completely specify the function. x0 : ndarray, shape (n,) Initial guess. Array of real elements of size (n,), where 'n' is the number of independent variables. jac : {callable, '2-point', '3-point', 'cs', None}, optional Method for computing the gradient vector. If it is a callable, it should be a function that returns the gradient vector: ``jac(x, *args) -> array_like, shape (n,)`` If one of `{'2-point', '3-point', 'cs'}` is selected then the gradient is calculated with a relative step for finite differences. If `None`, then two-point finite differences with an absolute step is used. args : tuple, optional Extra arguments passed to the objective function and its derivatives (`fun`, `jac` functions). bounds : sequence, optional Bounds on variables. 'new-style' bounds are required. eps : float or ndarray If `jac is None` the absolute step size used for numerical approximation of the jacobian via forward differences. finite_diff_rel_step : None or array_like, optional If `jac in ['2-point', '3-point', 'cs']` the relative step size to use for numerical approximation of the jacobian. The absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically. 
hess : {callable, '2-point', '3-point', 'cs', None} Computes the Hessian matrix. If it is callable, it should return the Hessian matrix: ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` Alternatively, the keywords {'2-point', '3-point', 'cs'} select a finite difference scheme for numerical estimation. Whenever the gradient is estimated via finite-differences, the Hessian cannot be estimated with options {'2-point', '3-point', 'cs'} and needs to be estimated using one of the quasi-Newton strategies. Returns ------- sf : ScalarFunction """ if callable(jac): grad = jac elif jac in FD_METHODS: # epsilon is set to None so that ScalarFunction is made to use # rel_step epsilon = None grad = jac else: # default (jac is None) is to do 2-point finite differences with # absolute step size. ScalarFunction has to be provided an # epsilon value that is not None to use absolute steps. This is # normally the case from most _minimize* methods. grad = '2-point' epsilon = epsilon if hess is None: # ScalarFunction requires something for hess, so we give a dummy # implementation here if nothing is provided, return a value of None # so that downstream minimisers halt. The results of `fun.hess` # should not be used. def hess(x, *args): return None if bounds is None: bounds = (-np.inf, np.inf) # ScalarFunction caches. Reuse of fun(x) during grad # calculation reduces overall function evaluations. sf = ScalarFunction(fun, x0, args, grad, hess, finite_diff_rel_step, bounds, epsilon=epsilon) return sf def _clip_x_for_func(func, bounds): # ensures that x values sent to func are clipped to bounds # this is used as a mitigation for gh11403, slsqp/tnc sometimes # suggest a move that is outside the limits by 1 or 2 ULP. This # unclean fix makes sure x is strictly within bounds. def eval(x): x = _check_clip_x(x, bounds) return func(x) return eval def _check_clip_x(x, bounds): if (x < bounds[0]).any() or (x > bounds[1]).any(): warnings.warn("Values in x were outside bounds during a " "minimize step, clipping to bounds", RuntimeWarning) x = np.clip(x, bounds[0], bounds[1]) return x return x def rosen(x): """ The Rosenbrock function. The function computed is:: sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) Parameters ---------- x : array_like 1-D array of points at which the Rosenbrock function is to be computed. Returns ------- f : float The value of the Rosenbrock function. See Also -------- rosen_der, rosen_hess, rosen_hess_prod Examples -------- >>> import numpy as np >>> from scipy.optimize import rosen >>> X = 0.1 * np.arange(10) >>> rosen(X) 76.56 For higher-dimensional input ``rosen`` broadcasts. In the following example, we use this to plot a 2D landscape. Note that ``rosen_hess`` does not broadcast in this manner. >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.mplot3d import Axes3D >>> x = np.linspace(-1, 1, 50) >>> X, Y = np.meshgrid(x, x) >>> ax = plt.subplot(111, projection='3d') >>> ax.plot_surface(X, Y, rosen([X, Y])) >>> plt.show() """ x = asarray(x) r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, axis=0) return r def rosen_der(x): """ The derivative (i.e. gradient) of the Rosenbrock function. Parameters ---------- x : array_like 1-D array of points at which the derivative is to be computed. Returns ------- rosen_der : (N,) ndarray The gradient of the Rosenbrock function at `x`. 
See Also -------- rosen, rosen_hess, rosen_hess_prod Examples -------- >>> import numpy as np >>> from scipy.optimize import rosen_der >>> X = 0.1 * np.arange(9) >>> rosen_der(X) array([ -2. , 10.6, 15.6, 13.4, 6.4, -3. , -12.4, -19.4, 62. ]) """ x = asarray(x) xm = x[1:-1] xm_m1 = x[:-2] xm_p1 = x[2:] der = np.zeros_like(x) der[1:-1] = (200 * (xm - xm_m1**2) - 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) der[-1] = 200 * (x[-1] - x[-2]**2) return der def rosen_hess(x): """ The Hessian matrix of the Rosenbrock function. Parameters ---------- x : array_like 1-D array of points at which the Hessian matrix is to be computed. Returns ------- rosen_hess : ndarray The Hessian matrix of the Rosenbrock function at `x`. See Also -------- rosen, rosen_der, rosen_hess_prod Examples -------- >>> import numpy as np >>> from scipy.optimize import rosen_hess >>> X = 0.1 * np.arange(4) >>> rosen_hess(X) array([[-38., 0., 0., 0.], [ 0., 134., -40., 0.], [ 0., -40., 130., -80.], [ 0., 0., -80., 200.]]) """ x = atleast_1d(x) H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) diagonal = np.zeros(len(x), dtype=x.dtype) diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 diagonal[-1] = 200 diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] H = H + np.diag(diagonal) return H def rosen_hess_prod(x, p): """ Product of the Hessian matrix of the Rosenbrock function with a vector. Parameters ---------- x : array_like 1-D array of points at which the Hessian matrix is to be computed. p : array_like 1-D array, the vector to be multiplied by the Hessian matrix. Returns ------- rosen_hess_prod : ndarray The Hessian matrix of the Rosenbrock function at `x` multiplied by the vector `p`. See Also -------- rosen, rosen_der, rosen_hess Examples -------- >>> import numpy as np >>> from scipy.optimize import rosen_hess_prod >>> X = 0.1 * np.arange(9) >>> p = 0.5 * np.arange(9) >>> rosen_hess_prod(X, p) array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.]) """ x = atleast_1d(x) Hp = np.zeros(len(x), dtype=x.dtype) Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1] Hp[1:-1] = (-400 * x[:-2] * p[:-2] + (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] - 400 * x[1:-1] * p[2:]) Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1] return Hp def _wrap_scalar_function(function, args): # wraps a minimizer function to count number of evaluations # and to easily provide an args kwd. ncalls = [0] if function is None: return ncalls, None def function_wrapper(x, *wrapper_args): ncalls[0] += 1 # A copy of x is sent to the user function (gh13740) fx = function(np.copy(x), *(wrapper_args + args)) # Ideally, we'd like to a have a true scalar returned from f(x). For # backwards-compatibility, also allow np.array([1.3]), np.array([[1.3]]) etc. if not np.isscalar(fx): try: fx = np.asarray(fx).item() except (TypeError, ValueError) as e: raise ValueError("The user-provided objective function " "must return a scalar value.") from e return fx return ncalls, function_wrapper class _MaxFuncCallError(RuntimeError): pass def _wrap_scalar_function_maxfun_validation(function, args, maxfun): # wraps a minimizer function to count number of evaluations # and to easily provide an args kwd. 
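    # In addition, this variant raises _MaxFuncCallError as soon as `maxfun`
    # evaluations have been made, so callers such as _minimize_neldermead can
    # catch the exception and terminate cleanly instead of checking the
    # counter themselves.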
ncalls = [0] if function is None: return ncalls, None def function_wrapper(x, *wrapper_args): if ncalls[0] >= maxfun: raise _MaxFuncCallError("Too many function calls") ncalls[0] += 1 # A copy of x is sent to the user function (gh13740) fx = function(np.copy(x), *(wrapper_args + args)) # Ideally, we'd like to a have a true scalar returned from f(x). For # backwards-compatibility, also allow np.array([1.3]), # np.array([[1.3]]) etc. if not np.isscalar(fx): try: fx = np.asarray(fx).item() except (TypeError, ValueError) as e: raise ValueError("The user-provided objective function " "must return a scalar value.") from e return fx return ncalls, function_wrapper def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, initial_simplex=None): """ Minimize a function using the downhill simplex algorithm. This algorithm only uses function values, not derivatives or second derivatives. Parameters ---------- func : callable func(x,*args) The objective function to be minimized. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to func, i.e., ``f(x,*args)``. xtol : float, optional Absolute error in xopt between iterations that is acceptable for convergence. ftol : number, optional Absolute error in func(xopt) between iterations that is acceptable for convergence. maxiter : int, optional Maximum number of iterations to perform. maxfun : number, optional Maximum number of function evaluations to make. full_output : bool, optional Set to True if fopt and warnflag outputs are desired. disp : bool, optional Set to True to print convergence messages. retall : bool, optional Set to True to return list of solutions at each iteration. callback : callable, optional Called after each iteration, as callback(xk), where xk is the current parameter vector. initial_simplex : array_like of shape (N + 1, N), optional Initial simplex. If given, overrides `x0`. ``initial_simplex[j,:]`` should contain the coordinates of the jth vertex of the ``N+1`` vertices in the simplex, where ``N`` is the dimension. Returns ------- xopt : ndarray Parameter that minimizes function. fopt : float Value of function at minimum: ``fopt = func(xopt)``. iter : int Number of iterations performed. funcalls : int Number of function calls made. warnflag : int 1 : Maximum number of function evaluations made. 2 : Maximum number of iterations reached. allvecs : list Solution at each iteration. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'Nelder-Mead' `method` in particular. Notes ----- Uses a Nelder-Mead simplex algorithm to find the minimum of function of one or more variables. This algorithm has a long history of successful use in applications. But it will usually be slower than an algorithm that uses first or second derivative information. In practice, it can have poor performance in high-dimensional problems and is not robust to minimizing complicated functions. Additionally, there currently is no complete theory describing when the algorithm will successfully converge to the minimum, or how fast it will if it does. Both the ftol and xtol criteria must be met for convergence. Examples -------- >>> def f(x): ... return x**2 >>> from scipy import optimize >>> minimum = optimize.fmin(f, 1) Optimization terminated successfully. Current function value: 0.000000 Iterations: 17 Function evaluations: 34 >>> minimum[0] -8.8817841970012523e-16 References ---------- .. [1] Nelder, J.A. and Mead, R. 
(1965), "A simplex method for function minimization", The Computer Journal, 7, pp. 308-313 .. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now Respectable", in Numerical Analysis 1995, Proceedings of the 1995 Dundee Biennial Conference in Numerical Analysis, D.F. Griffiths and G.A. Watson (Eds.), Addison Wesley Longman, Harlow, UK, pp. 191-208. """ opts = {'xatol': xtol, 'fatol': ftol, 'maxiter': maxiter, 'maxfev': maxfun, 'disp': disp, 'return_all': retall, 'initial_simplex': initial_simplex} callback = _wrap_callback(callback) res = _minimize_neldermead(func, x0, args, callback=callback, **opts) if full_output: retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status'] if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_neldermead(func, x0, args=(), callback=None, maxiter=None, maxfev=None, disp=False, return_all=False, initial_simplex=None, xatol=1e-4, fatol=1e-4, adaptive=False, bounds=None, **unknown_options): """ Minimization of scalar function of one or more variables using the Nelder-Mead algorithm. Options ------- disp : bool Set to True to print convergence messages. maxiter, maxfev : int Maximum allowed number of iterations and function evaluations. Will default to ``N*200``, where ``N`` is the number of variables, if neither `maxiter` or `maxfev` is set. If both `maxiter` and `maxfev` are set, minimization will stop at the first reached. return_all : bool, optional Set to True to return a list of the best solution at each of the iterations. initial_simplex : array_like of shape (N + 1, N) Initial simplex. If given, overrides `x0`. ``initial_simplex[j,:]`` should contain the coordinates of the jth vertex of the ``N+1`` vertices in the simplex, where ``N`` is the dimension. xatol : float, optional Absolute error in xopt between iterations that is acceptable for convergence. fatol : number, optional Absolute error in func(xopt) between iterations that is acceptable for convergence. adaptive : bool, optional Adapt algorithm parameters to dimensionality of problem. Useful for high-dimensional minimization [1]_. bounds : sequence or `Bounds`, optional Bounds on variables. There are two ways to specify the bounds: 1. Instance of `Bounds` class. 2. Sequence of ``(min, max)`` pairs for each element in `x`. None is used to specify no bound. Note that this just clips all vertices in simplex based on the bounds. References ---------- .. [1] Gao, F. and Han, L. Implementing the Nelder-Mead simplex algorithm with adaptive parameters. 2012. Computational Optimization and Applications. 51:1, pp. 
259-277 """ _check_unknown_options(unknown_options) maxfun = maxfev retall = return_all x0 = np.atleast_1d(x0).flatten() x0 = np.asfarray(x0, x0.dtype) if adaptive: dim = float(len(x0)) rho = 1 chi = 1 + 2/dim psi = 0.75 - 1/(2*dim) sigma = 1 - 1/dim else: rho = 1 chi = 2 psi = 0.5 sigma = 0.5 nonzdelt = 0.05 zdelt = 0.00025 if bounds is not None: lower_bound, upper_bound = bounds.lb, bounds.ub # check bounds if (lower_bound > upper_bound).any(): raise ValueError("Nelder Mead - one of the lower bounds is greater than an upper bound.") if np.any(lower_bound > x0) or np.any(x0 > upper_bound): warnings.warn("Initial guess is not within the specified bounds", OptimizeWarning, 3) if bounds is not None: x0 = np.clip(x0, lower_bound, upper_bound) if initial_simplex is None: N = len(x0) sim = np.empty((N + 1, N), dtype=x0.dtype) sim[0] = x0 for k in range(N): y = np.array(x0, copy=True) if y[k] != 0: y[k] = (1 + nonzdelt)*y[k] else: y[k] = zdelt sim[k + 1] = y else: sim = np.atleast_2d(initial_simplex).copy() sim = np.asfarray(sim, sim.dtype) if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1: raise ValueError("`initial_simplex` should be an array of shape (N+1,N)") if len(x0) != sim.shape[1]: raise ValueError("Size of `initial_simplex` is not consistent with `x0`") N = sim.shape[1] if retall: allvecs = [sim[0]] # If neither are set, then set both to default if maxiter is None and maxfun is None: maxiter = N * 200 maxfun = N * 200 elif maxiter is None: # Convert remaining Nones, to np.inf, unless the other is np.inf, in # which case use the default to avoid unbounded iteration if maxfun == np.inf: maxiter = N * 200 else: maxiter = np.inf elif maxfun is None: if maxiter == np.inf: maxfun = N * 200 else: maxfun = np.inf if bounds is not None: sim = np.clip(sim, lower_bound, upper_bound) one2np1 = list(range(1, N + 1)) fsim = np.full((N + 1,), np.inf, dtype=float) fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun) try: for k in range(N + 1): fsim[k] = func(sim[k]) except _MaxFuncCallError: pass finally: ind = np.argsort(fsim) sim = np.take(sim, ind, 0) fsim = np.take(fsim, ind, 0) ind = np.argsort(fsim) fsim = np.take(fsim, ind, 0) # sort so sim[0,:] has the lowest function value sim = np.take(sim, ind, 0) iterations = 1 while (fcalls[0] < maxfun and iterations < maxiter): try: if (np.max(np.ravel(np.abs(sim[1:] - sim[0]))) <= xatol and np.max(np.abs(fsim[0] - fsim[1:])) <= fatol): break xbar = np.add.reduce(sim[:-1], 0) / N xr = (1 + rho) * xbar - rho * sim[-1] if bounds is not None: xr = np.clip(xr, lower_bound, upper_bound) fxr = func(xr) doshrink = 0 if fxr < fsim[0]: xe = (1 + rho * chi) * xbar - rho * chi * sim[-1] if bounds is not None: xe = np.clip(xe, lower_bound, upper_bound) fxe = func(xe) if fxe < fxr: sim[-1] = xe fsim[-1] = fxe else: sim[-1] = xr fsim[-1] = fxr else: # fsim[0] <= fxr if fxr < fsim[-2]: sim[-1] = xr fsim[-1] = fxr else: # fxr >= fsim[-2] # Perform contraction if fxr < fsim[-1]: xc = (1 + psi * rho) * xbar - psi * rho * sim[-1] if bounds is not None: xc = np.clip(xc, lower_bound, upper_bound) fxc = func(xc) if fxc <= fxr: sim[-1] = xc fsim[-1] = fxc else: doshrink = 1 else: # Perform an inside contraction xcc = (1 - psi) * xbar + psi * sim[-1] if bounds is not None: xcc = np.clip(xcc, lower_bound, upper_bound) fxcc = func(xcc) if fxcc < fsim[-1]: sim[-1] = xcc fsim[-1] = fxcc else: doshrink = 1 if doshrink: for j in one2np1: sim[j] = sim[0] + sigma * (sim[j] - sim[0]) if bounds is not None: sim[j] = np.clip( sim[j], lower_bound, upper_bound) 
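                        # re-evaluate the objective at the shrunken
                        # (and, when bounded, clipped) vertex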
fsim[j] = func(sim[j]) iterations += 1 except _MaxFuncCallError: pass finally: ind = np.argsort(fsim) sim = np.take(sim, ind, 0) fsim = np.take(fsim, ind, 0) if retall: allvecs.append(sim[0]) intermediate_result = OptimizeResult(x=sim[0], fun=fsim[0]) if _call_callback_maybe_halt(callback, intermediate_result): break x = sim[0] fval = np.min(fsim) warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 msg = _status_message['maxfev'] if disp: warnings.warn(msg, RuntimeWarning, 3) elif iterations >= maxiter: warnflag = 2 msg = _status_message['maxiter'] if disp: warnings.warn(msg, RuntimeWarning, 3) else: msg = _status_message['success'] if disp: print(msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % iterations) print(" Function evaluations: %d" % fcalls[0]) result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0], status=warnflag, success=(warnflag == 0), message=msg, x=x, final_simplex=(sim, fsim)) if retall: result['allvecs'] = allvecs return result def approx_fprime(xk, f, epsilon=_epsilon, *args): """Finite difference approximation of the derivatives of a scalar or vector-valued function. If a function maps from :math:`R^n` to :math:`R^m`, its derivatives form an m-by-n matrix called the Jacobian, where an element :math:`(i, j)` is a partial derivative of f[i] with respect to ``xk[j]``. Parameters ---------- xk : array_like The coordinate vector at which to determine the gradient of `f`. f : callable Function of which to estimate the derivatives of. Has the signature ``f(xk, *args)`` where `xk` is the argument in the form of a 1-D array and `args` is a tuple of any additional fixed parameters needed to completely specify the function. The argument `xk` passed to this function is an ndarray of shape (n,) (never a scalar even if n=1). It must return a 1-D array_like of shape (m,) or a scalar. .. versionchanged:: 1.9.0 `f` is now able to return a 1-D array-like, with the :math:`(m, n)` Jacobian being estimated. epsilon : {float, array_like}, optional Increment to `xk` to use for determining the function gradient. If a scalar, uses the same finite difference delta for all partial derivatives. If an array, should contain one value per element of `xk`. Defaults to ``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08. \\*args : args, optional Any other arguments that are to be passed to `f`. Returns ------- jac : ndarray The partial derivatives of `f` to `xk`. See Also -------- check_grad : Check correctness of gradient function against approx_fprime. Notes ----- The function gradient is determined by the forward finite difference formula:: f(xk[i] + epsilon[i]) - f(xk[i]) f'[i] = --------------------------------- epsilon[i] Examples -------- >>> import numpy as np >>> from scipy import optimize >>> def func(x, c0, c1): ... "Coordinate vector `x` should be an array of size two." ... return c0 * x[0]**2 + c1*x[1]**2 >>> x = np.ones(2) >>> c0, c1 = (1, 200) >>> eps = np.sqrt(np.finfo(float).eps) >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) array([ 2. , 400.00004198]) """ xk = np.asarray(xk, float) f0 = f(xk, *args) return approx_derivative(f, xk, method='2-point', abs_step=epsilon, args=args, f0=f0) def check_grad(func, grad, x0, *args, epsilon=_epsilon, direction='all', seed=None): """Check the correctness of a gradient function by comparing it against a (forward) finite-difference approximation of the gradient. Parameters ---------- func : callable ``func(x0, *args)`` Function whose derivative is to be checked. 
grad : callable ``grad(x0, *args)`` Jacobian of `func`. x0 : ndarray Points to check `grad` against forward difference approximation of grad using `func`. args : \\*args, optional Extra arguments passed to `func` and `grad`. epsilon : float, optional Step size used for the finite difference approximation. It defaults to ``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08. direction : str, optional If set to ``'random'``, then gradients along a random vector are used to check `grad` against forward difference approximation using `func`. By default it is ``'all'``, in which case, all the one hot direction vectors are considered to check `grad`. If `func` is a vector valued function then only ``'all'`` can be used. seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Specify `seed` for reproducing the return value from this function. The random numbers generated with this seed affect the random vector along which gradients are computed to check ``grad``. Note that `seed` is only used when `direction` argument is set to `'random'`. Returns ------- err : float The square root of the sum of squares (i.e., the 2-norm) of the difference between ``grad(x0, *args)`` and the finite difference approximation of `grad` using func at the points `x0`. See Also -------- approx_fprime Examples -------- >>> import numpy as np >>> def func(x): ... return x[0]**2 - 0.5 * x[1]**3 >>> def grad(x): ... return [2 * x[0], -1.5 * x[1]**2] >>> from scipy.optimize import check_grad >>> check_grad(func, grad, [1.5, -1.5]) 2.9802322387695312e-08 # may vary >>> rng = np.random.default_rng() >>> check_grad(func, grad, [1.5, -1.5], ... direction='random', seed=rng) 2.9802322387695312e-08 """ step = epsilon x0 = np.asarray(x0) def g(w, func, x0, v, *args): return func(x0 + w*v, *args) if direction == 'random': _grad = np.asanyarray(grad(x0, *args)) if _grad.ndim > 1: raise ValueError("'random' can only be used with scalar valued" " func") random_state = check_random_state(seed) v = random_state.normal(0, 1, size=(x0.shape)) _args = (func, x0, v) + args _func = g vars = np.zeros((1,)) analytical_grad = np.dot(_grad, v) elif direction == 'all': _args = args _func = func vars = x0 analytical_grad = grad(x0, *args) else: raise ValueError("{} is not a valid string for " "``direction`` argument".format(direction)) return np.sqrt(np.sum(np.abs( (analytical_grad - approx_fprime(vars, _func, step, *_args))**2 ))) def approx_fhess_p(x0, p, fprime, epsilon, *args): # calculate fprime(x0) first, as this may be cached by ScalarFunction f1 = fprime(*((x0,) + args)) f2 = fprime(*((x0 + epsilon*p,) + args)) return (f2 - f1) / epsilon class _LineSearchError(RuntimeError): pass def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs): """ Same as line_search_wolfe1, but fall back to line_search_wolfe2 if suitable step length is not found, and raise an exception if a suitable step length is not found. 
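    If an ``extra_condition`` callable is supplied through ``kwargs``, it is
    also used to veto steps returned by `line_search_wolfe1` and is forwarded
    to `line_search_wolfe2` in the fallback.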
Raises ------ _LineSearchError If no suitable step size is found """ extra_condition = kwargs.pop('extra_condition', None) ret = line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs) if ret[0] is not None and extra_condition is not None: xp1 = xk + ret[0] * pk if not extra_condition(ret[0], xp1, ret[3], ret[5]): # Reject step if extra_condition fails ret = (None,) if ret[0] is None: # line search failed: try different one. with warnings.catch_warnings(): warnings.simplefilter('ignore', LineSearchWarning) kwargs2 = {} for key in ('c1', 'c2', 'amax'): if key in kwargs: kwargs2[key] = kwargs[key] ret = line_search_wolfe2(f, fprime, xk, pk, gfk, old_fval, old_old_fval, extra_condition=extra_condition, **kwargs2) if ret[0] is None: raise _LineSearchError() return ret def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=np.inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None, xrtol=0): """ Minimize a function using the BFGS algorithm. Parameters ---------- f : callable ``f(x,*args)`` Objective function to be minimized. x0 : ndarray Initial guess. fprime : callable ``f'(x,*args)``, optional Gradient of f. args : tuple, optional Extra arguments passed to f and fprime. gtol : float, optional Terminate successfully if gradient norm is less than `gtol` norm : float, optional Order of norm (Inf is max, -Inf is min) epsilon : int or ndarray, optional If `fprime` is approximated, use this value for the step size. callback : callable, optional An optional user-supplied function to call after each iteration. Called as ``callback(xk)``, where ``xk`` is the current parameter vector. maxiter : int, optional Maximum number of iterations to perform. full_output : bool, optional If True, return ``fopt``, ``func_calls``, ``grad_calls``, and ``warnflag`` in addition to ``xopt``. disp : bool, optional Print convergence message if True. retall : bool, optional Return a list of results at each iteration if True. xrtol : float, default: 0 Relative tolerance for `x`. Terminate successfully if step size is less than ``xk * xrtol`` where ``xk`` is the current parameter vector. Returns ------- xopt : ndarray Parameters which minimize f, i.e., ``f(xopt) == fopt``. fopt : float Minimum value. gopt : ndarray Value of gradient at minimum, f'(xopt), which should be near 0. Bopt : ndarray Value of 1/f''(xopt), i.e., the inverse Hessian matrix. func_calls : int Number of function_calls made. grad_calls : int Number of gradient calls made. warnflag : integer 1 : Maximum number of iterations exceeded. 2 : Gradient and/or function calls not changing. 3 : NaN result encountered. allvecs : list The value of `xopt` at each iteration. Only returned if `retall` is True. Notes ----- Optimize the function, `f`, whose gradient is given by `fprime` using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS). See Also -------- minimize: Interface to minimization algorithms for multivariate functions. See ``method='BFGS'`` in particular. References ---------- Wright, and Nocedal 'Numerical Optimization', 1999, p. 198. Examples -------- >>> import numpy as np >>> from scipy.optimize import fmin_bfgs >>> def quadratic_cost(x, Q): ... return x @ Q @ x ... >>> x0 = np.array([-3, -4]) >>> cost_weight = np.diag([1., 10.]) >>> # Note that a trailing comma is necessary for a tuple with single element >>> fmin_bfgs(quadratic_cost, x0, args=(cost_weight,)) Optimization terminated successfully. 
Current function value: 0.000000 Iterations: 7 # may vary Function evaluations: 24 # may vary Gradient evaluations: 8 # may vary array([ 2.85169950e-06, -4.61820139e-07]) >>> def quadratic_cost_grad(x, Q): ... return 2 * Q @ x ... >>> fmin_bfgs(quadratic_cost, x0, quadratic_cost_grad, args=(cost_weight,)) Optimization terminated successfully. Current function value: 0.000000 Iterations: 7 Function evaluations: 8 Gradient evaluations: 8 array([ 2.85916637e-06, -4.54371951e-07]) """ opts = {'gtol': gtol, 'norm': norm, 'eps': epsilon, 'disp': disp, 'maxiter': maxiter, 'return_all': retall} callback = _wrap_callback(callback) res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts) if full_output: retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'], res['nfev'], res['njev'], res['status']) if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, gtol=1e-5, norm=np.inf, eps=_epsilon, maxiter=None, disp=False, return_all=False, finite_diff_rel_step=None, xrtol=0, **unknown_options): """ Minimization of scalar function of one or more variables using the BFGS algorithm. Options ------- disp : bool Set to True to print convergence messages. maxiter : int Maximum number of iterations to perform. gtol : float Terminate successfully if gradient norm is less than `gtol`. norm : float Order of norm (Inf is max, -Inf is min). eps : float or ndarray If `jac is None` the absolute step size used for numerical approximation of the jacobian via forward differences. return_all : bool, optional Set to True to return a list of the best solution at each of the iterations. finite_diff_rel_step : None or array_like, optional If `jac in ['2-point', '3-point', 'cs']` the relative step size to use for numerical approximation of the jacobian. The absolute step size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically. xrtol : float, default: 0 Relative tolerance for `x`. Terminate successfully if step size is less than ``xk * xrtol`` where ``xk`` is the current parameter vector. """ _check_unknown_options(unknown_options) retall = return_all x0 = asarray(x0).flatten() if x0.ndim == 0: x0.shape = (1,) if maxiter is None: maxiter = len(x0) * 200 sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps, finite_diff_rel_step=finite_diff_rel_step) f = sf.fun myfprime = sf.grad old_fval = f(x0) gfk = myfprime(x0) k = 0 N = len(x0) I = np.eye(N, dtype=int) Hk = I # Sets the initial step guess to dx ~ 1 old_old_fval = old_fval + np.linalg.norm(gfk) / 2 xk = x0 if retall: allvecs = [x0] warnflag = 0 gnorm = vecnorm(gfk, ord=norm) while (gnorm > gtol) and (k < maxiter): pk = -np.dot(Hk, gfk) try: alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, old_old_fval, amin=1e-100, amax=1e100) except _LineSearchError: # Line search failed to find a better solution. warnflag = 2 break sk = alpha_k * pk xkp1 = xk + sk if retall: allvecs.append(xkp1) xk = xkp1 if gfkp1 is None: gfkp1 = myfprime(xkp1) yk = gfkp1 - gfk gfk = gfkp1 k += 1 intermediate_result = OptimizeResult(x=xk, fun=old_fval) if _call_callback_maybe_halt(callback, intermediate_result): break gnorm = vecnorm(gfk, ord=norm) if (gnorm <= gtol): break # See Chapter 5 in P.E. Frandsen, K. Jonasson, H.B. 
Nielsen, # O. Tingleff: "Unconstrained Optimization", IMM, DTU. 1999. # These notes are available here: # http://www2.imm.dtu.dk/documents/ftp/publlec.html if (alpha_k*vecnorm(pk) <= xrtol*(xrtol + vecnorm(xk))): break if not np.isfinite(old_fval): # We correctly found +-Inf as optimal value, or something went # wrong. warnflag = 2 break rhok_inv = np.dot(yk, sk) # this was handled in numeric, let it remaines for more safety # Cryptic comment above is preserved for posterity. Future reader: # consider change to condition below proposed in gh-1261/gh-17345. if rhok_inv == 0.: rhok = 1000.0 if disp: msg = "Divide-by-zero encountered: rhok assumed large" _print_success_message_or_warn(True, msg) else: rhok = 1. / rhok_inv A1 = I - sk[:, np.newaxis] * yk[np.newaxis, :] * rhok A2 = I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok Hk = np.dot(A1, np.dot(Hk, A2)) + (rhok * sk[:, np.newaxis] * sk[np.newaxis, :]) fval = old_fval if warnflag == 2: msg = _status_message['pr_loss'] elif k >= maxiter: warnflag = 1 msg = _status_message['maxiter'] elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any(): warnflag = 3 msg = _status_message['nan'] else: msg = _status_message['success'] if disp: _print_success_message_or_warn(warnflag, msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % sf.nfev) print(" Gradient evaluations: %d" % sf.ngev) result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=sf.nfev, njev=sf.ngev, status=warnflag, success=(warnflag == 0), message=msg, x=xk, nit=k) if retall: result['allvecs'] = allvecs return result def _print_success_message_or_warn(warnflag, message, warntype=None): if not warnflag: print(message) else: warnings.warn(message, warntype or OptimizeWarning, stacklevel=3) def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=np.inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): """ Minimize a function using a nonlinear conjugate gradient algorithm. Parameters ---------- f : callable, ``f(x, *args)`` Objective function to be minimized. Here `x` must be a 1-D array of the variables that are to be changed in the search for a minimum, and `args` are the other (fixed) parameters of `f`. x0 : ndarray A user-supplied initial estimate of `xopt`, the optimal value of `x`. It must be a 1-D array of values. fprime : callable, ``fprime(x, *args)``, optional A function that returns the gradient of `f` at `x`. Here `x` and `args` are as described above for `f`. The returned value must be a 1-D array. Defaults to None, in which case the gradient is approximated numerically (see `epsilon`, below). args : tuple, optional Parameter values passed to `f` and `fprime`. Must be supplied whenever additional fixed parameters are needed to completely specify the functions `f` and `fprime`. gtol : float, optional Stop when the norm of the gradient is less than `gtol`. norm : float, optional Order to use for the norm of the gradient (``-np.inf`` is min, ``np.inf`` is max). epsilon : float or ndarray, optional Step size(s) to use when `fprime` is approximated numerically. Can be a scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the floating point machine precision. Usually ``sqrt(eps)`` is about 1.5e-8. maxiter : int, optional Maximum number of iterations to perform. Default is ``200 * len(x0)``. full_output : bool, optional If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in addition to `xopt`. 
See the Returns section below for additional information on optional return values. disp : bool, optional If True, return a convergence message, followed by `xopt`. retall : bool, optional If True, add to the returned values the results of each iteration. callback : callable, optional An optional user-supplied function, called after each iteration. Called as ``callback(xk)``, where ``xk`` is the current value of `x0`. Returns ------- xopt : ndarray Parameters which minimize f, i.e., ``f(xopt) == fopt``. fopt : float, optional Minimum value found, f(xopt). Only returned if `full_output` is True. func_calls : int, optional The number of function_calls made. Only returned if `full_output` is True. grad_calls : int, optional The number of gradient calls made. Only returned if `full_output` is True. warnflag : int, optional Integer value with warning status, only returned if `full_output` is True. 0 : Success. 1 : The maximum number of iterations was exceeded. 2 : Gradient and/or function calls were not changing. May indicate that precision was lost, i.e., the routine did not converge. 3 : NaN result encountered. allvecs : list of ndarray, optional List of arrays, containing the results at each iteration. Only returned if `retall` is True. See Also -------- minimize : common interface to all `scipy.optimize` algorithms for unconstrained and constrained minimization of multivariate functions. It provides an alternative way to call ``fmin_cg``, by specifying ``method='CG'``. Notes ----- This conjugate gradient algorithm is based on that of Polak and Ribiere [1]_. Conjugate gradient methods tend to work better when: 1. `f` has a unique global minimizing point, and no local minima or other stationary points, 2. `f` is, at least locally, reasonably well approximated by a quadratic function of the variables, 3. `f` is continuous and has a continuous gradient, 4. `fprime` is not too large, e.g., has a norm less than 1000, 5. The initial guess, `x0`, is reasonably close to `f` 's global minimizing point, `xopt`. References ---------- .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122. Examples -------- Example 1: seek the minimum value of the expression ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values of the parameters and an initial guess ``(u, v) = (0, 0)``. >>> import numpy as np >>> args = (2, 3, 7, 8, 9, 10) # parameter values >>> def f(x, *args): ... u, v = x ... a, b, c, d, e, f = args ... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f >>> def gradf(x, *args): ... u, v = x ... a, b, c, d, e, f = args ... gu = 2*a*u + b*v + d # u-component of the gradient ... gv = b*u + 2*c*v + e # v-component of the gradient ... return np.asarray((gu, gv)) >>> x0 = np.asarray((0, 0)) # Initial guess. >>> from scipy import optimize >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args) Optimization terminated successfully. Current function value: 1.617021 Iterations: 4 Function evaluations: 8 Gradient evaluations: 8 >>> res1 array([-1.80851064, -0.25531915]) Example 2: solve the same problem using the `minimize` function. (This `myopts` dictionary shows all of the available options, although in practice only non-default values would be needed. The returned value will be a dictionary.) >>> opts = {'maxiter' : None, # default value. ... 'disp' : True, # non-default value. ... 'gtol' : 1e-5, # default value. ... 'norm' : np.inf, # default value. ... 'eps' : 1.4901161193847656e-08} # default value. >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args, ... 
method='CG', options=opts) Optimization terminated successfully. Current function value: 1.617021 Iterations: 4 Function evaluations: 8 Gradient evaluations: 8 >>> res2.x # minimum found array([-1.80851064, -0.25531915]) """ opts = {'gtol': gtol, 'norm': norm, 'eps': epsilon, 'disp': disp, 'maxiter': maxiter, 'return_all': retall} callback = _wrap_callback(callback) res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts) if full_output: retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status'] if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_cg(fun, x0, args=(), jac=None, callback=None, gtol=1e-5, norm=np.inf, eps=_epsilon, maxiter=None, disp=False, return_all=False, finite_diff_rel_step=None, **unknown_options): """ Minimization of scalar function of one or more variables using the conjugate gradient algorithm. Options ------- disp : bool Set to True to print convergence messages. maxiter : int Maximum number of iterations to perform. gtol : float Gradient norm must be less than `gtol` before successful termination. norm : float Order of norm (Inf is max, -Inf is min). eps : float or ndarray If `jac is None` the absolute step size used for numerical approximation of the jacobian via forward differences. return_all : bool, optional Set to True to return a list of the best solution at each of the iterations. finite_diff_rel_step : None or array_like, optional If `jac in ['2-point', '3-point', 'cs']` the relative step size to use for numerical approximation of the jacobian. The absolute step size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically. """ _check_unknown_options(unknown_options) retall = return_all x0 = asarray(x0).flatten() if maxiter is None: maxiter = len(x0) * 200 sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, finite_diff_rel_step=finite_diff_rel_step) f = sf.fun myfprime = sf.grad old_fval = f(x0) gfk = myfprime(x0) k = 0 xk = x0 # Sets the initial step guess to dx ~ 1 old_old_fval = old_fval + np.linalg.norm(gfk) / 2 if retall: allvecs = [xk] warnflag = 0 pk = -gfk gnorm = vecnorm(gfk, ord=norm) sigma_3 = 0.01 while (gnorm > gtol) and (k < maxiter): deltak = np.dot(gfk, gfk) cached_step = [None] def polak_ribiere_powell_step(alpha, gfkp1=None): xkp1 = xk + alpha * pk if gfkp1 is None: gfkp1 = myfprime(xkp1) yk = gfkp1 - gfk beta_k = max(0, np.dot(yk, gfkp1) / deltak) pkp1 = -gfkp1 + beta_k * pk gnorm = vecnorm(gfkp1, ord=norm) return (alpha, xkp1, pkp1, gfkp1, gnorm) def descent_condition(alpha, xkp1, fp1, gfkp1): # Polak-Ribiere+ needs an explicit check of a sufficient # descent condition, which is not guaranteed by strong Wolfe. # # See Gilbert & Nocedal, "Global convergence properties of # conjugate gradient methods for optimization", # SIAM J. Optimization 2, 21 (1992). cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1) alpha, xk, pk, gfk, gnorm = cached_step # Accept step if it leads to convergence. if gnorm <= gtol: return True # Accept step if sufficient descent condition applies. 
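            # i.e. require p_{k+1}^T g_{k+1} <= -sigma_3 * ||g_{k+1}||**2,
            # with sigma_3 = 0.01 set above.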
return np.dot(pk, gfk) <= -sigma_3 * np.dot(gfk, gfk) try: alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval, old_old_fval, c2=0.4, amin=1e-100, amax=1e100, extra_condition=descent_condition) except _LineSearchError: # Line search failed to find a better solution. warnflag = 2 break # Reuse already computed results if possible if alpha_k == cached_step[0]: alpha_k, xk, pk, gfk, gnorm = cached_step else: alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1) if retall: allvecs.append(xk) k += 1 intermediate_result = OptimizeResult(x=xk, fun=old_fval) if _call_callback_maybe_halt(callback, intermediate_result): break fval = old_fval if warnflag == 2: msg = _status_message['pr_loss'] elif k >= maxiter: warnflag = 1 msg = _status_message['maxiter'] elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any(): warnflag = 3 msg = _status_message['nan'] else: msg = _status_message['success'] if disp: _print_success_message_or_warn(warnflag, msg) print(" Current function value: %f" % fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % sf.nfev) print(" Gradient evaluations: %d" % sf.ngev) result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev, njev=sf.ngev, status=warnflag, success=(warnflag == 0), message=msg, x=xk, nit=k) if retall: result['allvecs'] = allvecs return result def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): """ Unconstrained minimization of a function using the Newton-CG method. Parameters ---------- f : callable ``f(x, *args)`` Objective function to be minimized. x0 : ndarray Initial guess. fprime : callable ``f'(x, *args)`` Gradient of f. fhess_p : callable ``fhess_p(x, p, *args)``, optional Function which computes the Hessian of f times an arbitrary vector, p. fhess : callable ``fhess(x, *args)``, optional Function to compute the Hessian matrix of f. args : tuple, optional Extra arguments passed to f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon : float or ndarray, optional If fhess is approximated, use this value for the step size. callback : callable, optional An optional user-supplied function which is called after each iteration. Called as callback(xk), where xk is the current parameter vector. avextol : float, optional Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter : int, optional Maximum number of iterations to perform. full_output : bool, optional If True, return the optional outputs. disp : bool, optional If True, print convergence message. retall : bool, optional If True, return a list of results at each iteration. Returns ------- xopt : ndarray Parameters which minimize f, i.e., ``f(xopt) == fopt``. fopt : float Value of the function at xopt, i.e., ``fopt = f(xopt)``. fcalls : int Number of function calls made. gcalls : int Number of gradient calls made. hcalls : int Number of Hessian calls made. warnflag : int Warnings generated by the algorithm. 1 : Maximum number of iterations exceeded. 2 : Line search failure (precision loss). 3 : NaN result encountered. allvecs : list The result at each iteration, if retall is True (see below). See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'Newton-CG' `method` in particular. Notes ----- Only one of `fhess_p` or `fhess` need to be given. 
If `fhess` is provided, then `fhess_p` will be ignored. If neither `fhess` nor `fhess_p` is provided, then the hessian product will be approximated using finite differences on `fprime`. `fhess_p` must compute the hessian times an arbitrary vector. If it is not given, finite-differences on `fprime` are used to compute it. Newton-CG methods are also called truncated Newton methods. This function differs from scipy.optimize.fmin_tnc because 1. scipy.optimize.fmin_ncg is written purely in Python using NumPy and scipy while scipy.optimize.fmin_tnc calls a C function. 2. scipy.optimize.fmin_ncg is only for unconstrained minimization while scipy.optimize.fmin_tnc is for unconstrained minimization or box constrained minimization. (Box constraints give lower and upper bounds for each variable separately.) References ---------- Wright & Nocedal, 'Numerical Optimization', 1999, p. 140. """ opts = {'xtol': avextol, 'eps': epsilon, 'maxiter': maxiter, 'disp': disp, 'return_all': retall} callback = _wrap_callback(callback) res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p, callback=callback, **opts) if full_output: retlist = (res['x'], res['fun'], res['nfev'], res['njev'], res['nhev'], res['status']) if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None, callback=None, xtol=1e-5, eps=_epsilon, maxiter=None, disp=False, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the Newton-CG algorithm. Note that the `jac` parameter (Jacobian) is required. Options ------- disp : bool Set to True to print convergence messages. xtol : float Average relative error in solution `xopt` acceptable for convergence. maxiter : int Maximum number of iterations to perform. eps : float or ndarray If `hessp` is approximated, use this value for the step size. return_all : bool, optional Set to True to return a list of the best solution at each of the iterations. """ _check_unknown_options(unknown_options) if jac is None: raise ValueError('Jacobian is required for Newton-CG method') fhess_p = hessp fhess = hess avextol = xtol epsilon = eps retall = return_all x0 = asarray(x0).flatten() # TODO: add hessp (callable or FD) to ScalarFunction? sf = _prepare_scalar_function( fun, x0, jac, args=args, epsilon=eps, hess=hess ) f = sf.fun fprime = sf.grad _h = sf.hess(x0) # Logic for hess/hessp # - If a callable(hess) is provided, then use that # - If hess is a FD_METHOD, or the output fom hess(x) is a LinearOperator # then create a hessp function using those. # - If hess is None but you have callable(hessp) then use the hessp. # - If hess and hessp are None then approximate hessp using the grad/jac. 
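    # Net effect: the CG loop below uses a materialised Hessian (via `fhess`)
    # whenever one is available, and otherwise falls back to Hessian-vector
    # products through `fhess_p` (user-supplied or finite-difference).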
if (hess in FD_METHODS or isinstance(_h, LinearOperator)): fhess = None def _hessp(x, p, *args): return sf.hess(x).dot(p) fhess_p = _hessp def terminate(warnflag, msg): if disp: _print_success_message_or_warn(warnflag, msg) print(" Current function value: %f" % old_fval) print(" Iterations: %d" % k) print(" Function evaluations: %d" % sf.nfev) print(" Gradient evaluations: %d" % sf.ngev) print(" Hessian evaluations: %d" % hcalls) fval = old_fval result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev, njev=sf.ngev, nhev=hcalls, status=warnflag, success=(warnflag == 0), message=msg, x=xk, nit=k) if retall: result['allvecs'] = allvecs return result hcalls = 0 if maxiter is None: maxiter = len(x0)*200 cg_maxiter = 20*len(x0) xtol = len(x0) * avextol update = [2 * xtol] xk = x0 if retall: allvecs = [xk] k = 0 gfk = None old_fval = f(x0) old_old_fval = None float64eps = np.finfo(np.float64).eps while np.add.reduce(np.abs(update)) > xtol: if k >= maxiter: msg = "Warning: " + _status_message['maxiter'] return terminate(1, msg) # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -fprime(xk) maggrad = np.add.reduce(np.abs(b)) eta = np.min([0.5, np.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), dtype=x0.dtype) ri = -b psupi = -ri i = 0 dri0 = np.dot(ri, ri) if fhess is not None: # you want to compute hessian once. A = sf.hess(xk) hcalls = hcalls + 1 for k2 in range(cg_maxiter): if np.add.reduce(np.abs(ri)) <= termcond: break if fhess is None: if fhess_p is None: Ap = approx_fhess_p(xk, psupi, fprime, epsilon) else: Ap = fhess_p(xk, psupi, *args) hcalls = hcalls + 1 else: if isinstance(A, HessianUpdateStrategy): # if hess was supplied as a HessianUpdateStrategy Ap = A.dot(psupi) else: Ap = np.dot(A, psupi) # check curvature Ap = asarray(Ap).squeeze() # get rid of matrices... curv = np.dot(psupi, Ap) if 0 <= curv <= 3 * float64eps: break elif curv < 0: if (i > 0): break else: # fall back to steepest descent direction xsupi = dri0 / (-curv) * b break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = np.dot(ri, ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update np.dot(ri,ri) for next time. else: # curvature keeps increasing, bail out msg = ("Warning: CG iterations didn't converge. The Hessian is not " "positive definite.") return terminate(3, msg) pk = xsupi # search direction is solution to system. gfk = -b # gradient at xk try: alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \ _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval) except _LineSearchError: # Line search failed to find a better solution. msg = "Warning: " + _status_message['pr_loss'] return terminate(2, msg) update = alphak * pk xk = xk + update # upcast if necessary if retall: allvecs.append(xk) k += 1 intermediate_result = OptimizeResult(x=xk, fun=old_fval) if _call_callback_maybe_halt(callback, intermediate_result): return terminate(5, "") else: if np.isnan(old_fval) or np.isnan(update).any(): return terminate(3, _status_message['nan']) msg = _status_message['success'] return terminate(0, msg) def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500, full_output=0, disp=1): """Bounded minimization for scalar functions. Parameters ---------- func : callable f(x,*args) Objective function to be minimized (must accept and return scalars). x1, x2 : float or array scalar Finite optimization bounds. args : tuple, optional Extra arguments passed to function. 
xtol : float, optional The convergence tolerance. maxfun : int, optional Maximum number of function evaluations allowed. full_output : bool, optional If True, return optional outputs. disp : int, optional If non-zero, print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. Returns ------- xopt : ndarray Parameters (over given interval) which minimize the objective function. fval : number (Optional output) The function value evaluated at the minimizer. ierr : int (Optional output) An error flag (0 if converged, 1 if maximum number of function calls reached). numfunc : int (Optional output) The number of function calls made. See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Bounded' `method` in particular. Notes ----- Finds a local minimizer of the scalar function `func` in the interval x1 < xopt < x2 using Brent's method. (See `brent` for auto-bracketing.) References ---------- .. [1] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods for Mathematical Computations." Prentice-Hall Series in Automatic Computation 259 (1977). .. [2] Brent, Richard P. Algorithms for Minimization Without Derivatives. Courier Corporation, 2013. Examples -------- `fminbound` finds the minimizer of the function in the given range. The following examples illustrate this. >>> from scipy import optimize >>> def f(x): ... return (x-1)**2 >>> minimizer = optimize.fminbound(f, -4, 4) >>> minimizer 1.0 >>> minimum = f(minimizer) >>> minimum 0.0 >>> res = optimize.fminbound(f, 3, 4, full_output=True) >>> minimizer, fval, ierr, numfunc = res >>> minimizer 3.000005960860986 >>> minimum = f(minimizer) >>> minimum, fval (4.000023843479476, 4.000023843479476) """ options = {'xatol': xtol, 'maxiter': maxfun, 'disp': disp} res = _minimize_scalar_bounded(func, (x1, x2), args, **options) if full_output: return res['x'], res['fun'], res['status'], res['nfev'] else: return res['x'] def _minimize_scalar_bounded(func, bounds, args=(), xatol=1e-5, maxiter=500, disp=0, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. disp: int, optional If non-zero, print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. xatol : float Absolute error in solution `xopt` acceptable for convergence. 
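    Examples
    --------
    These options are normally supplied through `scipy.optimize.minimize_scalar`
    with ``method='bounded'``; a minimal illustration:

    >>> from scipy.optimize import minimize_scalar
    >>> res = minimize_scalar(lambda x: (x - 2)**2, bounds=(0, 10),
    ...                       method='bounded', options={'xatol': 1e-8})
    >>> res.success
    True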
""" _check_unknown_options(unknown_options) maxfun = maxiter # Test bounds are of correct form if len(bounds) != 2: raise ValueError('bounds must have two elements.') x1, x2 = bounds if not (is_finite_scalar(x1) and is_finite_scalar(x2)): raise ValueError("Optimization bounds must be finite scalars.") if x1 > x2: raise ValueError("The lower bound exceeds the upper bound.") flag = 0 header = ' Func-count x f(x) Procedure' step = ' initial' sqrt_eps = sqrt(2.2e-16) golden_mean = 0.5 * (3.0 - sqrt(5.0)) a, b = x1, x2 fulc = a + golden_mean * (b - a) nfc, xf = fulc, fulc rat = e = 0.0 x = xf fx = func(x, *args) num = 1 fmin_data = (1, xf, fx) fu = np.inf ffulc = fnfc = fx xm = 0.5 * (a + b) tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0 tol2 = 2.0 * tol1 if disp > 2: print(" ") print(header) print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) while (np.abs(xf - xm) > (tol2 - 0.5 * (b - a))): golden = 1 # Check for parabolic fit if np.abs(e) > tol1: golden = 0 r = (xf - nfc) * (fx - ffulc) q = (xf - fulc) * (fx - fnfc) p = (xf - fulc) * q - (xf - nfc) * r q = 2.0 * (q - r) if q > 0.0: p = -p q = np.abs(q) r = e e = rat # Check for acceptability of parabola if ((np.abs(p) < np.abs(0.5*q*r)) and (p > q*(a - xf)) and (p < q * (b - xf))): rat = (p + 0.0) / q x = xf + rat step = ' parabolic' if ((x - a) < tol2) or ((b - x) < tol2): si = np.sign(xm - xf) + ((xm - xf) == 0) rat = tol1 * si else: # do a golden-section step golden = 1 if golden: # do a golden-section step if xf >= xm: e = a - xf else: e = b - xf rat = golden_mean*e step = ' golden' si = np.sign(rat) + (rat == 0) x = xf + si * np.maximum(np.abs(rat), tol1) fu = func(x, *args) num += 1 fmin_data = (num, x, fu) if disp > 2: print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) if fu <= fx: if x >= xf: a = xf else: b = xf fulc, ffulc = nfc, fnfc nfc, fnfc = xf, fx xf, fx = x, fu else: if x < xf: a = x else: b = x if (fu <= fnfc) or (nfc == xf): fulc, ffulc = nfc, fnfc nfc, fnfc = x, fu elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc): fulc, ffulc = x, fu xm = 0.5 * (a + b) tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0 tol2 = 2.0 * tol1 if num >= maxfun: flag = 1 break if np.isnan(xf) or np.isnan(fx) or np.isnan(fu): flag = 2 fval = fx if disp > 0: _endprint(x, flag, fval, maxfun, xatol, disp) result = OptimizeResult(fun=fval, status=flag, success=(flag == 0), message={0: 'Solution found.', 1: 'Maximum number of function calls ' 'reached.', 2: _status_message['nan']}.get(flag, ''), x=xf, nfev=num, nit=num) return result class Brent: #need to rethink design of __init__ def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, full_output=0, disp=0): self.func = func self.args = args self.tol = tol self.maxiter = maxiter self._mintol = 1.0e-11 self._cg = 0.3819660 self.xmin = None self.fval = None self.iter = 0 self.funcalls = 0 self.disp = disp # need to rethink design of set_bracket (new options, etc.) 
def set_bracket(self, brack=None): self.brack = brack def get_bracket_info(self): #set up func = self.func args = self.args brack = self.brack ### BEGIN core bracket_info code ### ### carefully DOCUMENT any CHANGES in core ## if brack is None: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) elif len(brack) == 2: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa, xb, xc = brack if (xa > xc): # swap so xa < xc can be assumed xc, xa = xa, xc if not ((xa < xb) and (xb < xc)): raise ValueError( "Bracketing values (xa, xb, xc) do not" " fulfill this requirement: (xa < xb) and (xb < xc)" ) fa = func(*((xa,) + args)) fb = func(*((xb,) + args)) fc = func(*((xc,) + args)) if not ((fb < fa) and (fb < fc)): raise ValueError( "Bracketing values (xa, xb, xc) do not fulfill" " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))" ) funcalls = 3 else: raise ValueError("Bracketing interval must be " "length 2 or 3 sequence.") ### END core bracket_info code ### return xa, xb, xc, fa, fb, fc, funcalls def optimize(self): # set up for optimization func = self.func xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info() _mintol = self._mintol _cg = self._cg ################################# #BEGIN CORE ALGORITHM ################################# x = w = v = xb fw = fv = fx = fb if (xa < xc): a = xa b = xc else: a = xc b = xa deltax = 0.0 iter = 0 if self.disp > 2: print(" ") print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}") print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}") while (iter < self.maxiter): tol1 = self.tol * np.abs(x) + _mintol tol2 = 2.0 * tol1 xmid = 0.5 * (a + b) # check for convergence if np.abs(x - xmid) < (tol2 - 0.5 * (b - a)): break # XXX In the first iteration, rat is only bound in the true case # of this conditional. This used to cause an UnboundLocalError # (gh-4140). It should be set before the if (but to what?). if (np.abs(deltax) <= tol1): if (x >= xmid): deltax = a - x # do a golden section step else: deltax = b - x rat = _cg * deltax else: # do a parabolic step tmp1 = (x - w) * (fx - fv) tmp2 = (x - v) * (fx - fw) p = (x - v) * tmp2 - (x - w) * tmp1 tmp2 = 2.0 * (tmp2 - tmp1) if (tmp2 > 0.0): p = -p tmp2 = np.abs(tmp2) dx_temp = deltax deltax = rat # check parabolic fit if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and (np.abs(p) < np.abs(0.5 * tmp2 * dx_temp))): rat = p * 1.0 / tmp2 # if parabolic step is useful. 
u = x + rat if ((u - a) < tol2 or (b - u) < tol2): if xmid - x >= 0: rat = tol1 else: rat = -tol1 else: if (x >= xmid): deltax = a - x # if it's not do a golden section step else: deltax = b - x rat = _cg * deltax if (np.abs(rat) < tol1): # update by at least tol1 if rat >= 0: u = x + tol1 else: u = x - tol1 else: u = x + rat fu = func(*((u,) + self.args)) # calculate new output value funcalls += 1 if (fu > fx): # if it's bigger than current if (u < x): a = u else: b = u if (fu <= fw) or (w == x): v = w w = u fv = fw fw = fu elif (fu <= fv) or (v == x) or (v == w): v = u fv = fu else: if (u >= x): a = x else: b = x v = w w = x x = u fv = fw fw = fx fx = fu if self.disp > 2: print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}") iter += 1 ################################# #END CORE ALGORITHM ################################# self.xmin = x self.fval = fx self.iter = iter self.funcalls = funcalls def get_result(self, full_output=False): if full_output: return self.xmin, self.fval, self.iter, self.funcalls else: return self.xmin def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): """ Given a function of one variable and a possible bracket, return a local minimizer of the function isolated to a fractional precision of tol. Parameters ---------- func : callable f(x,*args) Objective function. args : tuple, optional Additional arguments (if present). brack : tuple, optional Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair ``(xa, xb)`` to be used as initial points for a downhill bracket search (see `scipy.optimize.bracket`). The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``. tol : float, optional Relative error in solution `xopt` acceptable for convergence. full_output : bool, optional If True, return all output args (xmin, fval, iter, funcalls). maxiter : int, optional Maximum number of iterations in solution. Returns ------- xmin : ndarray Optimum point. fval : float (Optional output) Optimum function value. iter : int (Optional output) Number of iterations. funcalls : int (Optional output) Number of objective function evaluations made. See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Brent' `method` in particular. Notes ----- Uses inverse parabolic interpolation when possible to speed up convergence of golden section method. Does not ensure that the minimum lies in the range specified by `brack`. See `scipy.optimize.fminbound`. Examples -------- We illustrate the behaviour of the function when `brack` is of size 2 and 3 respectively. In the case where `brack` is of the form ``(xa, xb)``, we can see for the given values, the output does not necessarily lie in the range ``(xa, xb)``. >>> def f(x): ... return (x-1)**2 >>> from scipy import optimize >>> minimizer = optimize.brent(f, brack=(1, 2)) >>> minimizer 1 >>> res = optimize.brent(f, brack=(-1, 0.5, 2), full_output=True) >>> xmin, fval, iter, funcalls = res >>> f(xmin), fval (0.0, 0.0) """ options = {'xtol': tol, 'maxiter': maxiter} res = _minimize_scalar_brent(func, brack, args, **options) if full_output: return res['x'], res['fun'], res['nit'], res['nfev'] else: return res['x'] def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8, maxiter=500, disp=0, **unknown_options): """ Options ------- maxiter : int Maximum number of iterations to perform. xtol : float Relative error in solution `xopt` acceptable for convergence. 
disp: int, optional If non-zero, print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. Notes ----- Uses inverse parabolic interpolation when possible to speed up convergence of golden section method. """ _check_unknown_options(unknown_options) tol = xtol if tol < 0: raise ValueError('tolerance should be >= 0, got %r' % tol) brent = Brent(func=func, args=args, tol=tol, full_output=True, maxiter=maxiter, disp=disp) brent.set_bracket(brack) brent.optimize() x, fval, nit, nfev = brent.get_result(full_output=True) success = nit < maxiter and not (np.isnan(x) or np.isnan(fval)) if success: message = ("\nOptimization terminated successfully;\n" "The returned value satisfies the termination criteria\n" f"(using xtol = {xtol} )") else: if nit >= maxiter: message = "\nMaximum number of iterations exceeded" if np.isnan(x) or np.isnan(fval): message = f"{_status_message['nan']}" if disp: _print_success_message_or_warn(not success, message) return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev, success=success, message=message) def golden(func, args=(), brack=None, tol=_epsilon, full_output=0, maxiter=5000): """ Return the minimizer of a function of one variable using the golden section method. Given a function of one variable and a possible bracketing interval, return a minimizer of the function isolated to a fractional precision of tol. Parameters ---------- func : callable func(x,*args) Objective function to minimize. args : tuple, optional Additional arguments (if present), passed to func. brack : tuple, optional Either a triple ``(xa, xb, xc)`` where ``xa < xb < xc`` and ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair (xa, xb) to be used as initial points for a downhill bracket search (see `scipy.optimize.bracket`). The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``. tol : float, optional x tolerance stop criterion full_output : bool, optional If True, return optional outputs. maxiter : int Maximum number of iterations to perform. Returns ------- xmin : ndarray Optimum point. fval : float (Optional output) Optimum function value. funcalls : int (Optional output) Number of objective function evaluations made. See also -------- minimize_scalar: Interface to minimization algorithms for scalar univariate functions. See the 'Golden' `method` in particular. Notes ----- Uses analog of bisection method to decrease the bracketed interval. Examples -------- We illustrate the behaviour of the function when `brack` is of size 2 and 3, respectively. In the case where `brack` is of the form (xa,xb), we can see for the given values, the output need not necessarily lie in the range ``(xa, xb)``. >>> def f(x): ... return (x-1)**2 >>> from scipy import optimize >>> minimizer = optimize.golden(f, brack=(1, 2)) >>> minimizer 1 >>> res = optimize.golden(f, brack=(-1, 0.5, 2), full_output=True) >>> xmin, fval, funcalls = res >>> f(xmin), fval (9.925165290385052e-18, 9.925165290385052e-18) """ options = {'xtol': tol, 'maxiter': maxiter} res = _minimize_scalar_golden(func, brack, args, **options) if full_output: return res['x'], res['fun'], res['nfev'] else: return res['x'] def _minimize_scalar_golden(func, brack=None, args=(), xtol=_epsilon, maxiter=5000, disp=0, **unknown_options): """ Options ------- xtol : float Relative error in solution `xopt` acceptable for convergence. maxiter : int Maximum number of iterations to perform. disp: int, optional If non-zero, print messages. 
0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. """ _check_unknown_options(unknown_options) tol = xtol if brack is None: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args) elif len(brack) == 2: xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa, xb, xc = brack if (xa > xc): # swap so xa < xc can be assumed xc, xa = xa, xc if not ((xa < xb) and (xb < xc)): raise ValueError( "Bracketing values (xa, xb, xc) do not" " fulfill this requirement: (xa < xb) and (xb < xc)" ) fa = func(*((xa,) + args)) fb = func(*((xb,) + args)) fc = func(*((xc,) + args)) if not ((fb < fa) and (fb < fc)): raise ValueError( "Bracketing values (xa, xb, xc) do not fulfill" " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))" ) funcalls = 3 else: raise ValueError("Bracketing interval must be length 2 or 3 sequence.") _gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0)) _gC = 1.0 - _gR x3 = xc x0 = xa if (np.abs(xc - xb) > np.abs(xb - xa)): x1 = xb x2 = xb + _gC * (xc - xb) else: x2 = xb x1 = xb - _gC * (xb - xa) f1 = func(*((x1,) + args)) f2 = func(*((x2,) + args)) funcalls += 2 nit = 0 if disp > 2: print(" ") print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}") for i in range(maxiter): if np.abs(x3 - x0) <= tol * (np.abs(x1) + np.abs(x2)): break if (f2 < f1): x0 = x1 x1 = x2 x2 = _gR * x1 + _gC * x3 f1 = f2 f2 = func(*((x2,) + args)) else: x3 = x2 x2 = x1 x1 = _gR * x2 + _gC * x0 f2 = f1 f1 = func(*((x1,) + args)) funcalls += 1 if disp > 2: if (f1 < f2): xmin, fval = x1, f1 else: xmin, fval = x2, f2 print(f"{funcalls:^12g} {xmin:^12.6g} {fval:^12.6g}") nit += 1 # end of iteration loop if (f1 < f2): xmin = x1 fval = f1 else: xmin = x2 fval = f2 success = nit < maxiter and not (np.isnan(fval) or np.isnan(xmin)) if success: message = ("\nOptimization terminated successfully;\n" "The returned value satisfies the termination criteria\n" f"(using xtol = {xtol} )") else: if nit >= maxiter: message = "\nMaximum number of iterations exceeded" if np.isnan(xmin) or np.isnan(fval): message = f"{_status_message['nan']}" if disp: _print_success_message_or_warn(not success, message) return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit, success=success, message=message) def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000): """ Bracket the minimum of a function. Given a function and distinct initial points, search in the downhill direction (as defined by the initial points) and return three points that bracket the minimum of the function. Parameters ---------- func : callable f(x,*args) Objective function to minimize. xa, xb : float, optional Initial points. Defaults `xa` to 0.0, and `xb` to 1.0. A local minimum need not be contained within this interval. args : tuple, optional Additional arguments (if present), passed to `func`. grow_limit : float, optional Maximum grow limit. Defaults to 110.0 maxiter : int, optional Maximum number of iterations to perform. Defaults to 1000. Returns ------- xa, xb, xc : float Final points of the bracket. fa, fb, fc : float Objective function values at the bracket points. funcalls : int Number of function evaluations made. Raises ------ BracketError If no valid bracket is found before the algorithm terminates. See notes for conditions of a valid bracket. Notes ----- The algorithm attempts to find three strictly ordered points (i.e. 
:math:`x_a < x_b < x_c` or :math:`x_c < x_b < x_a`) satisfying :math:`f(x_b) ≤ f(x_a)` and :math:`f(x_b) ≤ f(x_c)`, where one of the inequalities must be satistfied strictly and all :math:`x_i` must be finite. Examples -------- This function can find a downward convex region of a function: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.optimize import bracket >>> def f(x): ... return 10*x**2 + 3*x + 5 >>> x = np.linspace(-2, 2) >>> y = f(x) >>> init_xa, init_xb = 0.1, 1 >>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=init_xa, xb=init_xb) >>> plt.axvline(x=init_xa, color="k", linestyle="--") >>> plt.axvline(x=init_xb, color="k", linestyle="--") >>> plt.plot(x, y, "-k") >>> plt.plot(xa, fa, "bx") >>> plt.plot(xb, fb, "rx") >>> plt.plot(xc, fc, "bx") >>> plt.show() Note that both initial points were to the right of the minimum, and the third point was found in the "downhill" direction: the direction in which the function appeared to be decreasing (to the left). The final points are strictly ordered, and the function value at the middle point is less than the function values at the endpoints; it follows that a minimum must lie within the bracket. """ _gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0 _verysmall_num = 1e-21 # convert to numpy floats if not already xa, xb = np.asarray([xa, xb]) fa = func(*(xa,) + args) fb = func(*(xb,) + args) if (fa < fb): # Switch so fa > fb xa, xb = xb, xa fa, fb = fb, fa xc = xb + _gold * (xb - xa) fc = func(*((xc,) + args)) funcalls = 3 iter = 0 while (fc < fb): tmp1 = (xb - xa) * (fb - fc) tmp2 = (xb - xc) * (fb - fa) val = tmp2 - tmp1 if np.abs(val) < _verysmall_num: denom = 2.0 * _verysmall_num else: denom = 2.0 * val w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom wlim = xb + grow_limit * (xc - xb) msg = ("No valid bracket was found before the iteration limit was " "reached. Consider trying different initial points or " "increasing `maxiter`.") if iter > maxiter: raise RuntimeError(msg) iter += 1 if (w - xc) * (xb - w) > 0.0: fw = func(*((w,) + args)) funcalls += 1 if (fw < fc): xa = xb xb = w fa = fb fb = fw break elif (fw > fb): xc = w fc = fw break w = xc + _gold * (xc - xb) fw = func(*((w,) + args)) funcalls += 1 elif (w - wlim)*(wlim - xc) >= 0.0: w = wlim fw = func(*((w,) + args)) funcalls += 1 elif (w - wlim)*(xc - w) > 0.0: fw = func(*((w,) + args)) funcalls += 1 if (fw < fc): xb = xc xc = w w = xc + _gold * (xc - xb) fb = fc fc = fw fw = func(*((w,) + args)) funcalls += 1 else: w = xc + _gold * (xc - xb) fw = func(*((w,) + args)) funcalls += 1 xa = xb xb = xc xc = w fa = fb fb = fc fc = fw # three conditions for a valid bracket cond1 = (fb < fc and fb <= fa) or (fb < fa and fb <= fc) cond2 = (xa < xb < xc or xc < xb < xa) cond3 = np.isfinite(xa) and np.isfinite(xb) and np.isfinite(xc) msg = ("The algorithm terminated without finding a valid bracket. " "Consider trying different initial points.") if not (cond1 and cond2 and cond3): e = BracketError(msg) e.data = (xa, xb, xc, fa, fb, fc, funcalls) raise e return xa, xb, xc, fa, fb, fc, funcalls class BracketError(RuntimeError): pass def _recover_from_bracket_error(solver, fun, bracket, args, **options): # `bracket` was originally written without checking whether the resulting # bracket is valid. `brent` and `golden` built on top of it without # checking the returned bracket for validity, and their output can be # incorrect without warning/error if the original bracket is invalid. 
# gh-14858 noticed the problem, and the following is the desired # behavior: # - `scipy.optimize.bracket`, `scipy.optimize.brent`, and # `scipy.optimize.golden` should raise an error if the bracket is # invalid, as opposed to silently returning garbage # - `scipy.optimize.minimize_scalar` should return with `success=False` # and other information # The changes that would be required to achieve this the traditional # way (`return`ing all the required information from bracket all the way # up to `minimizer_scalar`) are extensive and invasive. (See a6aa40d.) # We can achieve the same thing by raising the error in `bracket`, but # storing the information needed by `minimize_scalar` in the error object, # and intercepting it here. try: res = solver(fun, bracket, args, **options) except BracketError as e: msg = str(e) xa, xb, xc, fa, fb, fc, funcalls = e.data xs, fs = [xa, xb, xc], [fa, fb, fc] if np.any(np.isnan([xs, fs])): x, fun = np.nan, np.nan else: imin = np.argmin(fs) x, fun = xs[imin], fs[imin] return OptimizeResult(fun=fun, nfev=funcalls, x=x, nit=0, success=False, message=msg) return res def _line_for_search(x0, alpha, lower_bound, upper_bound): """ Given a parameter vector ``x0`` with length ``n`` and a direction vector ``alpha`` with length ``n``, and lower and upper bounds on each of the ``n`` parameters, what are the bounds on a scalar ``l`` such that ``lower_bound <= x0 + alpha * l <= upper_bound``. Parameters ---------- x0 : np.array. The vector representing the current location. Note ``np.shape(x0) == (n,)``. alpha : np.array. The vector representing the direction. Note ``np.shape(alpha) == (n,)``. lower_bound : np.array. The lower bounds for each parameter in ``x0``. If the ``i``th parameter in ``x0`` is unbounded below, then ``lower_bound[i]`` should be ``-np.inf``. Note ``np.shape(lower_bound) == (n,)``. upper_bound : np.array. The upper bounds for each parameter in ``x0``. If the ``i``th parameter in ``x0`` is unbounded above, then ``upper_bound[i]`` should be ``np.inf``. Note ``np.shape(upper_bound) == (n,)``. Returns ------- res : tuple ``(lmin, lmax)`` The bounds for ``l`` such that ``lower_bound[i] <= x0[i] + alpha[i] * l <= upper_bound[i]`` for all ``i``. """ # get nonzero indices of alpha so we don't get any zero division errors. # alpha will not be all zero, since it is called from _linesearch_powell # where we have a check for this. nonzero, = alpha.nonzero() lower_bound, upper_bound = lower_bound[nonzero], upper_bound[nonzero] x0, alpha = x0[nonzero], alpha[nonzero] low = (lower_bound - x0) / alpha high = (upper_bound - x0) / alpha # positive and negative indices pos = alpha > 0 lmin_pos = np.where(pos, low, 0) lmin_neg = np.where(pos, 0, high) lmax_pos = np.where(pos, high, 0) lmax_neg = np.where(pos, 0, low) lmin = np.max(lmin_pos + lmin_neg) lmax = np.min(lmax_pos + lmax_neg) # if x0 is outside the bounds, then it is possible that there is # no way to get back in the bounds for the parameters being updated # with the current direction alpha. # when this happens, lmax < lmin. # If this is the case, then we can just return (0, 0) return (lmin, lmax) if lmax >= lmin else (0, 0) def _linesearch_powell(func, p, xi, tol=1e-3, lower_bound=None, upper_bound=None, fval=None): """Line-search algorithm using fminbound. Find the minimium of the function ``func(x0 + alpha*direc)``. lower_bound : np.array. The lower bounds for each parameter in ``x0``. If the ``i``th parameter in ``x0`` is unbounded below, then ``lower_bound[i]`` should be ``-np.inf``. 
Note ``np.shape(lower_bound) == (n,)``. upper_bound : np.array. The upper bounds for each parameter in ``x0``. If the ``i``th parameter in ``x0`` is unbounded above, then ``upper_bound[i]`` should be ``np.inf``. Note ``np.shape(upper_bound) == (n,)``. fval : number. ``fval`` is equal to ``func(p)``, the idea is just to avoid recomputing it so we can limit the ``fevals``. """ def myfunc(alpha): return func(p + alpha*xi) # if xi is zero, then don't optimize if not np.any(xi): return ((fval, p, xi) if fval is not None else (func(p), p, xi)) elif lower_bound is None and upper_bound is None: # non-bounded minimization res = _recover_from_bracket_error(_minimize_scalar_brent, myfunc, None, tuple(), xtol=tol) alpha_min, fret = res.x, res.fun xi = alpha_min * xi return squeeze(fret), p + xi, xi else: bound = _line_for_search(p, xi, lower_bound, upper_bound) if np.isneginf(bound[0]) and np.isposinf(bound[1]): # equivalent to unbounded return _linesearch_powell(func, p, xi, fval=fval, tol=tol) elif not np.isneginf(bound[0]) and not np.isposinf(bound[1]): # we can use a bounded scalar minimization res = _minimize_scalar_bounded(myfunc, bound, xatol=tol / 100) xi = res.x * xi return squeeze(res.fun), p + xi, xi else: # only bounded on one side. use the tangent function to convert # the infinity bound to a finite bound. The new bounded region # is a subregion of the region bounded by -np.pi/2 and np.pi/2. bound = np.arctan(bound[0]), np.arctan(bound[1]) res = _minimize_scalar_bounded( lambda x: myfunc(np.tan(x)), bound, xatol=tol / 100) xi = np.tan(res.x) * xi return squeeze(res.fun), p + xi, xi def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, direc=None): """ Minimize a function using modified Powell's method. This method only uses function values, not derivatives. Parameters ---------- func : callable f(x,*args) Objective function to be minimized. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to func. xtol : float, optional Line-search error tolerance. ftol : float, optional Relative error in ``func(xopt)`` acceptable for convergence. maxiter : int, optional Maximum number of iterations to perform. maxfun : int, optional Maximum number of function evaluations to make. full_output : bool, optional If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and ``warnflag`` are returned. disp : bool, optional If True, print convergence messages. retall : bool, optional If True, return a list of the solution at each iteration. callback : callable, optional An optional user-supplied function, called after each iteration. Called as ``callback(xk)``, where ``xk`` is the current parameter vector. direc : ndarray, optional Initial fitting step and parameter order set as an (N, N) array, where N is the number of fitting parameters in `x0`. Defaults to step size 1.0 fitting all parameters simultaneously (``np.eye((N, N))``). To prevent initial consideration of values in a step or to change initial step size, set to 0 or desired step size in the Jth position in the Mth block, where J is the position in `x0` and M is the desired evaluation step, with steps being evaluated in index order. Step size and ordering will change freely as minimization proceeds. Returns ------- xopt : ndarray Parameter which minimizes `func`. fopt : number Value of function at minimum: ``fopt = func(xopt)``. direc : ndarray Current direction set. iter : int Number of iterations. funcalls : int Number of function calls made. 
warnflag : int Integer warning flag: 1 : Maximum number of function evaluations. 2 : Maximum number of iterations. 3 : NaN result encountered. 4 : The result is out of the provided bounds. allvecs : list List of solutions at each iteration. See also -------- minimize: Interface to unconstrained minimization algorithms for multivariate functions. See the 'Powell' method in particular. Notes ----- Uses a modification of Powell's method to find the minimum of a function of N variables. Powell's method is a conjugate direction method. The algorithm has two loops. The outer loop merely iterates over the inner loop. The inner loop minimizes over each current direction in the direction set. At the end of the inner loop, if certain conditions are met, the direction that gave the largest decrease is dropped and replaced with the difference between the current estimated x and the estimated x from the beginning of the inner-loop. The technical conditions for replacing the direction of greatest increase amount to checking that 1. No further gain can be made along the direction of greatest increase from that iteration. 2. The direction of greatest increase accounted for a large sufficient fraction of the decrease in the function value from that iteration of the inner loop. References ---------- Powell M.J.D. (1964) An efficient method for finding the minimum of a function of several variables without calculating derivatives, Computer Journal, 7 (2):155-162. Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.: Numerical Recipes (any edition), Cambridge University Press Examples -------- >>> def f(x): ... return x**2 >>> from scipy import optimize >>> minimum = optimize.fmin_powell(f, -1) Optimization terminated successfully. Current function value: 0.000000 Iterations: 2 Function evaluations: 16 >>> minimum array(0.0) """ opts = {'xtol': xtol, 'ftol': ftol, 'maxiter': maxiter, 'maxfev': maxfun, 'disp': disp, 'direc': direc, 'return_all': retall} callback = _wrap_callback(callback) res = _minimize_powell(func, x0, args, callback=callback, **opts) if full_output: retlist = (res['x'], res['fun'], res['direc'], res['nit'], res['nfev'], res['status']) if retall: retlist += (res['allvecs'], ) return retlist else: if retall: return res['x'], res['allvecs'] else: return res['x'] def _minimize_powell(func, x0, args=(), callback=None, bounds=None, xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None, disp=False, direc=None, return_all=False, **unknown_options): """ Minimization of scalar function of one or more variables using the modified Powell algorithm. Parameters ---------- fun : callable The objective function to be minimized. ``fun(x, *args) -> float`` where ``x`` is a 1-D array with shape (n,) and ``args`` is a tuple of the fixed parameters needed to completely specify the function. x0 : ndarray, shape (n,) Initial guess. Array of real elements of size (n,), where ``n`` is the number of independent variables. args : tuple, optional Extra arguments passed to the objective function and its derivatives (`fun`, `jac` and `hess` functions). method : str or callable, optional The present documentation is specific to ``method='powell'``, but other options are available. See documentation for `scipy.optimize.minimize`. bounds : sequence or `Bounds`, optional Bounds on decision variables. There are two ways to specify the bounds: 1. Instance of `Bounds` class. 2. Sequence of ``(min, max)`` pairs for each element in `x`. None is used to specify no bound. 
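        For illustration (the numbers are hypothetical), the following two
        specifications describe the same box, with ``x[0]`` confined to
        ``[-1, 1]`` and ``x[1]`` unbounded::

            Bounds(lb=[-1, -np.inf], ub=[1, np.inf])
            [(-1, 1), (None, None)]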
If bounds are not provided, then an unbounded line search will be used. If bounds are provided and the initial guess is within the bounds, then every function evaluation throughout the minimization procedure will be within the bounds. If bounds are provided, the initial guess is outside the bounds, and `direc` is full rank (or left to default), then some function evaluations during the first iteration may be outside the bounds, but every function evaluation after the first iteration will be within the bounds. If `direc` is not full rank, then some parameters may not be optimized and the solution is not guaranteed to be within the bounds. options : dict, optional A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. Depending on the method each iteration may use several function evaluations. disp : bool Set to True to print convergence messages. See method-specific options for ``method='powell'`` below. callback : callable, optional Called after each iteration. The signature is: ``callback(xk)`` where ``xk`` is the current parameter vector. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. Options ------- disp : bool Set to True to print convergence messages. xtol : float Relative error in solution `xopt` acceptable for convergence. ftol : float Relative error in ``fun(xopt)`` acceptable for convergence. maxiter, maxfev : int Maximum allowed number of iterations and function evaluations. Will default to ``N*1000``, where ``N`` is the number of variables, if neither `maxiter` or `maxfev` is set. If both `maxiter` and `maxfev` are set, minimization will stop at the first reached. direc : ndarray Initial set of direction vectors for the Powell method. return_all : bool, optional Set to True to return a list of the best solution at each of the iterations. """ _check_unknown_options(unknown_options) maxfun = maxfev retall = return_all x = asarray(x0).flatten() if retall: allvecs = [x] N = len(x) # If neither are set, then set both to default if maxiter is None and maxfun is None: maxiter = N * 1000 maxfun = N * 1000 elif maxiter is None: # Convert remaining Nones, to np.inf, unless the other is np.inf, in # which case use the default to avoid unbounded iteration if maxfun == np.inf: maxiter = N * 1000 else: maxiter = np.inf elif maxfun is None: if maxiter == np.inf: maxfun = N * 1000 else: maxfun = np.inf # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun) if direc is None: direc = eye(N, dtype=float) else: direc = asarray(direc, dtype=float) if np.linalg.matrix_rank(direc) != direc.shape[0]: warnings.warn("direc input is not full rank, some parameters may " "not be optimized", OptimizeWarning, 3) if bounds is None: # don't make these arrays of all +/- inf. because # _linesearch_powell will do an unnecessary check of all the elements. # just keep them None, _linesearch_powell will not have to check # all the elements. lower_bound, upper_bound = None, None else: # bounds is standardized in _minimize.py. 
lower_bound, upper_bound = bounds.lb, bounds.ub if np.any(lower_bound > x0) or np.any(x0 > upper_bound): warnings.warn("Initial guess is not within the specified bounds", OptimizeWarning, 3) fval = squeeze(func(x)) x1 = x.copy() iter = 0 while True: try: fx = fval bigind = 0 delta = 0.0 for i in range(N): direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol * 100, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if retall: allvecs.append(x) intermediate_result = OptimizeResult(x=x, fun=fval) if _call_callback_maybe_halt(callback, intermediate_result): break bnd = ftol * (np.abs(fx) + np.abs(fval)) + 1e-20 if 2.0 * (fx - fval) <= bnd: break if fcalls[0] >= maxfun: break if iter >= maxiter: break if np.isnan(fx) and np.isnan(fval): # Ended up in a nan-region: bail out break # Construct the extrapolated point direc1 = x - x1 x1 = x.copy() # make sure that we don't go outside the bounds when extrapolating if lower_bound is None and upper_bound is None: lmax = 1 else: _, lmax = _line_for_search(x, direc1, lower_bound, upper_bound) x2 = x + min(lmax, 1) * direc1 fx2 = squeeze(func(x2)) if (fx > fx2): t = 2.0*(fx + fx2 - 2.0*fval) temp = (fx - fval - delta) t *= temp*temp temp = fx - fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell( func, x, direc1, tol=xtol * 100, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval ) if np.any(direc1): direc[bigind] = direc[-1] direc[-1] = direc1 except _MaxFuncCallError: break warnflag = 0 msg = _status_message['success'] # out of bounds is more urgent than exceeding function evals or iters, # but I don't want to cause inconsistencies by changing the # established warning flags for maxfev and maxiter, so the out of bounds # warning flag becomes 3, but is checked for first. if bounds and (np.any(lower_bound > x) or np.any(x > upper_bound)): warnflag = 4 msg = _status_message['out_of_bounds'] elif fcalls[0] >= maxfun: warnflag = 1 msg = _status_message['maxfev'] elif iter >= maxiter: warnflag = 2 msg = _status_message['maxiter'] elif np.isnan(fval) or np.isnan(x).any(): warnflag = 3 msg = _status_message['nan'] if disp: _print_success_message_or_warn(warnflag, msg, RuntimeWarning) print(" Current function value: %f" % fval) print(" Iterations: %d" % iter) print(" Function evaluations: %d" % fcalls[0]) result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0], status=warnflag, success=(warnflag == 0), message=msg, x=x) if retall: result['allvecs'] = allvecs return result def _endprint(x, flag, fval, maxfun, xtol, disp): if flag == 0: if disp > 1: print("\nOptimization terminated successfully;\n" "The returned value satisfies the termination criteria\n" "(using xtol = ", xtol, ")") return if flag == 1: msg = ("\nMaximum number of function evaluations exceeded --- " "increase maxfun argument.\n") elif flag == 2: msg = "\n{}".format(_status_message['nan']) _print_success_message_or_warn(flag, msg) return def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin, disp=False, workers=1): """Minimize a function over a given range by brute force. Uses the "brute force" method, i.e., computes the function's value at each point of a multidimensional grid of points, to find the global minimum of the function. The function is evaluated everywhere in the range with the datatype of the first call to the function, as enforced by the ``vectorize`` NumPy function. 
The value and type of the function evaluation returned when ``full_output=True`` are affected in addition by the ``finish`` argument (see Notes). The brute force approach is inefficient because the number of grid points increases exponentially - the number of grid points to evaluate is ``Ns ** len(x)``. Consequently, even with coarse grid spacing, even moderately sized problems can take a long time to run, and/or run into memory limitations. Parameters ---------- func : callable The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. ranges : tuple Each component of the `ranges` tuple must be either a "slice object" or a range tuple of the form ``(low, high)``. The program uses these to create the grid of points on which the objective function will be computed. See `Note 2` for more detail. args : tuple, optional Any additional fixed parameters needed to completely specify the function. Ns : int, optional Number of grid points along the axes, if not otherwise specified. See `Note2`. full_output : bool, optional If True, return the evaluation grid and the objective function's values on it. finish : callable, optional An optimization function that is called with the result of brute force minimization as initial guess. `finish` should take `func` and the initial guess as positional arguments, and take `args` as keyword arguments. It may additionally take `full_output` and/or `disp` as keyword arguments. Use None if no "polishing" function is to be used. See Notes for more details. disp : bool, optional Set to True to print convergence messages from the `finish` callable. workers : int or map-like callable, optional If `workers` is an int the grid is subdivided into `workers` sections and evaluated in parallel (uses `multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores available to the Process. Alternatively supply a map-like callable, such as `multiprocessing.Pool.map` for evaluating the grid in parallel. This evaluation is carried out as ``workers(func, iterable)``. Requires that `func` be pickleable. .. versionadded:: 1.3.0 Returns ------- x0 : ndarray A 1-D array containing the coordinates of a point at which the objective function had its minimum value. (See `Note 1` for which point is returned.) fval : float Function value at the point `x0`. (Returned when `full_output` is True.) grid : tuple Representation of the evaluation grid. It has the same length as `x0`. (Returned when `full_output` is True.) Jout : ndarray Function values at each point of the evaluation grid, i.e., ``Jout = func(*grid)``. (Returned when `full_output` is True.) See Also -------- basinhopping, differential_evolution Notes ----- *Note 1*: The program finds the gridpoint at which the lowest value of the objective function occurs. If `finish` is None, that is the point returned. When the global minimum occurs within (or not very far outside) the grid's boundaries, and the grid is fine enough, that point will be in the neighborhood of the global minimum. However, users often employ some other optimization program to "polish" the gridpoint values, i.e., to seek a more precise (local) minimum near `brute's` best gridpoint. The `brute` function's `finish` option provides a convenient way to do that. 
Any polishing program used must take `brute's` output as its initial guess as a positional argument, and take `brute's` input values for `args` as keyword arguments, otherwise an error will be raised. It may additionally take `full_output` and/or `disp` as keyword arguments. `brute` assumes that the `finish` function returns either an `OptimizeResult` object or a tuple in the form: ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing value of the argument, ``Jmin`` is the minimum value of the objective function, "..." may be some other returned values (which are not used by `brute`), and ``statuscode`` is the status code of the `finish` program. Note that when `finish` is not None, the values returned are those of the `finish` program, *not* the gridpoint ones. Consequently, while `brute` confines its search to the input grid points, the `finish` program's results usually will not coincide with any gridpoint, and may fall outside the grid's boundary. Thus, if a minimum only needs to be found over the provided grid points, make sure to pass in `finish=None`. *Note 2*: The grid of points is a `numpy.mgrid` object. For `brute` the `ranges` and `Ns` inputs have the following effect. Each component of the `ranges` tuple can be either a slice object or a two-tuple giving a range of values, such as (0, 5). If the component is a slice object, `brute` uses it directly. If the component is a two-tuple range, `brute` internally converts it to a slice object that interpolates `Ns` points from its low-value to its high-value, inclusive. Examples -------- We illustrate the use of `brute` to seek the global minimum of a function of two variables that is given as the sum of a positive-definite quadratic and two deep "Gaussian-shaped" craters. Specifically, define the objective function `f` as the sum of three other functions, ``f = f1 + f2 + f3``. We suppose each of these has a signature ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions are as defined below. >>> import numpy as np >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) >>> def f1(z, *params): ... x, y = z ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params ... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) >>> def f2(z, *params): ... x, y = z ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params ... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) >>> def f3(z, *params): ... x, y = z ... a, b, c, d, e, f, g, h, i, j, k, l, scale = params ... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) >>> def f(z, *params): ... return f1(z, *params) + f2(z, *params) + f3(z, *params) Thus, the objective function may have local minima near the minimum of each of the three functions of which it is composed. To use `fmin` to polish its gridpoint result, we may then continue as follows: >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) >>> from scipy import optimize >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True, ... finish=optimize.fmin) >>> resbrute[0] # global minimum array([-1.05665192, 1.80834843]) >>> resbrute[1] # function value at global minimum -3.4085818767 Note that if `finish` had been set to None, we would have gotten the gridpoint [-1.0 1.75] where the rounded function value is -2.892. 
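    As noted above, passing ``finish=None`` keeps the result at the best
    grid point itself (no polishing step); a brief sketch continuing the
    same example:

    >>> xmin_grid = optimize.brute(f, rranges, args=params, finish=None)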
""" N = len(ranges) if N > 40: raise ValueError("Brute Force not possible with more " "than 40 variables.") lrange = list(ranges) for k in range(N): if not isinstance(lrange[k], slice): if len(lrange[k]) < 3: lrange[k] = tuple(lrange[k]) + (complex(Ns),) lrange[k] = slice(*lrange[k]) if (N == 1): lrange = lrange[0] grid = np.mgrid[lrange] # obtain an array of parameters that is iterable by a map-like callable inpt_shape = grid.shape if (N > 1): grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T if not np.iterable(args): args = (args,) wrapped_func = _Brute_Wrapper(func, args) # iterate over input arrays, possibly in parallel with MapWrapper(pool=workers) as mapper: Jout = np.array(list(mapper(wrapped_func, grid))) if (N == 1): grid = (grid,) Jout = np.squeeze(Jout) elif (N > 1): Jout = np.reshape(Jout, inpt_shape[1:]) grid = np.reshape(grid.T, inpt_shape) Nshape = shape(Jout) indx = argmin(Jout.ravel(), axis=-1) Nindx = np.empty(N, int) xmin = np.empty(N, float) for k in range(N - 1, -1, -1): thisN = Nshape[k] Nindx[k] = indx % Nshape[k] indx = indx // thisN for k in range(N): xmin[k] = grid[k][tuple(Nindx)] Jmin = Jout[tuple(Nindx)] if (N == 1): grid = grid[0] xmin = xmin[0] if callable(finish): # set up kwargs for `finish` function finish_args = _getfullargspec(finish).args finish_kwargs = dict() if 'full_output' in finish_args: finish_kwargs['full_output'] = 1 if 'disp' in finish_args: finish_kwargs['disp'] = disp elif 'options' in finish_args: # pass 'disp' as `options` # (e.g., if `finish` is `minimize`) finish_kwargs['options'] = {'disp': disp} # run minimizer res = finish(func, xmin, args=args, **finish_kwargs) if isinstance(res, OptimizeResult): xmin = res.x Jmin = res.fun success = res.success else: xmin = res[0] Jmin = res[1] success = res[-1] == 0 if not success: if disp: warnings.warn( "Either final optimization did not succeed " "or `finish` does not return `statuscode` as its last " "argument.", RuntimeWarning, 2) if full_output: return xmin, Jmin, grid, Jout else: return xmin class _Brute_Wrapper: """ Object to wrap user cost function for optimize.brute, allowing picklability """ def __init__(self, f, args): self.f = f self.args = [] if args is None else args def __call__(self, x): # flatten needed for one dimensional case. return self.f(np.asarray(x).flatten(), *self.args) def show_options(solver=None, method=None, disp=True): """ Show documentation for additional options of optimization solvers. These are method-specific options that can be supplied through the ``options`` dict. Parameters ---------- solver : str Type of optimization solver. One of 'minimize', 'minimize_scalar', 'root', 'root_scalar', 'linprog', or 'quadratic_assignment'. method : str, optional If not given, shows all methods of the specified solver. Otherwise, show only the options for the specified method. Valid values corresponds to methods' names of respective solver (e.g., 'BFGS' for 'minimize'). disp : bool, optional Whether to print the result rather than returning it. 
Returns ------- text Either None (for disp=True) or the text string (disp=False) Notes ----- The solver-specific methods are: `scipy.optimize.minimize` - :ref:`Nelder-Mead <optimize.minimize-neldermead>` - :ref:`Powell <optimize.minimize-powell>` - :ref:`CG <optimize.minimize-cg>` - :ref:`BFGS <optimize.minimize-bfgs>` - :ref:`Newton-CG <optimize.minimize-newtoncg>` - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` - :ref:`TNC <optimize.minimize-tnc>` - :ref:`COBYLA <optimize.minimize-cobyla>` - :ref:`SLSQP <optimize.minimize-slsqp>` - :ref:`dogleg <optimize.minimize-dogleg>` - :ref:`trust-ncg <optimize.minimize-trustncg>` `scipy.optimize.root` - :ref:`hybr <optimize.root-hybr>` - :ref:`lm <optimize.root-lm>` - :ref:`broyden1 <optimize.root-broyden1>` - :ref:`broyden2 <optimize.root-broyden2>` - :ref:`anderson <optimize.root-anderson>` - :ref:`linearmixing <optimize.root-linearmixing>` - :ref:`diagbroyden <optimize.root-diagbroyden>` - :ref:`excitingmixing <optimize.root-excitingmixing>` - :ref:`krylov <optimize.root-krylov>` - :ref:`df-sane <optimize.root-dfsane>` `scipy.optimize.minimize_scalar` - :ref:`brent <optimize.minimize_scalar-brent>` - :ref:`golden <optimize.minimize_scalar-golden>` - :ref:`bounded <optimize.minimize_scalar-bounded>` `scipy.optimize.root_scalar` - :ref:`bisect <optimize.root_scalar-bisect>` - :ref:`brentq <optimize.root_scalar-brentq>` - :ref:`brenth <optimize.root_scalar-brenth>` - :ref:`ridder <optimize.root_scalar-ridder>` - :ref:`toms748 <optimize.root_scalar-toms748>` - :ref:`newton <optimize.root_scalar-newton>` - :ref:`secant <optimize.root_scalar-secant>` - :ref:`halley <optimize.root_scalar-halley>` `scipy.optimize.linprog` - :ref:`simplex <optimize.linprog-simplex>` - :ref:`interior-point <optimize.linprog-interior-point>` - :ref:`revised simplex <optimize.linprog-revised_simplex>` - :ref:`highs <optimize.linprog-highs>` - :ref:`highs-ds <optimize.linprog-highs-ds>` - :ref:`highs-ipm <optimize.linprog-highs-ipm>` `scipy.optimize.quadratic_assignment` - :ref:`faq <optimize.qap-faq>` - :ref:`2opt <optimize.qap-2opt>` Examples -------- We can print documentations of a solver in stdout: >>> from scipy.optimize import show_options >>> show_options(solver="minimize") ... Specifying a method is possible: >>> show_options(solver="minimize", method="Nelder-Mead") ... We can also get the documentations as a string: >>> show_options(solver="minimize", method="Nelder-Mead", disp=False) Minimization of scalar function of one or more variables using the ... """ import textwrap doc_routines = { 'minimize': ( ('bfgs', 'scipy.optimize._optimize._minimize_bfgs'), ('cg', 'scipy.optimize._optimize._minimize_cg'), ('cobyla', 'scipy.optimize._cobyla_py._minimize_cobyla'), ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'), ('l-bfgs-b', 'scipy.optimize._lbfgsb_py._minimize_lbfgsb'), ('nelder-mead', 'scipy.optimize._optimize._minimize_neldermead'), ('newton-cg', 'scipy.optimize._optimize._minimize_newtoncg'), ('powell', 'scipy.optimize._optimize._minimize_powell'), ('slsqp', 'scipy.optimize._slsqp_py._minimize_slsqp'), ('tnc', 'scipy.optimize._tnc._minimize_tnc'), ('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'), ('trust-constr', 'scipy.optimize._trustregion_constr.' 
'_minimize_trustregion_constr'), ('trust-exact', 'scipy.optimize._trustregion_exact._minimize_trustregion_exact'), ('trust-krylov', 'scipy.optimize._trustregion_krylov._minimize_trust_krylov'), ), 'root': ( ('hybr', 'scipy.optimize._minpack_py._root_hybr'), ('lm', 'scipy.optimize._root._root_leastsq'), ('broyden1', 'scipy.optimize._root._root_broyden1_doc'), ('broyden2', 'scipy.optimize._root._root_broyden2_doc'), ('anderson', 'scipy.optimize._root._root_anderson_doc'), ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'), ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'), ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'), ('krylov', 'scipy.optimize._root._root_krylov_doc'), ('df-sane', 'scipy.optimize._spectral._root_df_sane'), ), 'root_scalar': ( ('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'), ('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'), ('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'), ('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'), ('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'), ('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'), ('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'), ('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'), ), 'linprog': ( ('simplex', 'scipy.optimize._linprog._linprog_simplex_doc'), ('interior-point', 'scipy.optimize._linprog._linprog_ip_doc'), ('revised simplex', 'scipy.optimize._linprog._linprog_rs_doc'), ('highs-ipm', 'scipy.optimize._linprog._linprog_highs_ipm_doc'), ('highs-ds', 'scipy.optimize._linprog._linprog_highs_ds_doc'), ('highs', 'scipy.optimize._linprog._linprog_highs_doc'), ), 'quadratic_assignment': ( ('faq', 'scipy.optimize._qap._quadratic_assignment_faq'), ('2opt', 'scipy.optimize._qap._quadratic_assignment_2opt'), ), 'minimize_scalar': ( ('brent', 'scipy.optimize._optimize._minimize_scalar_brent'), ('bounded', 'scipy.optimize._optimize._minimize_scalar_bounded'), ('golden', 'scipy.optimize._optimize._minimize_scalar_golden'), ), } if solver is None: text = ["\n\n\n========\n", "minimize\n", "========\n"] text.append(show_options('minimize', disp=False)) text.extend(["\n\n===============\n", "minimize_scalar\n", "===============\n"]) text.append(show_options('minimize_scalar', disp=False)) text.extend(["\n\n\n====\n", "root\n", "====\n"]) text.append(show_options('root', disp=False)) text.extend(['\n\n\n=======\n', 'linprog\n', '=======\n']) text.append(show_options('linprog', disp=False)) text = "".join(text) else: solver = solver.lower() if solver not in doc_routines: raise ValueError(f'Unknown solver {solver!r}') if method is None: text = [] for name, _ in doc_routines[solver]: text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"]) text.append(show_options(solver, name, disp=False)) text = "".join(text) else: method = method.lower() methods = dict(doc_routines[solver]) if method not in methods: raise ValueError(f"Unknown method {method!r}") name = methods[method] # Import function object parts = name.split('.') mod_name = ".".join(parts[:-1]) __import__(mod_name) obj = getattr(sys.modules[mod_name], parts[-1]) # Get doc doc = obj.__doc__ if doc is not None: text = textwrap.dedent(doc).strip() else: text = "" if disp: print(text) return else: return text
146247
34.574799
101
py
scipy
scipy-main/scipy/optimize/_linprog_simplex.py
"""Simplex method for linear programming The *simplex* method uses a traditional, full-tableau implementation of Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex). This algorithm is included for backwards compatibility and educational purposes. .. versionadded:: 0.15.0 Warnings -------- The simplex method may encounter numerical difficulties when pivot values are close to the specified tolerance. If encountered try remove any redundant constraints, change the pivot strategy to Bland's rule or increase the tolerance value. Alternatively, more robust methods maybe be used. See :ref:`'interior-point' <optimize.linprog-interior-point>` and :ref:`'revised simplex' <optimize.linprog-revised_simplex>`. References ---------- .. [1] Dantzig, George B., Linear programming and extensions. Rand Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963 .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to Mathematical Programming", McGraw-Hill, Chapter 4. """ import numpy as np from warnings import warn from ._optimize import OptimizeResult, OptimizeWarning, _check_unknown_options from ._linprog_util import _postsolve def _pivot_col(T, tol=1e-9, bland=False): """ Given a linear programming simplex tableau, determine the column of the variable to enter the basis. Parameters ---------- T : 2-D array A 2-D array representing the simplex tableau, T, corresponding to the linear programming problem. It should have the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0]] for a Phase 2 problem, or the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0], [c'[0], c'[1], ..., c'[n_total], 0]] for a Phase 1 problem (a problem in which a basic feasible solution is sought prior to maximizing the actual objective. ``T`` is modified in place by ``_solve_simplex``. tol : float Elements in the objective row larger than -tol will not be considered for pivoting. Nominally this value is zero, but numerical issues cause a tolerance about zero to be necessary. bland : bool If True, use Bland's rule for selection of the column (select the first column with a negative coefficient in the objective row, regardless of magnitude). Returns ------- status: bool True if a suitable pivot column was found, otherwise False. A return of False indicates that the linear programming simplex algorithm is complete. col: int The index of the column of the pivot element. If status is False, col will be returned as nan. """ ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False) if ma.count() == 0: return False, np.nan if bland: # ma.mask is sometimes 0d return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0] return True, np.ma.nonzero(ma == ma.min())[0][0] def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False): """ Given a linear programming simplex tableau, determine the row for the pivot operation. Parameters ---------- T : 2-D array A 2-D array representing the simplex tableau, T, corresponding to the linear programming problem. It should have the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . 
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0]] for a Phase 2 problem, or the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0], [c'[0], c'[1], ..., c'[n_total], 0]] for a Phase 1 problem (a Problem in which a basic feasible solution is sought prior to maximizing the actual objective. ``T`` is modified in place by ``_solve_simplex``. basis : array A list of the current basic variables. pivcol : int The index of the pivot column. phase : int The phase of the simplex algorithm (1 or 2). tol : float Elements in the pivot column smaller than tol will not be considered for pivoting. Nominally this value is zero, but numerical issues cause a tolerance about zero to be necessary. bland : bool If True, use Bland's rule for selection of the row (if more than one row can be used, choose the one with the lowest variable index). Returns ------- status: bool True if a suitable pivot row was found, otherwise False. A return of False indicates that the linear programming problem is unbounded. row: int The index of the row of the pivot element. If status is False, row will be returned as nan. """ if phase == 1: k = 2 else: k = 1 ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False) if ma.count() == 0: return False, np.nan mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False) q = mb / ma min_rows = np.ma.nonzero(q == q.min())[0] if bland: return True, min_rows[np.argmin(np.take(basis, min_rows))] return True, min_rows[0] def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9): """ Pivot the simplex tableau inplace on the element given by (pivrow, pivol). The entering variable corresponds to the column given by pivcol forcing the variable basis[pivrow] to leave the basis. Parameters ---------- T : 2-D array A 2-D array representing the simplex tableau, T, corresponding to the linear programming problem. It should have the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0]] for a Phase 2 problem, or the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0], [c'[0], c'[1], ..., c'[n_total], 0]] for a Phase 1 problem (a problem in which a basic feasible solution is sought prior to maximizing the actual objective. ``T`` is modified in place by ``_solve_simplex``. basis : 1-D array An array of the indices of the basic variables, such that basis[i] contains the column corresponding to the basic variable for row i. Basis is modified in place by _apply_pivot. pivrow : int Row index of the pivot. pivcol : int Column index of the pivot. """ basis[pivrow] = pivcol pivval = T[pivrow, pivcol] T[pivrow] = T[pivrow] / pivval for irow in range(T.shape[0]): if irow != pivrow: T[irow] = T[irow] - T[pivrow] * T[irow, pivcol] # The selected pivot should never lead to a pivot value less than the tol. if np.isclose(pivval, tol, atol=0, rtol=1e4): message = ( "The pivot operation produces a pivot value of:{: .1e}, " "which is only slightly greater than the specified " "tolerance{: .1e}. This may lead to issues regarding the " "numerical stability of the simplex method. 
" "Removing redundant constraints, changing the pivot strategy " "via Bland's rule or increasing the tolerance may " "help reduce the issue.".format(pivval, tol)) warn(message, OptimizeWarning, stacklevel=5) def _solve_simplex(T, n, basis, callback, postsolve_args, maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0, ): """ Solve a linear programming problem in "standard form" using the Simplex Method. Linear Programming is intended to solve the following problem form: Minimize:: c @ x Subject to:: A @ x == b x >= 0 Parameters ---------- T : 2-D array A 2-D array representing the simplex tableau, T, corresponding to the linear programming problem. It should have the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0]] for a Phase 2 problem, or the form: [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]], [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]], . . . [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]], [c[0], c[1], ..., c[n_total], 0], [c'[0], c'[1], ..., c'[n_total], 0]] for a Phase 1 problem (a problem in which a basic feasible solution is sought prior to maximizing the actual objective. ``T`` is modified in place by ``_solve_simplex``. n : int The number of true variables in the problem. basis : 1-D array An array of the indices of the basic variables, such that basis[i] contains the column corresponding to the basic variable for row i. Basis is modified in place by _solve_simplex callback : callable, optional If a callback function is provided, it will be called within each iteration of the algorithm. The callback must accept a `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1-D array Current solution vector fun : float Current value of the objective function success : bool True only when a phase has completed successfully. This will be False for most iterations. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` phase : int The phase of the optimization being executed. In phase 1 a basic feasible solution is sought and the T has an additional row representing an alternate objective function. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. postsolve_args : tuple Data needed by _postsolve to convert the solution to the standard-form problem into the solution to the original problem. maxiter : int The maximum number of iterations to perform before aborting the optimization. tol : float The tolerance which determines when a solution is "close enough" to zero in Phase 1 to be considered a basic feasible solution or close enough to positive to serve as an optimal solution. phase : int The phase of the optimization being executed. In phase 1 a basic feasible solution is sought and the T has an additional row representing an alternate objective function. bland : bool If True, choose pivots using Bland's rule [3]_. 
In problems which fail to converge due to cycling, using Bland's rule can provide convergence at the expense of a less optimal path about the simplex. nit0 : int The initial iteration number used to keep an accurate iteration total in a two-phase problem. Returns ------- nit : int The number of iterations. Used to keep an accurate iteration total in the two-phase problem. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered """ nit = nit0 status = 0 message = '' complete = False if phase == 1: m = T.shape[1]-2 elif phase == 2: m = T.shape[1]-1 else: raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2") if phase == 2: # Check if any artificial variables are still in the basis. # If yes, check if any coefficients from this row and a column # corresponding to one of the non-artificial variable is non-zero. # If found, pivot at this term. If not, start phase 2. # Do this for all artificial variables in the basis. # Ref: "An Introduction to Linear Programming and Game Theory" # by Paul R. Thie, Gerard E. Keough, 3rd Ed, # Chapter 3.7 Redundant Systems (pag 102) for pivrow in [row for row in range(basis.size) if basis[row] > T.shape[1] - 2]: non_zero_row = [col for col in range(T.shape[1] - 1) if abs(T[pivrow, col]) > tol] if len(non_zero_row) > 0: pivcol = non_zero_row[0] _apply_pivot(T, basis, pivrow, pivcol, tol) nit += 1 if len(basis[:m]) == 0: solution = np.empty(T.shape[1] - 1, dtype=np.float64) else: solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1), dtype=np.float64) while not complete: # Find the pivot column pivcol_found, pivcol = _pivot_col(T, tol, bland) if not pivcol_found: pivcol = np.nan pivrow = np.nan status = 0 complete = True else: # Find the pivot row pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland) if not pivrow_found: status = 3 complete = True if callback is not None: solution[:] = 0 solution[basis[:n]] = T[:n, -1] x = solution[:m] x, fun, slack, con = _postsolve( x, postsolve_args ) res = OptimizeResult({ 'x': x, 'fun': fun, 'slack': slack, 'con': con, 'status': status, 'message': message, 'nit': nit, 'success': status == 0 and complete, 'phase': phase, 'complete': complete, }) callback(res) if not complete: if nit >= maxiter: # Iteration limit exceeded status = 1 complete = True else: _apply_pivot(T, basis, pivrow, pivcol, tol) nit += 1 return nit, status def _linprog_simplex(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-9, disp=False, bland=False, **unknown_options): """ Minimize a linear objective function subject to linear equality and non-negativity constraints using the two phase simplex method. Linear programming is intended to solve problems of the following form: Minimize:: c @ x Subject to:: A @ x == b x >= 0 User-facing documentation is in _linprog_doc.py. Parameters ---------- c : 1-D array Coefficients of the linear objective function to be minimized. c0 : float Constant term in objective function due to fixed (and eliminated) variables. (Purely for display.) A : 2-D array 2-D array such that ``A @ x``, gives the values of the equality constraints at ``x``. b : 1-D array 1-D array of values representing the right hand side of each equality constraint (row) in ``A``. callback : callable, optional If a callback function is provided, it will be called within each iteration of the algorithm. 
The callback function must accept a single `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1-D array Current solution vector fun : float Current value of the objective function success : bool True when an algorithm has completed successfully. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` phase : int The phase of the algorithm being executed. status : int An integer representing the status of the optimization:: 0 : Algorithm proceeding nominally 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. postsolve_args : tuple Data needed by _postsolve to convert the solution to the standard-form problem into the solution to the original problem. Options ------- maxiter : int The maximum number of iterations to perform. disp : bool If True, print exit status message to sys.stdout tol : float The tolerance which determines when a solution is "close enough" to zero in Phase 1 to be considered a basic feasible solution or close enough to positive to serve as an optimal solution. bland : bool If True, use Bland's anti-cycling rule [3]_ to choose pivots to prevent cycling. If False, choose pivots which should lead to a converged solution more quickly. The latter method is subject to cycling (non-convergence) in rare instances. unknown_options : dict Optional arguments not used by this particular solver. If `unknown_options` is non-empty a warning is issued listing all unused options. Returns ------- x : 1-D array Solution vector. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered message : str A string descriptor of the exit status of the optimization. iteration : int The number of iterations taken to solve the problem. References ---------- .. [1] Dantzig, George B., Linear programming and extensions. Rand Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963 .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to Mathematical Programming", McGraw-Hill, Chapter 4. .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. Mathematics of Operations Research (2), 1977: pp. 103-107. Notes ----- The expected problem formulation differs between the top level ``linprog`` module and the method specific solvers. The method specific solvers expect a problem in standard form: Minimize:: c @ x Subject to:: A @ x == b x >= 0 Whereas the top level ``linprog`` module expects a problem of form: Minimize:: c @ x Subject to:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The original problem contains equality, upper-bound and variable constraints whereas the method specific solver requires equality constraints and variable non-negativity. 
``linprog`` module converts the original problem to standard form by converting the simple bounds to upper bound constraints, introducing non-negative slack variables for inequality constraints, and expressing unbounded variables as the difference between two non-negative variables. """ _check_unknown_options(unknown_options) status = 0 messages = {0: "Optimization terminated successfully.", 1: "Iteration limit reached.", 2: "Optimization failed. Unable to find a feasible" " starting point.", 3: "Optimization failed. The problem appears to be unbounded.", 4: "Optimization failed. Singular matrix encountered."} n, m = A.shape # All constraints must have b >= 0. is_negative_constraint = np.less(b, 0) A[is_negative_constraint] *= -1 b[is_negative_constraint] *= -1 # As all constraints are equality constraints the artificial variables # will also be basic variables. av = np.arange(n) + m basis = av.copy() # Format the phase one tableau by adding artificial variables and stacking # the constraints, the objective row and pseudo-objective row. row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis])) row_objective = np.hstack((c, np.zeros(n), c0)) row_pseudo_objective = -row_constraints.sum(axis=0) row_pseudo_objective[av] = 0 T = np.vstack((row_constraints, row_objective, row_pseudo_objective)) nit1, status = _solve_simplex(T, n, basis, callback=callback, postsolve_args=postsolve_args, maxiter=maxiter, tol=tol, phase=1, bland=bland ) # if pseudo objective is zero, remove the last row from the tableau and # proceed to phase 2 nit2 = nit1 if abs(T[-1, -1]) < tol: # Remove the pseudo-objective row from the tableau T = T[:-1, :] # Remove the artificial variable columns from the tableau T = np.delete(T, av, 1) else: # Failure to find a feasible starting point status = 2 messages[status] = ( "Phase 1 of the simplex method failed to find a feasible " "solution. The pseudo-objective function evaluates to {0:.1e} " "which exceeds the required tolerance of {1} for a solution to be " "considered 'close enough' to zero to be a basic solution. " "Consider increasing the tolerance to be greater than {0:.1e}. " "If this tolerance is unacceptably large the problem may be " "infeasible.".format(abs(T[-1, -1]), tol) ) if status == 0: # Phase 2 nit2, status = _solve_simplex(T, n, basis, callback=callback, postsolve_args=postsolve_args, maxiter=maxiter, tol=tol, phase=2, bland=bland, nit0=nit1 ) solution = np.zeros(n + m) solution[basis[:n]] = T[:n, -1] x = solution[:m] return x, status, messages[status], int(nit2)
24,725
36.350453
82
py
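The `_linprog_simplex` solver above is normally reached through the public `scipy.optimize.linprog` interface rather than called directly. The sketch below is illustrative only: the problem data are invented, and passing `method='simplex'` assumes a SciPy version in which this legacy solver is still accepted (newer releases route linear programs through the HiGHS solvers).

import numpy as np
from scipy.optimize import linprog

# Toy problem (made up for illustration): minimize -x0 - 2*x1
# subject to x0 + x1 <= 4, x0 + 3*x1 <= 6 and x0, x1 >= 0.
c = np.array([-1.0, -2.0])
A_ub = np.array([[1.0, 1.0],
                 [1.0, 3.0]])
b_ub = np.array([4.0, 6.0])

# method='simplex' selects the legacy full-tableau solver defined above;
# 'tol' and 'bland' map onto the options documented in _linprog_simplex.
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
              options={'tol': 1e-9, 'bland': False})
print(res.x, res.fun, res.status, res.nit)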
scipy
scipy-main/scipy/optimize/setup.py
import sys import os.path from os.path import join from scipy._build_utils import numpy_nodepr_api def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info from scipy._build_utils import (gfortran_legacy_flag_hook, blas_ilp64_pre_build_hook, combine_dict, uses_blas64, get_f2py_int64_options) from scipy._build_utils.compiler_helper import ( set_cxx_flags_clib_hook, set_c_flags_hook) from distutils.sysconfig import get_python_inc config = Configuration('optimize', parent_package, top_path) include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')] minpack_src = [join('minpack', '*f')] config.add_library('minpack', sources=minpack_src) config.add_extension('_minpack', sources=['_minpackmodule.c'], libraries=['minpack'], depends=(["minpack.h", "__minpack.h"] + minpack_src), include_dirs=include_dirs, **numpy_nodepr_api) config.add_library('rectangular_lsap', sources='rectangular_lsap/rectangular_lsap.cpp', headers='rectangular_lsap/rectangular_lsap.h', _pre_build_hook=set_cxx_flags_clib_hook) _lsap = config.add_extension( '_lsap', sources=['_lsap.c'], libraries=['rectangular_lsap'], depends=(['rectangular_lsap/rectangular_lsap.cpp', 'rectangular_lsap/rectangular_lsap.h']), include_dirs=include_dirs, **numpy_nodepr_api) _lsap._pre_build_hook = set_c_flags_hook rootfind_src = [join('Zeros', '*.c')] rootfind_hdr = [join('Zeros', 'zeros.h')] config.add_library('rootfind', sources=rootfind_src, headers=rootfind_hdr, **numpy_nodepr_api) config.add_extension('_zeros', sources=['zeros.c'], libraries=['rootfind'], depends=(rootfind_src + rootfind_hdr), **numpy_nodepr_api) if uses_blas64(): lapack = get_info('lapack_ilp64_opt') f2py_options = get_f2py_int64_options() pre_build_hook = blas_ilp64_pre_build_hook(lapack) else: lapack = get_info('lapack_opt') f2py_options = None pre_build_hook = None lapack = combine_dict(lapack, numpy_nodepr_api) sources = ['lbfgsb.pyf', 'lbfgsb.f', 'linpack.f', 'timer.f'] ext = config.add_extension('_lbfgsb', sources=[join('lbfgsb_src', x) for x in sources], f2py_options=f2py_options, **lapack) ext._pre_build_hook = pre_build_hook sources = ['_moduleTNC.c', 'tnc.c'] config.add_extension('_moduleTNC', sources=[join('tnc', x) for x in sources], depends=[join('tnc', 'tnc.h')], **numpy_nodepr_api) config.add_extension('_cobyla', sources=[join('cobyla', x) for x in [ 'cobyla.pyf', 'cobyla2.f', 'trstlp.f']], **numpy_nodepr_api) sources = ['minpack2.pyf', 'dcsrch.f', 'dcstep.f'] config.add_extension('_minpack2', sources=[join('minpack2', x) for x in sources], **numpy_nodepr_api) sources = ['slsqp.pyf', 'slsqp_optmz.f'] ext = config.add_extension('_slsqp', sources=[ join('slsqp', x) for x in sources], **numpy_nodepr_api) ext._pre_build_hook = gfortran_legacy_flag_hook sources = [join('_direct', x) for x in ('direct_wrap.c', 'DIRect.c', 'DIRsubrout.c', 'DIRserial.c')] headers = ['_directmodule.h', join('_direct', 'direct-internal.h')] config.add_library('_direct_lib', sources=sources, headers=headers, include_dirs=[get_python_inc()], **numpy_nodepr_api) config.add_extension('_direct', sources=['_directmodule.c'], libraries=['_direct_lib'], depends=(sources + headers), **numpy_nodepr_api) if int(os.environ.get('SCIPY_USE_PYTHRAN', 1)): import pythran ext = pythran.dist.PythranExtension( 'scipy.optimize._group_columns', sources=["scipy/optimize/_group_columns.py"], config=['compiler.blas=none']) config.ext_modules.append(ext) else: config.add_extension('_group_columns', 
sources=['_group_columns.c'],) config.add_extension('_bglu_dense', sources=['_bglu_dense.c']) config.add_subpackage('_lsq') config.add_subpackage('_trlib') config.add_subpackage('_trustregion_constr') # Cython optimize API for zeros functions config.add_subpackage('cython_optimize') config.add_data_files('cython_optimize.pxd') config.add_subpackage('_shgo_lib') config.add_data_dir('_shgo_lib') # HiGHS linear programming libraries and extensions if 'sdist' not in sys.argv: # Avoid running this during sdist creation - it makes numpy.distutils # create an empty cython/src top-level directory. config.add_subpackage('_highs') config.add_data_dir('tests') # Add license files config.add_data_files('lbfgsb_src/README') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
5,833
36.63871
78
py
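This `configuration` function follows the legacy `numpy.distutils` pattern: a parent package's setup script collects it via `add_subpackage`, and the resulting dictionary is handed to `setup`. A minimal, hypothetical parent configuration is sketched below; the package name and layout are illustrative, not the actual top-level SciPy setup script.

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    # Hypothetical parent package that pulls in the 'optimize'
    # subpackage configured by the setup.py shown above.
    config = Configuration('scipy', parent_package, top_path)
    config.add_subpackage('optimize')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())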
scipy
scipy-main/scipy/optimize/_trustregion_exact.py
"""Nearly exact trust-region optimization subproblem.""" import numpy as np from scipy.linalg import (norm, get_lapack_funcs, solve_triangular, cho_solve) from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) __all__ = ['_minimize_trustregion_exact', 'estimate_smallest_singular_value', 'singular_leading_submatrix', 'IterativeSubproblem'] def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None, **trust_region_options): """ Minimization of scalar function of one or more variables using a nearly exact trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than ``gtol`` before successful termination. """ if jac is None: raise ValueError('Jacobian is required for trust region ' 'exact minimization.') if not callable(hess): raise ValueError('Hessian matrix is required for trust region ' 'exact minimization.') return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, subproblem=IterativeSubproblem, **trust_region_options) def estimate_smallest_singular_value(U): """Given upper triangular matrix ``U`` estimate the smallest singular value and the correspondent right singular vector in O(n**2) operations. Parameters ---------- U : ndarray Square upper triangular matrix. Returns ------- s_min : float Estimated smallest singular value of the provided matrix. z_min : ndarray Estimatied right singular vector. Notes ----- The procedure is based on [1]_ and is done in two steps. First, it finds a vector ``e`` with components selected from {+1, -1} such that the solution ``w`` from the system ``U.T w = e`` is as large as possible. Next it estimate ``U v = w``. The smallest singular value is close to ``norm(w)/norm(v)`` and the right singular vector is close to ``v/norm(v)``. The estimation will be better more ill-conditioned is the matrix. References ---------- .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H. An estimate for the condition number of a matrix. 1979. SIAM Journal on Numerical Analysis, 16(2), 368-375. """ U = np.atleast_2d(U) m, n = U.shape if m != n: raise ValueError("A square triangular matrix should be provided.") # A vector `e` with components selected from {+1, -1} # is selected so that the solution `w` to the system # `U.T w = e` is as large as possible. Implementation # based on algorithm 3.5.1, p. 142, from reference [2] # adapted for lower triangular matrix. p = np.zeros(n) w = np.empty(n) # Implemented according to: Golub, G. H., Van Loan, C. F. (2013). # "Matrix computations". Forth Edition. JHU press. pp. 140-142. for k in range(n): wp = (1-p[k]) / U.T[k, k] wm = (-1-p[k]) / U.T[k, k] pp = p[k+1:] + U.T[k+1:, k]*wp pm = p[k+1:] + U.T[k+1:, k]*wm if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1): w[k] = wp p[k+1:] = pp else: w[k] = wm p[k+1:] = pm # The system `U v = w` is solved using backward substitution. v = solve_triangular(U, w) v_norm = norm(v) w_norm = norm(w) # Smallest singular value s_min = w_norm / v_norm # Associated vector z_min = v / v_norm return s_min, z_min def gershgorin_bounds(H): """ Given a square matrix ``H`` compute upper and lower bounds for its eigenvalues (Gregoshgorin Bounds). Defined ref. [1]. References ---------- .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. Trust region methods. 2000. Siam. pp. 19. 
""" H_diag = np.diag(H) H_diag_abs = np.abs(H_diag) H_row_sums = np.sum(np.abs(H), axis=1) lb = np.min(H_diag + H_diag_abs - H_row_sums) ub = np.max(H_diag - H_diag_abs + H_row_sums) return lb, ub def singular_leading_submatrix(A, U, k): """ Compute term that makes the leading ``k`` by ``k`` submatrix from ``A`` singular. Parameters ---------- A : ndarray Symmetric matrix that is not positive definite. U : ndarray Upper triangular matrix resulting of an incomplete Cholesky decomposition of matrix ``A``. k : int Positive integer such that the leading k by k submatrix from `A` is the first non-positive definite leading submatrix. Returns ------- delta : float Amount that should be added to the element (k, k) of the leading k by k submatrix of ``A`` to make it singular. v : ndarray A vector such that ``v.T B v = 0``. Where B is the matrix A after ``delta`` is added to its element (k, k). """ # Compute delta delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1] n = len(A) # Inicialize v v = np.zeros(n) v[k-1] = 1 # Compute the remaining values of v by solving a triangular system. if k != 1: v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1]) return delta, v class IterativeSubproblem(BaseQuadraticSubproblem): """Quadratic subproblem solved by nearly exact iterative method. Notes ----- This subproblem solver was based on [1]_, [2]_ and [3]_, which implement similar algorithms. The algorithm is basically that of [1]_ but ideas from [2]_ and [3]_ were also used. References ---------- .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods", Siam, pp. 169-200, 2000. .. [2] J. Nocedal and S. Wright, "Numerical optimization", Springer Science & Business Media. pp. 83-91, 2006. .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step", SIAM Journal on Scientific and Statistical Computing, vol. 4(3), pp. 553-572, 1983. """ # UPDATE_COEFF appears in reference [1]_ # in formula 7.3.14 (p. 190) named as "theta". # As recommended there it value is fixed in 0.01. UPDATE_COEFF = 0.01 EPS = np.finfo(float).eps def __init__(self, x, fun, jac, hess, hessp=None, k_easy=0.1, k_hard=0.2): super().__init__(x, fun, jac, hess) # When the trust-region shrinks in two consecutive # calculations (``tr_radius < previous_tr_radius``) # the lower bound ``lambda_lb`` may be reused, # facilitating the convergence. To indicate no # previous value is known at first ``previous_tr_radius`` # is set to -1 and ``lambda_lb`` to None. self.previous_tr_radius = -1 self.lambda_lb = None self.niter = 0 # ``k_easy`` and ``k_hard`` are parameters used # to determine the stop criteria to the iterative # subproblem solver. Take a look at pp. 194-197 # from reference _[1] for a more detailed description. self.k_easy = k_easy self.k_hard = k_hard # Get Lapack function for cholesky decomposition. # The implemented SciPy wrapper does not return # the incomplete factorization needed by the method. self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,)) # Get info about Hessian self.dimension = len(self.hess) self.hess_gershgorin_lb,\ self.hess_gershgorin_ub = gershgorin_bounds(self.hess) self.hess_inf = norm(self.hess, np.inf) self.hess_fro = norm(self.hess, 'fro') # A constant such that for vectors smaler than that # backward substituition is not reliable. It was stabilished # based on Golub, G. H., Van Loan, C. F. (2013). # "Matrix computations". Forth Edition. JHU press., p.165. 
self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf def _initial_values(self, tr_radius): """Given a trust radius, return a good initial guess for the damping factor, the lower bound and the upper bound. The values were chosen accordingly to the guidelines on section 7.3.8 (p. 192) from [1]_. """ # Upper bound for the damping factor lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb, self.hess_fro, self.hess_inf)) # Lower bound for the damping factor lambda_lb = max(0, -min(self.hess.diagonal()), self.jac_mag/tr_radius - min(self.hess_gershgorin_ub, self.hess_fro, self.hess_inf)) # Improve bounds with previous info if tr_radius < self.previous_tr_radius: lambda_lb = max(self.lambda_lb, lambda_lb) # Initial guess for the damping factor if lambda_lb == 0: lambda_initial = 0 else: lambda_initial = max(np.sqrt(lambda_lb * lambda_ub), lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)) return lambda_initial, lambda_lb, lambda_ub def solve(self, tr_radius): """Solve quadratic subproblem""" lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius) n = self.dimension hits_boundary = True already_factorized = False self.niter = 0 while True: # Compute Cholesky factorization if already_factorized: already_factorized = False else: H = self.hess+lambda_current*np.eye(n) U, info = self.cholesky(H, lower=False, overwrite_a=False, clean=True) self.niter += 1 # Check if factorization succeeded if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO: # Successful factorization # Solve `U.T U p = s` p = cho_solve((U, False), -self.jac) p_norm = norm(p) # Check for interior convergence if p_norm <= tr_radius and lambda_current == 0: hits_boundary = False break # Solve `U.T w = p` w = solve_triangular(U, p, trans='T') w_norm = norm(w) # Compute Newton step accordingly to # formula (4.44) p.87 from ref [2]_. delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius lambda_new = lambda_current + delta_lambda if p_norm < tr_radius: # Inside boundary s_min, z_min = estimate_smallest_singular_value(U) ta, tb = self.get_boundaries_intersections(p, z_min, tr_radius) # Choose `step_len` with the smallest magnitude. # The reason for this choice is explained at # ref [3]_, p. 6 (Immediately before the formula # for `tau`). 
step_len = min([ta, tb], key=abs) # Compute the quadratic term (p.T*H*p) quadratic_term = np.dot(p, np.dot(H, p)) # Check stop criteria relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2) if relative_error <= self.k_hard: p += step_len * z_min break # Update uncertainty bounds lambda_ub = lambda_current lambda_lb = max(lambda_lb, lambda_current - s_min**2) # Compute Cholesky factorization H = self.hess + lambda_new*np.eye(n) c, info = self.cholesky(H, lower=False, overwrite_a=False, clean=True) # Check if the factorization has succeeded # if info == 0: # Successful factorization # Update damping factor lambda_current = lambda_new already_factorized = True else: # Unsuccessful factorization # Update uncertainty bounds lambda_lb = max(lambda_lb, lambda_new) # Update damping factor lambda_current = max(np.sqrt(lambda_lb * lambda_ub), lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)) else: # Outside boundary # Check stop criteria relative_error = abs(p_norm - tr_radius) / tr_radius if relative_error <= self.k_easy: break # Update uncertainty bounds lambda_lb = lambda_current # Update damping factor lambda_current = lambda_new elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO: # jac_mag very close to zero # Check for interior convergence if lambda_current == 0: p = np.zeros(n) hits_boundary = False break s_min, z_min = estimate_smallest_singular_value(U) step_len = tr_radius # Check stop criteria if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2: p = step_len * z_min break # Update uncertainty bounds lambda_ub = lambda_current lambda_lb = max(lambda_lb, lambda_current - s_min**2) # Update damping factor lambda_current = max(np.sqrt(lambda_lb * lambda_ub), lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)) else: # Unsuccessful factorization # Compute auxiliary terms delta, v = singular_leading_submatrix(H, U, info) v_norm = norm(v) # Update uncertainty interval lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2) # Update damping factor lambda_current = max(np.sqrt(lambda_lb * lambda_ub), lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb)) self.lambda_lb = lambda_lb self.lambda_current = lambda_current self.previous_tr_radius = tr_radius return p, hits_boundary
15,413
34.763341
110
py
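In user code the `IterativeSubproblem` machinery above is reached through `scipy.optimize.minimize` with `method='trust-exact'`, which requires both a gradient and a full Hessian, as the `ValueError` checks in `_minimize_trustregion_exact` enforce. A small sketch using SciPy's Rosenbrock helpers (the starting point is arbitrary):

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])

# 'trust-exact' dispatches to _minimize_trustregion_exact; jac and hess
# are mandatory for this method.
res = minimize(rosen, x0, method='trust-exact',
               jac=rosen_der, hess=rosen_hess,
               options={'gtol': 1e-8})
print(res.x, res.nit)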
scipy
scipy-main/scipy/optimize/_tstutils.py
r""" Parameters used in test and benchmark methods. Collections of test cases suitable for testing 1-D root-finders 'original': The original benchmarking functions. Real-valued functions of real-valued inputs on an interval with a zero. f1, .., f3 are continuous and infinitely differentiable f4 has a left- and right- discontinuity at the root f5 has a root at 1 replacing a 1st order pole f6 is randomly positive on one side of the root, randomly negative on the other. f4 - f6 are not continuous at the root. 'aps': The test problems in the 1995 paper TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" by Alefeld, Potra and Shi. Real-valued functions of real-valued inputs on an interval with a zero. Suitable for methods which start with an enclosing interval, and derivatives up to 2nd order. 'complex': Some complex-valued functions of complex-valued inputs. No enclosing bracket is provided. Suitable for methods which use one or more starting values, and derivatives up to 2nd order. The test cases are provided as a list of dictionaries. The dictionary keys will be a subset of: ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "a", "b", "x0", "x1", "root", "ID"] """ # Sources: # [1] Alefeld, G. E. and Potra, F. A. and Shi, Yixun, # "Algorithm 748: Enclosing Zeros of Continuous Functions", # ACM Trans. Math. Softw. Volume 221(1995) # doi = {10.1145/210089.210111}, # [2] Chandrupatla, Tirupathi R. "A new hybrid quadratic/bisection algorithm # for finding the zero of a nonlinear function without using derivatives." # Advances in Engineering Software 28.3 (1997): 145-149. from random import random import numpy as np from scipy.optimize import _zeros_py as cc # "description" refers to the original functions description = """ f2 is a symmetric parabola, x**2 - 1 f3 is a quartic polynomial with large hump in interval f4 is step function with a discontinuity at 1 f5 is a hyperbola with vertical asymptote at 1 f6 has random values positive to left of 1, negative to right Of course, these are not real problems. They just test how the 'good' solvers behave in bad circumstances where bisection is really the best. A good solver should not be much worse than bisection in such circumstance, while being faster for smooth monotone sorts of functions. """ def f1(x): r"""f1 is a quadratic with roots at 0 and 1""" return x * (x - 1.) def f1_fp(x): return 2 * x - 1 def f1_fpp(x): return 2 def f2(x): r"""f2 is a symmetric parabola, x**2 - 1""" return x**2 - 1 def f2_fp(x): return 2 * x def f2_fpp(x): return 2 def f3(x): r"""A quartic with roots at 0, 1, 2 and 3""" return x * (x - 1.) * (x - 2.) * (x - 3.) # x**4 - 6x**3 + 11x**2 - 6x def f3_fp(x): return 4 * x**3 - 18 * x**2 + 22 * x - 6 def f3_fpp(x): return 12 * x**2 - 36 * x + 22 def f4(x): r"""Piecewise linear, left- and right- discontinuous at x=1, the root.""" if x > 1: return 1.0 + .1 * x if x < 1: return -1.0 + .1 * x return 0 def f5(x): r"""Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root.""" if x != 1: return 1.0 / (1. - x) return 0 # f6(x) returns random value. 
Without memoization, calling twice with the # same x returns different values, hence a "random value", not a # "function with random values" _f6_cache = {} def f6(x): v = _f6_cache.get(x, None) if v is None: if x > 1: v = random() elif x < 1: v = -random() else: v = 0 _f6_cache[x] = v return v # Each Original test case has # - a function and its two derivatives, # - additional arguments, # - a bracket enclosing a root, # - the order of differentiability (smoothness) on this interval # - a starting value for methods which don't require a bracket # - the root (inside the bracket) # - an Identifier of the test case _ORIGINAL_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID"] _ORIGINAL_TESTS = [ [f1, f1_fp, f1_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.01.00"], [f2, f2_fp, f2_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.02.00"], [f3, f3_fp, f3_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.03.00"], [f4, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.04.00"], [f5, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.05.00"], [f6, None, None, (), [0.5, np.sqrt(3)], -np.inf, 0.6, 1.0, "original.05.00"] ] _ORIGINAL_TESTS_DICTS = [dict(zip(_ORIGINAL_TESTS_KEYS, testcase)) for testcase in _ORIGINAL_TESTS] # ################## # "APS" test cases # Functions and test cases that appear in [1] def aps01_f(x): r"""Straightforward sum of trigonometric function and polynomial""" return np.sin(x) - x / 2 def aps01_fp(x): return np.cos(x) - 1.0 / 2 def aps01_fpp(x): return -np.sin(x) def aps02_f(x): r"""poles at x=n**2, 1st and 2nd derivatives at root are also close to 0""" ii = np.arange(1, 21) return -2 * np.sum((2 * ii - 5)**2 / (x - ii**2)**3) def aps02_fp(x): ii = np.arange(1, 21) return 6 * np.sum((2 * ii - 5)**2 / (x - ii**2)**4) def aps02_fpp(x): ii = np.arange(1, 21) return 24 * np.sum((2 * ii - 5)**2 / (x - ii**2)**5) def aps03_f(x, a, b): r"""Rapidly changing at the root""" return a * x * np.exp(b * x) def aps03_fp(x, a, b): return a * (b * x + 1) * np.exp(b * x) def aps03_fpp(x, a, b): return a * (b * (b * x + 1) + b) * np.exp(b * x) def aps04_f(x, n, a): r"""Medium-degree polynomial""" return x**n - a def aps04_fp(x, n, a): return n * x**(n - 1) def aps04_fpp(x, n, a): return n * (n - 1) * x**(n - 2) def aps05_f(x): r"""Simple Trigonometric function""" return np.sin(x) - 1.0 / 2 def aps05_fp(x): return np.cos(x) def aps05_fpp(x): return -np.sin(x) def aps06_f(x, n): r"""Exponential rapidly changing from -1 to 1 at x=0""" return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1 def aps06_fp(x, n): return 2 * np.exp(-n) + 2 * n * np.exp(-n * x) def aps06_fpp(x, n): return -2 * n * n * np.exp(-n * x) def aps07_f(x, n): r"""Upside down parabola with parametrizable height""" return (1 + (1 - n)**2) * x - (1 - n * x)**2 def aps07_fp(x, n): return (1 + (1 - n)**2) + 2 * n * (1 - n * x) def aps07_fpp(x, n): return -2 * n * n def aps08_f(x, n): r"""Degree n polynomial""" return x * x - (1 - x)**n def aps08_fp(x, n): return 2 * x + n * (1 - x)**(n - 1) def aps08_fpp(x, n): return 2 - n * (n - 1) * (1 - x)**(n - 2) def aps09_f(x, n): r"""Upside down quartic with parametrizable height""" return (1 + (1 - n)**4) * x - (1 - n * x)**4 def aps09_fp(x, n): return (1 + (1 - n)**4) + 4 * n * (1 - n * x)**3 def aps09_fpp(x, n): return -12 * n * (1 - n * x)**2 def aps10_f(x, n): r"""Exponential plus a polynomial""" return np.exp(-n * x) * (x - 1) + x**n def aps10_fp(x, n): return np.exp(-n * x) * (-n * (x - 1) + 1) + n * x**(n - 
1) def aps10_fpp(x, n): return np.exp(-n * x) * (-n * (-n * (x - 1) + 1) + -n * x) + n * (n - 1) * x**(n - 2) def aps11_f(x, n): r"""Rational function with a zero at x=1/n and a pole at x=0""" return (n * x - 1) / ((n - 1) * x) def aps11_fp(x, n): return 1 / (n - 1) / x**2 def aps11_fpp(x, n): return -2 / (n - 1) / x**3 def aps12_f(x, n): r"""nth root of x, with a zero at x=n""" return np.power(x, 1.0 / n) - np.power(n, 1.0 / n) def aps12_fp(x, n): return np.power(x, (1.0 - n) / n) / n def aps12_fpp(x, n): return np.power(x, (1.0 - 2 * n) / n) * (1.0 / n) * (1.0 - n) / n _MAX_EXPABLE = np.log(np.finfo(float).max) def aps13_f(x): r"""Function with *all* derivatives 0 at the root""" if x == 0: return 0 # x2 = 1.0/x**2 # if x2 > 708: # return 0 y = 1 / x**2 if y > _MAX_EXPABLE: return 0 return x / np.exp(y) def aps13_fp(x): if x == 0: return 0 y = 1 / x**2 if y > _MAX_EXPABLE: return 0 return (1 + 2 / x**2) / np.exp(y) def aps13_fpp(x): if x == 0: return 0 y = 1 / x**2 if y > _MAX_EXPABLE: return 0 return 2 * (2 - x**2) / x**5 / np.exp(y) def aps14_f(x, n): r"""0 for negative x-values, trigonometric+linear for x positive""" if x <= 0: return -n / 20.0 return n / 20.0 * (x / 1.5 + np.sin(x) - 1) def aps14_fp(x, n): if x <= 0: return 0 return n / 20.0 * (1.0 / 1.5 + np.cos(x)) def aps14_fpp(x, n): if x <= 0: return 0 return -n / 20.0 * (np.sin(x)) def aps15_f(x, n): r"""piecewise linear, constant outside of [0, 0.002/(1+n)]""" if x < 0: return -0.859 if x > 2 * 1e-3 / (1 + n): return np.e - 1.859 return np.exp((n + 1) * x / 2 * 1000) - 1.859 def aps15_fp(x, n): if not 0 <= x <= 2 * 1e-3 / (1 + n): return np.e - 1.859 return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 def aps15_fpp(x, n): if not 0 <= x <= 2 * 1e-3 / (1 + n): return np.e - 1.859 return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000 # Each APS test case has # - a function and its two derivatives, # - additional arguments, # - a bracket enclosing a root, # - the order of differentiability of the function on this interval # - a starting value for methods which don't require a bracket # - the root (inside the bracket) # - an Identifier of the test case # # Algorithm 748 is a bracketing algorithm so a bracketing interval was provided # in [1] for each test case. Newton and Halley methods need a single # starting point x0, which was chosen to be near the middle of the interval, # unless that would have made the problem too easy. 
_APS_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID"] _APS_TESTS = [ [aps01_f, aps01_fp, aps01_fpp, (), [np.pi / 2, np.pi], np.inf, 3, 1.89549426703398094e+00, "aps.01.00"], [aps02_f, aps02_fp, aps02_fpp, (), [1 + 1e-9, 4 - 1e-9], np.inf, 2, 3.02291534727305677e+00, "aps.02.00"], [aps02_f, aps02_fp, aps02_fpp, (), [4 + 1e-9, 9 - 1e-9], np.inf, 5, 6.68375356080807848e+00, "aps.02.01"], [aps02_f, aps02_fp, aps02_fpp, (), [9 + 1e-9, 16 - 1e-9], np.inf, 10, 1.12387016550022114e+01, "aps.02.02"], [aps02_f, aps02_fp, aps02_fpp, (), [16 + 1e-9, 25 - 1e-9], np.inf, 17, 1.96760000806234103e+01, "aps.02.03"], [aps02_f, aps02_fp, aps02_fpp, (), [25 + 1e-9, 36 - 1e-9], np.inf, 26, 2.98282273265047557e+01, "aps.02.04"], [aps02_f, aps02_fp, aps02_fpp, (), [36 + 1e-9, 49 - 1e-9], np.inf, 37, 4.19061161952894139e+01, "aps.02.05"], [aps02_f, aps02_fp, aps02_fpp, (), [49 + 1e-9, 64 - 1e-9], np.inf, 50, 5.59535958001430913e+01, "aps.02.06"], [aps02_f, aps02_fp, aps02_fpp, (), [64 + 1e-9, 81 - 1e-9], np.inf, 65, 7.19856655865877997e+01, "aps.02.07"], [aps02_f, aps02_fp, aps02_fpp, (), [81 + 1e-9, 100 - 1e-9], np.inf, 82, 9.00088685391666701e+01, "aps.02.08"], [aps02_f, aps02_fp, aps02_fpp, (), [100 + 1e-9, 121 - 1e-9], np.inf, 101, 1.10026532748330197e+02, "aps.02.09"], [aps03_f, aps03_fp, aps03_fpp, (-40, -1), [-9, 31], np.inf, -2, 0, "aps.03.00"], [aps03_f, aps03_fp, aps03_fpp, (-100, -2), [-9, 31], np.inf, -2, 0, "aps.03.01"], [aps03_f, aps03_fp, aps03_fpp, (-200, -3), [-9, 31], np.inf, -2, 0, "aps.03.02"], [aps04_f, aps04_fp, aps04_fpp, (4, 0.2), [0, 5], np.inf, 2.5, 6.68740304976422006e-01, "aps.04.00"], [aps04_f, aps04_fp, aps04_fpp, (6, 0.2), [0, 5], np.inf, 2.5, 7.64724491331730039e-01, "aps.04.01"], [aps04_f, aps04_fp, aps04_fpp, (8, 0.2), [0, 5], np.inf, 2.5, 8.17765433957942545e-01, "aps.04.02"], [aps04_f, aps04_fp, aps04_fpp, (10, 0.2), [0, 5], np.inf, 2.5, 8.51339922520784609e-01, "aps.04.03"], [aps04_f, aps04_fp, aps04_fpp, (12, 0.2), [0, 5], np.inf, 2.5, 8.74485272221167897e-01, "aps.04.04"], [aps04_f, aps04_fp, aps04_fpp, (4, 1), [0, 5], np.inf, 2.5, 1, "aps.04.05"], [aps04_f, aps04_fp, aps04_fpp, (6, 1), [0, 5], np.inf, 2.5, 1, "aps.04.06"], [aps04_f, aps04_fp, aps04_fpp, (8, 1), [0, 5], np.inf, 2.5, 1, "aps.04.07"], [aps04_f, aps04_fp, aps04_fpp, (10, 1), [0, 5], np.inf, 2.5, 1, "aps.04.08"], [aps04_f, aps04_fp, aps04_fpp, (12, 1), [0, 5], np.inf, 2.5, 1, "aps.04.09"], [aps04_f, aps04_fp, aps04_fpp, (8, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.10"], [aps04_f, aps04_fp, aps04_fpp, (10, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.11"], [aps04_f, aps04_fp, aps04_fpp, (12, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.12"], [aps04_f, aps04_fp, aps04_fpp, (14, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.13"], [aps05_f, aps05_fp, aps05_fpp, (), [0, 1.5], np.inf, 1.3, np.pi / 6, "aps.05.00"], [aps06_f, aps06_fp, aps06_fpp, (1,), [0, 1], np.inf, 0.5, 4.22477709641236709e-01, "aps.06.00"], [aps06_f, aps06_fp, aps06_fpp, (2,), [0, 1], np.inf, 0.5, 3.06699410483203705e-01, "aps.06.01"], [aps06_f, aps06_fp, aps06_fpp, (3,), [0, 1], np.inf, 0.5, 2.23705457654662959e-01, "aps.06.02"], [aps06_f, aps06_fp, aps06_fpp, (4,), [0, 1], np.inf, 0.5, 1.71719147519508369e-01, "aps.06.03"], [aps06_f, aps06_fp, aps06_fpp, (5,), [0, 1], np.inf, 0.4, 1.38257155056824066e-01, "aps.06.04"], [aps06_f, aps06_fp, aps06_fpp, (20,), [0, 1], np.inf, 0.1, 3.46573590208538521e-02, "aps.06.05"], [aps06_f, aps06_fp, aps06_fpp, (40,), [0, 1], np.inf, 5e-02, 1.73286795139986315e-02, 
"aps.06.06"], [aps06_f, aps06_fp, aps06_fpp, (60,), [0, 1], np.inf, 1.0 / 30, 1.15524530093324210e-02, "aps.06.07"], [aps06_f, aps06_fp, aps06_fpp, (80,), [0, 1], np.inf, 2.5e-02, 8.66433975699931573e-03, "aps.06.08"], [aps06_f, aps06_fp, aps06_fpp, (100,), [0, 1], np.inf, 2e-02, 6.93147180559945415e-03, "aps.06.09"], [aps07_f, aps07_fp, aps07_fpp, (5,), [0, 1], np.inf, 0.4, 3.84025518406218985e-02, "aps.07.00"], [aps07_f, aps07_fp, aps07_fpp, (10,), [0, 1], np.inf, 0.4, 9.90000999800049949e-03, "aps.07.01"], [aps07_f, aps07_fp, aps07_fpp, (20,), [0, 1], np.inf, 0.4, 2.49375003906201174e-03, "aps.07.02"], [aps08_f, aps08_fp, aps08_fpp, (2,), [0, 1], np.inf, 0.9, 0.5, "aps.08.00"], [aps08_f, aps08_fp, aps08_fpp, (5,), [0, 1], np.inf, 0.9, 3.45954815848242059e-01, "aps.08.01"], [aps08_f, aps08_fp, aps08_fpp, (10,), [0, 1], np.inf, 0.9, 2.45122333753307220e-01, "aps.08.02"], [aps08_f, aps08_fp, aps08_fpp, (15,), [0, 1], np.inf, 0.9, 1.95547623536565629e-01, "aps.08.03"], [aps08_f, aps08_fp, aps08_fpp, (20,), [0, 1], np.inf, 0.9, 1.64920957276440960e-01, "aps.08.04"], [aps09_f, aps09_fp, aps09_fpp, (1,), [0, 1], np.inf, 0.5, 2.75508040999484394e-01, "aps.09.00"], [aps09_f, aps09_fp, aps09_fpp, (2,), [0, 1], np.inf, 0.5, 1.37754020499742197e-01, "aps.09.01"], [aps09_f, aps09_fp, aps09_fpp, (4,), [0, 1], np.inf, 0.5, 1.03052837781564422e-02, "aps.09.02"], [aps09_f, aps09_fp, aps09_fpp, (5,), [0, 1], np.inf, 0.5, 3.61710817890406339e-03, "aps.09.03"], [aps09_f, aps09_fp, aps09_fpp, (8,), [0, 1], np.inf, 0.5, 4.10872918496395375e-04, "aps.09.04"], [aps09_f, aps09_fp, aps09_fpp, (15,), [0, 1], np.inf, 0.5, 2.59895758929076292e-05, "aps.09.05"], [aps09_f, aps09_fp, aps09_fpp, (20,), [0, 1], np.inf, 0.5, 7.66859512218533719e-06, "aps.09.06"], [aps10_f, aps10_fp, aps10_fpp, (1,), [0, 1], np.inf, 0.9, 4.01058137541547011e-01, "aps.10.00"], [aps10_f, aps10_fp, aps10_fpp, (5,), [0, 1], np.inf, 0.9, 5.16153518757933583e-01, "aps.10.01"], [aps10_f, aps10_fp, aps10_fpp, (10,), [0, 1], np.inf, 0.9, 5.39522226908415781e-01, "aps.10.02"], [aps10_f, aps10_fp, aps10_fpp, (15,), [0, 1], np.inf, 0.9, 5.48182294340655241e-01, "aps.10.03"], [aps10_f, aps10_fp, aps10_fpp, (20,), [0, 1], np.inf, 0.9, 5.52704666678487833e-01, "aps.10.04"], [aps11_f, aps11_fp, aps11_fpp, (2,), [0.01, 1], np.inf, 1e-02, 1.0 / 2, "aps.11.00"], [aps11_f, aps11_fp, aps11_fpp, (5,), [0.01, 1], np.inf, 1e-02, 1.0 / 5, "aps.11.01"], [aps11_f, aps11_fp, aps11_fpp, (15,), [0.01, 1], np.inf, 1e-02, 1.0 / 15, "aps.11.02"], [aps11_f, aps11_fp, aps11_fpp, (20,), [0.01, 1], np.inf, 1e-02, 1.0 / 20, "aps.11.03"], [aps12_f, aps12_fp, aps12_fpp, (2,), [1, 100], np.inf, 1.1, 2, "aps.12.00"], [aps12_f, aps12_fp, aps12_fpp, (3,), [1, 100], np.inf, 1.1, 3, "aps.12.01"], [aps12_f, aps12_fp, aps12_fpp, (4,), [1, 100], np.inf, 1.1, 4, "aps.12.02"], [aps12_f, aps12_fp, aps12_fpp, (5,), [1, 100], np.inf, 1.1, 5, "aps.12.03"], [aps12_f, aps12_fp, aps12_fpp, (6,), [1, 100], np.inf, 1.1, 6, "aps.12.04"], [aps12_f, aps12_fp, aps12_fpp, (7,), [1, 100], np.inf, 1.1, 7, "aps.12.05"], [aps12_f, aps12_fp, aps12_fpp, (9,), [1, 100], np.inf, 1.1, 9, "aps.12.06"], [aps12_f, aps12_fp, aps12_fpp, (11,), [1, 100], np.inf, 1.1, 11, "aps.12.07"], [aps12_f, aps12_fp, aps12_fpp, (13,), [1, 100], np.inf, 1.1, 13, "aps.12.08"], [aps12_f, aps12_fp, aps12_fpp, (15,), [1, 100], np.inf, 1.1, 15, "aps.12.09"], [aps12_f, aps12_fp, aps12_fpp, (17,), [1, 100], np.inf, 1.1, 17, "aps.12.10"], [aps12_f, aps12_fp, aps12_fpp, (19,), [1, 100], np.inf, 1.1, 19, "aps.12.11"], [aps12_f, aps12_fp, 
aps12_fpp, (21,), [1, 100], np.inf, 1.1, 21, "aps.12.12"], [aps12_f, aps12_fp, aps12_fpp, (23,), [1, 100], np.inf, 1.1, 23, "aps.12.13"], [aps12_f, aps12_fp, aps12_fpp, (25,), [1, 100], np.inf, 1.1, 25, "aps.12.14"], [aps12_f, aps12_fp, aps12_fpp, (27,), [1, 100], np.inf, 1.1, 27, "aps.12.15"], [aps12_f, aps12_fp, aps12_fpp, (29,), [1, 100], np.inf, 1.1, 29, "aps.12.16"], [aps12_f, aps12_fp, aps12_fpp, (31,), [1, 100], np.inf, 1.1, 31, "aps.12.17"], [aps12_f, aps12_fp, aps12_fpp, (33,), [1, 100], np.inf, 1.1, 33, "aps.12.18"], [aps13_f, aps13_fp, aps13_fpp, (), [-1, 4], np.inf, 1.5, 0, "aps.13.00"], [aps14_f, aps14_fp, aps14_fpp, (1,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.00"], [aps14_f, aps14_fp, aps14_fpp, (2,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.01"], [aps14_f, aps14_fp, aps14_fpp, (3,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.02"], [aps14_f, aps14_fp, aps14_fpp, (4,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.03"], [aps14_f, aps14_fp, aps14_fpp, (5,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.04"], [aps14_f, aps14_fp, aps14_fpp, (6,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.05"], [aps14_f, aps14_fp, aps14_fpp, (7,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.06"], [aps14_f, aps14_fp, aps14_fpp, (8,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.07"], [aps14_f, aps14_fp, aps14_fpp, (9,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.08"], [aps14_f, aps14_fp, aps14_fpp, (10,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.09"], [aps14_f, aps14_fp, aps14_fpp, (11,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.10"], [aps14_f, aps14_fp, aps14_fpp, (12,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.11"], [aps14_f, aps14_fp, aps14_fpp, (13,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.12"], [aps14_f, aps14_fp, aps14_fpp, (14,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.13"], [aps14_f, aps14_fp, aps14_fpp, (15,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.14"], [aps14_f, aps14_fp, aps14_fpp, (16,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.15"], [aps14_f, aps14_fp, aps14_fpp, (17,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.16"], [aps14_f, aps14_fp, aps14_fpp, (18,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.17"], [aps14_f, aps14_fp, aps14_fpp, (19,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.18"], [aps14_f, aps14_fp, aps14_fpp, (20,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.19"], [aps14_f, aps14_fp, aps14_fpp, (21,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.20"], [aps14_f, aps14_fp, aps14_fpp, (22,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.21"], [aps14_f, aps14_fp, aps14_fpp, (23,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.22"], [aps14_f, aps14_fp, aps14_fpp, (24,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.23"], [aps14_f, aps14_fp, aps14_fpp, (25,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.24"], [aps14_f, aps14_fp, aps14_fpp, (26,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.25"], [aps14_f, aps14_fp, aps14_fpp, (27,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.26"], [aps14_f, aps14_fp, aps14_fpp, (28,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.27"], [aps14_f, aps14_fp, aps14_fpp, (29,), [-1000, np.pi / 2], 0, 1, 
6.23806518961612433e-01, "aps.14.28"], [aps14_f, aps14_fp, aps14_fpp, (30,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.29"], [aps14_f, aps14_fp, aps14_fpp, (31,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.30"], [aps14_f, aps14_fp, aps14_fpp, (32,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.31"], [aps14_f, aps14_fp, aps14_fpp, (33,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.32"], [aps14_f, aps14_fp, aps14_fpp, (34,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.33"], [aps14_f, aps14_fp, aps14_fpp, (35,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.34"], [aps14_f, aps14_fp, aps14_fpp, (36,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.35"], [aps14_f, aps14_fp, aps14_fpp, (37,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.36"], [aps14_f, aps14_fp, aps14_fpp, (38,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.37"], [aps14_f, aps14_fp, aps14_fpp, (39,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.38"], [aps14_f, aps14_fp, aps14_fpp, (40,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.39"], [aps15_f, aps15_fp, aps15_fpp, (20,), [-1000, 1e-4], 0, -2, 5.90513055942197166e-05, "aps.15.00"], [aps15_f, aps15_fp, aps15_fpp, (21,), [-1000, 1e-4], 0, -2, 5.63671553399369967e-05, "aps.15.01"], [aps15_f, aps15_fp, aps15_fpp, (22,), [-1000, 1e-4], 0, -2, 5.39164094555919196e-05, "aps.15.02"], [aps15_f, aps15_fp, aps15_fpp, (23,), [-1000, 1e-4], 0, -2, 5.16698923949422470e-05, "aps.15.03"], [aps15_f, aps15_fp, aps15_fpp, (24,), [-1000, 1e-4], 0, -2, 4.96030966991445609e-05, "aps.15.04"], [aps15_f, aps15_fp, aps15_fpp, (25,), [-1000, 1e-4], 0, -2, 4.76952852876389951e-05, "aps.15.05"], [aps15_f, aps15_fp, aps15_fpp, (26,), [-1000, 1e-4], 0, -2, 4.59287932399486662e-05, "aps.15.06"], [aps15_f, aps15_fp, aps15_fpp, (27,), [-1000, 1e-4], 0, -2, 4.42884791956647841e-05, "aps.15.07"], [aps15_f, aps15_fp, aps15_fpp, (28,), [-1000, 1e-4], 0, -2, 4.27612902578832391e-05, "aps.15.08"], [aps15_f, aps15_fp, aps15_fpp, (29,), [-1000, 1e-4], 0, -2, 4.13359139159538030e-05, "aps.15.09"], [aps15_f, aps15_fp, aps15_fpp, (30,), [-1000, 1e-4], 0, -2, 4.00024973380198076e-05, "aps.15.10"], [aps15_f, aps15_fp, aps15_fpp, (31,), [-1000, 1e-4], 0, -2, 3.87524192962066869e-05, "aps.15.11"], [aps15_f, aps15_fp, aps15_fpp, (32,), [-1000, 1e-4], 0, -2, 3.75781035599579910e-05, "aps.15.12"], [aps15_f, aps15_fp, aps15_fpp, (33,), [-1000, 1e-4], 0, -2, 3.64728652199592355e-05, "aps.15.13"], [aps15_f, aps15_fp, aps15_fpp, (34,), [-1000, 1e-4], 0, -2, 3.54307833565318273e-05, "aps.15.14"], [aps15_f, aps15_fp, aps15_fpp, (35,), [-1000, 1e-4], 0, -2, 3.44465949299614980e-05, "aps.15.15"], [aps15_f, aps15_fp, aps15_fpp, (36,), [-1000, 1e-4], 0, -2, 3.35156058778003705e-05, "aps.15.16"], [aps15_f, aps15_fp, aps15_fpp, (37,), [-1000, 1e-4], 0, -2, 3.26336162494372125e-05, "aps.15.17"], [aps15_f, aps15_fp, aps15_fpp, (38,), [-1000, 1e-4], 0, -2, 3.17968568584260013e-05, "aps.15.18"], [aps15_f, aps15_fp, aps15_fpp, (39,), [-1000, 1e-4], 0, -2, 3.10019354369653455e-05, "aps.15.19"], [aps15_f, aps15_fp, aps15_fpp, (40,), [-1000, 1e-4], 0, -2, 3.02457906702100968e-05, "aps.15.20"], [aps15_f, aps15_fp, aps15_fpp, (100,), [-1000, 1e-4], 0, -2, 1.22779942324615231e-05, "aps.15.21"], [aps15_f, aps15_fp, aps15_fpp, (200,), [-1000, 1e-4], 0, -2, 6.16953939044086617e-06, "aps.15.22"], [aps15_f, aps15_fp, aps15_fpp, (300,), [-1000, 1e-4], 0, -2, 4.11985852982928163e-06, "aps.15.23"], 
[aps15_f, aps15_fp, aps15_fpp, (400,), [-1000, 1e-4], 0, -2, 3.09246238772721682e-06, "aps.15.24"], [aps15_f, aps15_fp, aps15_fpp, (500,), [-1000, 1e-4], 0, -2, 2.47520442610501789e-06, "aps.15.25"], [aps15_f, aps15_fp, aps15_fpp, (600,), [-1000, 1e-4], 0, -2, 2.06335676785127107e-06, "aps.15.26"], [aps15_f, aps15_fp, aps15_fpp, (700,), [-1000, 1e-4], 0, -2, 1.76901200781542651e-06, "aps.15.27"], [aps15_f, aps15_fp, aps15_fpp, (800,), [-1000, 1e-4], 0, -2, 1.54816156988591016e-06, "aps.15.28"], [aps15_f, aps15_fp, aps15_fpp, (900,), [-1000, 1e-4], 0, -2, 1.37633453660223511e-06, "aps.15.29"], [aps15_f, aps15_fp, aps15_fpp, (1000,), [-1000, 1e-4], 0, -2, 1.23883857889971403e-06, "aps.15.30"] ] _APS_TESTS_DICTS = [dict(zip(_APS_TESTS_KEYS, testcase)) for testcase in _APS_TESTS] # ################## # "complex" test cases # A few simple, complex-valued, functions, defined on the complex plane. def cplx01_f(z, n, a): r"""z**n-a: Use to find the nth root of a""" return z**n - a def cplx01_fp(z, n, a): return n * z**(n - 1) def cplx01_fpp(z, n, a): return n * (n - 1) * z**(n - 2) def cplx02_f(z, a): r"""e**z - a: Use to find the log of a""" return np.exp(z) - a def cplx02_fp(z, a): return np.exp(z) def cplx02_fpp(z, a): return np.exp(z) # Each "complex" test case has # - a function and its two derivatives, # - additional arguments, # - the order of differentiability of the function on this interval # - two starting values x0 and x1 # - the root # - an Identifier of the test case # # Algorithm 748 is a bracketing algorithm so a bracketing interval was provided # in [1] for each test case. Newton and Halley need a single starting point # x0, which was chosen to be near the middle of the interval, unless that # would make the problem too easy. _COMPLEX_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "smoothness", "x0", "x1", "root", "ID"] _COMPLEX_TESTS = [ [cplx01_f, cplx01_fp, cplx01_fpp, (2, -1), np.inf, (1 + 1j), (0.5 + 0.5j), 1j, "complex.01.00"], [cplx01_f, cplx01_fp, cplx01_fpp, (3, 1), np.inf, (-1 + 1j), (-0.5 + 2.0j), (-0.5 + np.sqrt(3) / 2 * 1.0j), "complex.01.01"], [cplx01_f, cplx01_fp, cplx01_fpp, (3, -1), np.inf, 1j, (0.5 + 0.5j), (0.5 + np.sqrt(3) / 2 * 1.0j), "complex.01.02"], [cplx01_f, cplx01_fp, cplx01_fpp, (3, 8), np.inf, 5, 4, 2, "complex.01.03"], [cplx02_f, cplx02_fp, cplx02_fpp, (-1,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 1.0j, "complex.02.00"], [cplx02_f, cplx02_fp, cplx02_fpp, (1j,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 0.5j, "complex.02.01"], ] _COMPLEX_TESTS_DICTS = [dict(zip(_COMPLEX_TESTS_KEYS, testcase)) for testcase in _COMPLEX_TESTS] def _add_a_b(tests): r"""Add "a" and "b" keys to each test from the "bracket" value""" for d in tests: for k, v in zip(['a', 'b'], d.get('bracket', [])): d[k] = v _add_a_b(_ORIGINAL_TESTS_DICTS) _add_a_b(_APS_TESTS_DICTS) _add_a_b(_COMPLEX_TESTS_DICTS) def get_tests(collection='original', smoothness=None): r"""Return the requested collection of test cases, as an array of dicts with subset-specific keys Allowed values of collection: 'original': The original benchmarking functions. Real-valued functions of real-valued inputs on an interval with a zero. f1, .., f3 are continuous and infinitely differentiable f4 has a single discontinuity at the root f5 has a root at 1 replacing a 1st order pole f6 is randomly positive on one side of the root, randomly negative on the other 'aps': The test problems in the TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions" paper by Alefeld, Potra and Shi. 
Real-valued functions of real-valued inputs on an interval with a zero. Suitable for methods which start with an enclosing interval, and derivatives up to 2nd order. 'complex': Some complex-valued functions of complex-valued inputs. No enclosing bracket is provided. Suitable for methods which use one or more starting values, and derivatives up to 2nd order. The dictionary keys will be a subset of ["f", "fprime", "fprime2", "args", "bracket", "a", b", "smoothness", "x0", "x1", "root", "ID"] """ collection = collection or "original" subsets = {"aps": _APS_TESTS_DICTS, "complex": _COMPLEX_TESTS_DICTS, "original": _ORIGINAL_TESTS_DICTS, "chandrupatla": _CHANDRUPATLA_TESTS_DICTS} tests = subsets.get(collection, []) if smoothness is not None: tests = [tc for tc in tests if tc['smoothness'] >= smoothness] return tests # Backwards compatibility methods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq] mstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq'] functions = [f2, f3, f4, f5, f6] fstrings = ['f2', 'f3', 'f4', 'f5', 'f6'] # ################## # "Chandrupatla" test cases # Functions and test cases that appear in [2] def fun1(x): return x**3 - 2*x - 5 fun1.root = 2.0945514815423265 # additional precision using mpmath.findroot def fun2(x): return 1 - 1/x**2 fun2.root = 1 def fun3(x): return (x-3)**3 fun3.root = 3 def fun4(x): return 6*(x-2)**5 fun4.root = 2 def fun5(x): return x**9 fun5.root = 0 def fun6(x): return x**19 fun6.root = 0 def fun7(x): return 0 if abs(x) < 3.8e-4 else x*np.exp(-x**(-2)) fun7.root = 0 def fun8(x): xi = 0.61489 return -(3062*(1-xi)*np.exp(-x))/(xi + (1-xi)*np.exp(-x)) - 1013 + 1628/x fun8.root = 1.0375360332870405 def fun9(x): return np.exp(x) - 2 - 0.01/x**2 + .000002/x**3 fun9.root = 0.7032048403631358 # Each "chandropatla" test case has # - a function, # - two starting values x0 and x1 # - the root # - the number of function evaluations required by Chandrupatla's algorithm # - an Identifier of the test case # # Chandrupatla's is a bracketing algorithm, so a bracketing interval was # provided in [2] for each test case. No special support for testing with # secant/Newton/Halley is provided. 
_CHANDRUPATLA_TESTS_KEYS = ["f", "bracket", "root", "nfeval", "ID"] _CHANDRUPATLA_TESTS = [ [fun1, [2, 3], fun1.root, 7], [fun1, [1, 10], fun1.root, 11], [fun1, [1, 100], fun1.root, 14], [fun1, [-1e4, 1e4], fun1.root, 23], [fun1, [-1e10, 1e10], fun1.root, 43], [fun2, [0.5, 1.51], fun2.root, 8], [fun2, [1e-4, 1e4], fun2.root, 22], [fun2, [1e-6, 1e6], fun2.root, 28], [fun2, [1e-10, 1e10], fun2.root, 41], [fun2, [1e-12, 1e12], fun2.root, 48], [fun3, [0, 5], fun3.root, 21], [fun3, [-10, 10], fun3.root, 23], [fun3, [-1e4, 1e4], fun3.root, 36], [fun3, [-1e6, 1e6], fun3.root, 45], [fun3, [-1e10, 1e10], fun3.root, 55], [fun4, [0, 5], fun4.root, 21], [fun4, [-10, 10], fun4.root, 23], [fun4, [-1e4, 1e4], fun4.root, 33], [fun4, [-1e6, 1e6], fun4.root, 43], [fun4, [-1e10, 1e10], fun4.root, 54], [fun5, [-1, 4], fun5.root, 21], [fun5, [-2, 5], fun5.root, 22], [fun5, [-1, 10], fun5.root, 23], [fun5, [-5, 50], fun5.root, 25], [fun5, [-10, 100], fun5.root, 26], [fun6, [-1., 4.], fun6.root, 21], [fun6, [-2., 5.], fun6.root, 22], [fun6, [-1., 10.], fun6.root, 23], [fun6, [-5., 50.], fun6.root, 25], [fun6, [-10., 100.], fun6.root, 26], [fun7, [-1, 4], fun7.root, 8], [fun7, [-2, 5], fun7.root, 8], [fun7, [-1, 10], fun7.root, 11], [fun7, [-5, 50], fun7.root, 18], [fun7, [-10, 100], fun7.root, 19], [fun8, [2e-4, 2], fun8.root, 9], [fun8, [2e-4, 3], fun8.root, 10], [fun8, [2e-4, 9], fun8.root, 11], [fun8, [2e-4, 27], fun8.root, 12], [fun8, [2e-4, 81], fun8.root, 14], [fun9, [2e-4, 1], fun9.root, 7], [fun9, [2e-4, 3], fun9.root, 8], [fun9, [2e-4, 9], fun9.root, 10], [fun9, [2e-4, 27], fun9.root, 11], [fun9, [2e-4, 81], fun9.root, 13], ] _CHANDRUPATLA_TESTS = [test + [f'{test[0].__name__}.{i%5+1}'] for i, test in enumerate(_CHANDRUPATLA_TESTS)] _CHANDRUPATLA_TESTS_DICTS = [dict(zip(_CHANDRUPATLA_TESTS_KEYS, testcase)) for testcase in _CHANDRUPATLA_TESTS] _add_a_b(_CHANDRUPATLA_TESTS_DICTS)
33,043
40.512563
116
py
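A minimal, self-contained sketch (not part of the dataset row above) of how rows like these are typically consumed: zip the key list against a test row to build a dict, add the "a"/"b" endpoints the way _add_a_b does, and hand the bracket to a bracketing solver. fun1 and its reference root are copied from the Chandrupatla table above; the key list, the solver choice (scipy.optimize.brentq) and the ID string are illustrative assumptions.

import numpy as np
from scipy.optimize import brentq

def fun1(x):
    # x**3 - 2*x - 5; root near 2.0945514815423265, as listed in the table above
    return x**3 - 2*x - 5

_KEYS = ["f", "bracket", "root", "ID"]
_TESTS = [[fun1, [2, 3], 2.0945514815423265, "fun1.1"]]
tests = [dict(zip(_KEYS, testcase)) for testcase in _TESTS]

# Mirror _add_a_b: expose the bracket endpoints under "a" and "b".
for d in tests:
    for k, v in zip(["a", "b"], d.get("bracket", [])):
        d[k] = v

for d in tests:
    x = brentq(d["f"], d["a"], d["b"])
    assert np.isclose(x, d["root"])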
scipy
scipy-main/scipy/optimize/_trustregion_ncg.py
"""Newton-CG trust-region optimization.""" import math import numpy as np import scipy.linalg from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) __all__ = [] def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, **trust_region_options): """ Minimization of scalar function of one or more variables using the Newton conjugate gradient trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `gtol` before successful termination. """ if jac is None: raise ValueError('Jacobian is required for Newton-CG trust-region ' 'minimization') if hess is None and hessp is None: raise ValueError('Either the Hessian or the Hessian-vector product ' 'is required for Newton-CG trust-region minimization') return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=CGSteihaugSubproblem, **trust_region_options) class CGSteihaugSubproblem(BaseQuadraticSubproblem): """Quadratic subproblem solved by a conjugate gradient method""" def solve(self, trust_radius): """ Solve the subproblem using a conjugate gradient method. Parameters ---------- trust_radius : float We are allowed to wander only this far away from the origin. Returns ------- p : ndarray The proposed step. hits_boundary : bool True if the proposed step is on the boundary of the trust region. Notes ----- This is algorithm (7.2) of Nocedal and Wright 2nd edition. Only the function that computes the Hessian-vector product is required. The Hessian itself is not required, and the Hessian does not need to be positive semidefinite. """ # get the norm of jacobian and define the origin p_origin = np.zeros_like(self.jac) # define a default tolerance tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag # Stop the method if the search direction # is a direction of nonpositive curvature. if self.jac_mag < tolerance: hits_boundary = False return p_origin, hits_boundary # init the state for the first iteration z = p_origin r = self.jac d = -r # Search for the min of the approximation of the objective function. while True: # do an iteration Bd = self.hessp(d) dBd = np.dot(d, Bd) if dBd <= 0: # Look at the two boundary points. # Find both values of t to get the boundary points such that # ||z + t d|| == trust_radius # and then choose the one with the predicted min value. ta, tb = self.get_boundaries_intersections(z, d, trust_radius) pa = z + ta * d pb = z + tb * d if self(pa) < self(pb): p_boundary = pa else: p_boundary = pb hits_boundary = True return p_boundary, hits_boundary r_squared = np.dot(r, r) alpha = r_squared / dBd z_next = z + alpha * d if scipy.linalg.norm(z_next) >= trust_radius: # Find t >= 0 to get the boundary point such that # ||z + t d|| == trust_radius ta, tb = self.get_boundaries_intersections(z, d, trust_radius) p_boundary = z + tb * d hits_boundary = True return p_boundary, hits_boundary r_next = r + alpha * Bd r_next_squared = np.dot(r_next, r_next) if math.sqrt(r_next_squared) < tolerance: hits_boundary = False return z_next, hits_boundary beta_next = r_next_squared / r_squared d_next = -r_next + beta_next * d # update the state for the next iteration z = z_next r = r_next d = d_next
4,580
35.070866
79
py
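A brief usage sketch: assuming this module is what scipy.optimize.minimize dispatches to for method='trust-ncg', the call below exercises the two requirements checked above (a gradient, plus a Hessian or Hessian-vector product). The Rosenbrock helpers are SciPy's own; the starting point is arbitrary.

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
# jac is mandatory, and one of hess/hessp is mandatory, per the checks above.
res = minimize(rosen, x0, method='trust-ncg', jac=rosen_der, hess=rosen_hess)
print(res.x)  # converges towards [1, 1, 1, 1, 1]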
scipy
scipy-main/scipy/optimize/minpack2.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.optimize` namespace for importing the functions # included below. import warnings from . import _minpack2 __all__ = [ # noqa: F822 'dcsrch', 'dcstep', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.optimize.minpack2 is deprecated and has no attribute " f"{name}. Try looking in scipy.optimize instead.") warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, " "the `scipy.optimize.minpack2` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_minpack2, name)
769
24.666667
78
py
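For illustration only, a generic, self-contained sketch of the module-level __getattr__ deprecation pattern (PEP 562) that the shim above relies on. _new_home, the lambda, and the warning text are stand-ins, not SciPy internals; in the real module the forwarding target is the private _minpack2 module.

import types
import warnings

# Stand-in for the private module that the deprecated namespace forwards to.
_new_home = types.SimpleNamespace(dcsrch=lambda *a, **kw: "moved implementation")

__all__ = ['dcsrch']

def __getattr__(name):
    if name not in __all__:
        raise AttributeError(f"deprecated module has no attribute {name}")
    warnings.warn(f"`{name}` has moved; this namespace is deprecated.",
                  DeprecationWarning, stacklevel=2)
    return getattr(_new_home, name)

# When this code lives in a module, `import shim; shim.dcsrch` triggers the
# warning and returns the forwarded object.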
scipy
scipy-main/scipy/optimize/_milp.py
import warnings import numpy as np from scipy.sparse import csc_array, vstack, issparse from ._highs._highs_wrapper import _highs_wrapper # type: ignore[import] from ._constraints import LinearConstraint, Bounds from ._optimize import OptimizeResult from ._linprog_highs import _highs_to_scipy_status_message def _constraints_to_components(constraints): """ Convert sequence of constraints to a single set of components A, b_l, b_u. `constraints` could be 1. A LinearConstraint 2. A tuple representing a LinearConstraint 3. An invalid object 4. A sequence of composed entirely of objects of type 1/2 5. A sequence containing at least one object of type 3 We want to accept 1, 2, and 4 and reject 3 and 5. """ message = ("`constraints` (or each element within `constraints`) must be " "convertible into an instance of " "`scipy.optimize.LinearConstraint`.") As = [] b_ls = [] b_us = [] # Accept case 1 by standardizing as case 4 if isinstance(constraints, LinearConstraint): constraints = [constraints] else: # Reject case 3 try: iter(constraints) except TypeError as exc: raise ValueError(message) from exc # Accept case 2 by standardizing as case 4 if len(constraints) == 3: # argument could be a single tuple representing a LinearConstraint try: constraints = [LinearConstraint(*constraints)] except (TypeError, ValueError, np.VisibleDeprecationWarning): # argument was not a tuple representing a LinearConstraint pass # Address cases 4/5 for constraint in constraints: # if it's not a LinearConstraint or something that represents a # LinearConstraint at this point, it's invalid if not isinstance(constraint, LinearConstraint): try: constraint = LinearConstraint(*constraint) except TypeError as exc: raise ValueError(message) from exc As.append(csc_array(constraint.A)) b_ls.append(np.atleast_1d(constraint.lb).astype(np.double)) b_us.append(np.atleast_1d(constraint.ub).astype(np.double)) if len(As) > 1: A = vstack(As, format="csc") b_l = np.concatenate(b_ls) b_u = np.concatenate(b_us) else: # avoid unnecessary copying A = As[0] b_l = b_ls[0] b_u = b_us[0] return A, b_l, b_u def _milp_iv(c, integrality, bounds, constraints, options): # objective IV if issparse(c): raise ValueError("`c` must be a dense array.") c = np.atleast_1d(c).astype(np.double) if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)): message = ("`c` must be a one-dimensional array of finite numbers " "with at least one element.") raise ValueError(message) # integrality IV if issparse(integrality): raise ValueError("`integrality` must be a dense array.") message = ("`integrality` must contain integers 0-3 and be broadcastable " "to `c.shape`.") if integrality is None: integrality = 0 try: integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8) except ValueError: raise ValueError(message) if integrality.min() < 0 or integrality.max() > 3: raise ValueError(message) # bounds IV if bounds is None: bounds = Bounds(0, np.inf) elif not isinstance(bounds, Bounds): message = ("`bounds` must be convertible into an instance of " "`scipy.optimize.Bounds`.") try: bounds = Bounds(*bounds) except TypeError as exc: raise ValueError(message) from exc try: lb = np.broadcast_to(bounds.lb, c.shape).astype(np.double) ub = np.broadcast_to(bounds.ub, c.shape).astype(np.double) except (ValueError, TypeError) as exc: message = ("`bounds.lb` and `bounds.ub` must contain reals and " "be broadcastable to `c.shape`.") raise ValueError(message) from exc # constraints IV if not constraints: constraints = [LinearConstraint(np.empty((0, c.size)), np.empty((0,)), 
np.empty((0,)))] try: A, b_l, b_u = _constraints_to_components(constraints) except ValueError as exc: message = ("`constraints` (or each element within `constraints`) must " "be convertible into an instance of " "`scipy.optimize.LinearConstraint`.") raise ValueError(message) from exc if A.shape != (b_l.size, c.size): message = "The shape of `A` must be (len(b_l), len(c))." raise ValueError(message) indptr, indices, data = A.indptr, A.indices, A.data.astype(np.double) # options IV options = options or {} supported_options = {'disp', 'presolve', 'time_limit', 'node_limit', 'mip_rel_gap'} unsupported_options = set(options).difference(supported_options) if unsupported_options: message = (f"Unrecognized options detected: {unsupported_options}. " "These will be passed to HiGHS verbatim.") warnings.warn(message, RuntimeWarning, stacklevel=3) options_iv = {'log_to_console': options.pop("disp", False), 'mip_max_nodes': options.pop("node_limit", None)} options_iv.update(options) return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv def milp(c, *, integrality=None, bounds=None, constraints=None, options=None): r""" Mixed-integer linear programming Solves problems of the following form: .. math:: \min_x \ & c^T x \\ \mbox{such that} \ & b_l \leq A x \leq b_u,\\ & l \leq x \leq u, \\ & x_i \in \mathbb{Z}, i \in X_i where :math:`x` is a vector of decision variables; :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors; :math:`A` is a matrix, and :math:`X_i` is the set of indices of decision variables that must be integral. (In this context, a variable that can assume only integer values is said to be "integral"; it has an "integrality" constraint.) Alternatively, that's: minimize:: c @ x such that:: b_l <= A @ x <= b_u l <= x <= u Specified elements of x must be integers By default, ``l = 0`` and ``u = np.inf`` unless specified with ``bounds``. Parameters ---------- c : 1D dense array_like The coefficients of the linear objective function to be minimized. `c` is converted to a double precision array before the problem is solved. integrality : 1D dense array_like, optional Indicates the type of integrality constraint on each decision variable. ``0`` : Continuous variable; no integrality constraint. ``1`` : Integer variable; decision variable must be an integer within `bounds`. ``2`` : Semi-continuous variable; decision variable must be within `bounds` or take value ``0``. ``3`` : Semi-integer variable; decision variable must be an integer within `bounds` or take value ``0``. By default, all variables are continuous. `integrality` is converted to an array of integers before the problem is solved. bounds : scipy.optimize.Bounds, optional Bounds on the decision variables. Lower and upper bounds are converted to double precision arrays before the problem is solved. The ``keep_feasible`` parameter of the `Bounds` object is ignored. If not specified, all decision variables are constrained to be non-negative. constraints : sequence of scipy.optimize.LinearConstraint, optional Linear constraints of the optimization problem. Arguments may be one of the following: 1. A single `LinearConstraint` object 2. A single tuple that can be converted to a `LinearConstraint` object as ``LinearConstraint(*constraints)`` 3. A sequence composed entirely of objects of type 1. and 2. Before the problem is solved, all values are converted to double precision, and the matrices of constraint coefficients are converted to instances of `scipy.sparse.csc_array`. 
The ``keep_feasible`` parameter of `LinearConstraint` objects is ignored. options : dict, optional A dictionary of solver options. The following keys are recognized. disp : bool (default: ``False``) Set to ``True`` if indicators of optimization status are to be printed to the console during optimization. node_limit : int, optional The maximum number of nodes (linear program relaxations) to solve before stopping. Default is no maximum number of nodes. presolve : bool (default: ``True``) Presolve attempts to identify trivial infeasibilities, identify trivial unboundedness, and simplify the problem before sending it to the main solver. time_limit : float, optional The maximum number of seconds allotted to solve the problem. Default is no time limit. mip_rel_gap : float, optional Termination criterion for MIP solver: solver will terminate when the gap between the primal objective value and the dual objective bound, scaled by the primal objective value, is <= mip_rel_gap. Returns ------- res : OptimizeResult An instance of :class:`scipy.optimize.OptimizeResult`. The object is guaranteed to have the following attributes. status : int An integer representing the exit status of the algorithm. ``0`` : Optimal solution found. ``1`` : Iteration or time limit reached. ``2`` : Problem is infeasible. ``3`` : Problem is unbounded. ``4`` : Other; see message for details. success : bool ``True`` when an optimal solution is found and ``False`` otherwise. message : str A string descriptor of the exit status of the algorithm. The following attributes will also be present, but the values may be ``None``, depending on the solution status. x : ndarray The values of the decision variables that minimize the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. mip_node_count : int The number of subproblems or "nodes" solved by the MILP solver. mip_dual_bound : float The MILP solver's final estimate of the lower bound on the optimal solution. mip_gap : float The difference between the primal objective value and the dual objective bound, scaled by the primal objective value. Notes ----- `milp` is a wrapper of the HiGHS linear optimization software [1]_. The algorithm is deterministic, and it typically finds the global optimum of moderately challenging mixed-integer linear programs (when it exists). References ---------- .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. "HiGHS - high performance software for linear optimization." https://highs.dev/ .. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised simplex method." Mathematical Programming Computation, 10 (1), 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 Examples -------- Consider the problem at https://en.wikipedia.org/wiki/Integer_programming#Example, which is expressed as a maximization problem of two variables. Since `milp` requires that the problem be expressed as a minimization problem, the objective function coefficients on the decision variables are: >>> import numpy as np >>> c = -np.array([0, 1]) Note the negative sign: we maximize the original objective function by minimizing the negative of the objective function. We collect the coefficients of the constraints into arrays like: >>> A = np.array([[-1, 1], [3, 2], [2, 3]]) >>> b_u = np.array([1, 12, 12]) >>> b_l = np.full_like(b_u, -np.inf) Because there is no lower limit on these constraints, we have defined a variable ``b_l`` full of values representing negative infinity. 
This may be unfamiliar to users of `scipy.optimize.linprog`, which only accepts "less than" (or "upper bound") inequality constraints of the form ``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints ``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify "greater than" inequality constraints, "less than" inequality constraints, and equality constraints concisely. These arrays are collected into a single `LinearConstraint` object like: >>> from scipy.optimize import LinearConstraint >>> constraints = LinearConstraint(A, b_l, b_u) The non-negativity bounds on the decision variables are enforced by default, so we do not need to provide an argument for `bounds`. Finally, the problem states that both decision variables must be integers: >>> integrality = np.ones_like(c) We solve the problem like: >>> from scipy.optimize import milp >>> res = milp(c=c, constraints=constraints, integrality=integrality) >>> res.x [1.0, 2.0] Note that had we solved the relaxed problem (without integrality constraints): >>> res = milp(c=c, constraints=constraints) # OR: >>> # from scipy.optimize import linprog; res = linprog(c, A, b_u) >>> res.x [1.8, 2.8] we would not have obtained the correct solution by rounding to the nearest integers. Other examples are given :ref:`in the tutorial <tutorial-optimize_milp>`. """ args_iv = _milp_iv(c, integrality, bounds, constraints, options) c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u, lb, ub, integrality, options) res = {} # Convert to scipy-style status and message highs_status = highs_res.get('status', None) highs_message = highs_res.get('message', None) status, message = _highs_to_scipy_status_message(highs_status, highs_message) res['status'] = status res['message'] = message res['success'] = (status == 0) x = highs_res.get('x', None) res['x'] = np.array(x) if x is not None else None res['fun'] = highs_res.get('fun', None) res['mip_node_count'] = highs_res.get('mip_node_count', None) res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None) res['mip_gap'] = highs_res.get('mip_gap', None) return OptimizeResult(res)
15,129
37.596939
79
py
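A minimal sketch, separate from the docstring example above, showing the same milp call signature on a tiny 0/1 knapsack. All numbers are made up for illustration; the negated objective, the single LinearConstraint row, the integrality array, and the 0/1 Bounds follow the conventions described in the docstring.

import numpy as np
from scipy.optimize import milp, LinearConstraint, Bounds

values = np.array([10, 14, 18, 31])          # value of each item
weights = np.array([[2, 3, 4, 6]])           # one "<= capacity" constraint row
capacity = 7

res = milp(c=-values,                        # milp minimizes, so negate the values
           constraints=LinearConstraint(weights, -np.inf, capacity),
           integrality=np.ones(4),           # all decision variables integral
           bounds=Bounds(0, 1))              # 0/1 decision variables
print(res.x, -res.fun)                       # selected items and total value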
scipy
scipy-main/scipy/optimize/moduleTNC.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.optimize` namespace for importing the functions # included below. import warnings from . import _moduleTNC __all__ = [ # noqa: F822 ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.optimize.moduleTNC is deprecated and has no attribute " f"{name}. Try looking in scipy.optimize instead.") warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, " "the `scipy.optimize.moduleTNC` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_moduleTNC, name)
746
24.758621
78
py
scipy
scipy-main/scipy/optimize/_root.py
""" Unified interfaces to root finding algorithms. Functions --------- - root : find a root of a vector function. """ __all__ = ['root'] import numpy as np from warnings import warn from ._optimize import MemoizeJac, OptimizeResult, _check_unknown_options from ._minpack_py import _root_hybr, leastsq from ._spectral import _root_df_sane from . import _nonlin as nonlin ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov', 'df-sane'] def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, options=None): r""" Find a root of a vector function. Parameters ---------- fun : callable A vector function to find a root of. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to the objective function and its Jacobian. method : str, optional Type of solver. Should be one of - 'hybr' :ref:`(see here) <optimize.root-hybr>` - 'lm' :ref:`(see here) <optimize.root-lm>` - 'broyden1' :ref:`(see here) <optimize.root-broyden1>` - 'broyden2' :ref:`(see here) <optimize.root-broyden2>` - 'anderson' :ref:`(see here) <optimize.root-anderson>` - 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>` - 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>` - 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>` - 'krylov' :ref:`(see here) <optimize.root-krylov>` - 'df-sane' :ref:`(see here) <optimize.root-dfsane>` jac : bool or callable, optional If `jac` is a Boolean and is True, `fun` is assumed to return the value of Jacobian along with the objective function. If False, the Jacobian will be estimated numerically. `jac` can also be a callable returning the Jacobian of `fun`. In this case, it must accept the same arguments as `fun`. tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. callback : function, optional Optional callback function. It is called on every iteration as ``callback(x, f)`` where `x` is the current solution and `f` the corresponding residual. For all methods but 'hybr' and 'lm'. options : dict, optional A dictionary of solver options. E.g., `xtol` or `maxiter`, see :obj:`show_options()` for details. Returns ------- sol : OptimizeResult The solution represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the algorithm exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *hybr*. Method *hybr* uses a modification of the Powell hybrid method as implemented in MINPACK [1]_. Method *lm* solves the system of nonlinear equations in a least squares sense using a modification of the Levenberg-Marquardt algorithm as implemented in MINPACK [1]_. Method *df-sane* is a derivative-free spectral method. [3]_ Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*, *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods, with backtracking or full line searches [2]_. Each method corresponds to a particular Jacobian approximations. - Method *broyden1* uses Broyden's first Jacobian approximation, it is known as Broyden's good method. - Method *broyden2* uses Broyden's second Jacobian approximation, it is known as Broyden's bad method. 
- Method *anderson* uses (extended) Anderson mixing. - Method *Krylov* uses Krylov approximation for inverse Jacobian. It is suitable for large-scale problem. - Method *diagbroyden* uses diagonal Broyden Jacobian approximation. - Method *linearmixing* uses a scalar Jacobian approximation. - Method *excitingmixing* uses a tuned diagonal Jacobian approximation. .. warning:: The algorithms implemented for methods *diagbroyden*, *linearmixing* and *excitingmixing* may be useful for specific problems, but whether they will work may depend strongly on the problem. .. versionadded:: 0.11.0 References ---------- .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom. 1980. User Guide for MINPACK-1. .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear Equations. Society for Industrial and Applied Mathematics. <https://archive.siam.org/books/kelley/fr16/> .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006). Examples -------- The following functions define a system of nonlinear equations and its jacobian. >>> import numpy as np >>> def fun(x): ... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0, ... 0.5 * (x[1] - x[0])**3 + x[1]] >>> def jac(x): ... return np.array([[1 + 1.5 * (x[0] - x[1])**2, ... -1.5 * (x[0] - x[1])**2], ... [-1.5 * (x[1] - x[0])**2, ... 1 + 1.5 * (x[1] - x[0])**2]]) A solution can be obtained as follows. >>> from scipy import optimize >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr') >>> sol.x array([ 0.8411639, 0.1588361]) **Large problem** Suppose that we needed to solve the following integrodifferential equation on the square :math:`[0,1]\times[0,1]`: .. math:: \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of the square. The solution can be found using the ``method='krylov'`` solver: >>> from scipy import optimize >>> # parameters >>> nx, ny = 75, 75 >>> hx, hy = 1./(nx-1), 1./(ny-1) >>> P_left, P_right = 0, 0 >>> P_top, P_bottom = 1, 0 >>> def residual(P): ... d2x = np.zeros_like(P) ... d2y = np.zeros_like(P) ... ... d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx ... d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx ... d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx ... ... d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy ... d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy ... d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy ... ... return d2x + d2y - 10*np.cosh(P).mean()**2 >>> guess = np.zeros((nx, ny), float) >>> sol = optimize.root(residual, guess, method='krylov') >>> print('Residual: %g' % abs(residual(sol.x)).max()) Residual: 5.7972e-06 # may vary >>> import matplotlib.pyplot as plt >>> x, y = np.mgrid[0:1:(nx*1j), 0:1:(ny*1j)] >>> plt.pcolormesh(x, y, sol.x, shading='gouraud') >>> plt.colorbar() >>> plt.show() """ if not isinstance(args, tuple): args = (args,) meth = method.lower() if options is None: options = {} if callback is not None and meth in ('hybr', 'lm'): warn('Method %s does not accept callback.' 
% method, RuntimeWarning) # fun also returns the Jacobian if not callable(jac) and meth in ('hybr', 'lm'): if bool(jac): fun = MemoizeJac(fun) jac = fun.derivative else: jac = None # set default tolerances if tol is not None: options = dict(options) if meth in ('hybr', 'lm'): options.setdefault('xtol', tol) elif meth in ('df-sane',): options.setdefault('ftol', tol) elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov'): options.setdefault('xtol', tol) options.setdefault('xatol', np.inf) options.setdefault('ftol', np.inf) options.setdefault('fatol', np.inf) if meth == 'hybr': sol = _root_hybr(fun, x0, args=args, jac=jac, **options) elif meth == 'lm': sol = _root_leastsq(fun, x0, args=args, jac=jac, **options) elif meth == 'df-sane': _warn_jac_unused(jac, method) sol = _root_df_sane(fun, x0, args=args, callback=callback, **options) elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov'): _warn_jac_unused(jac, method) sol = _root_nonlin_solve(fun, x0, args=args, jac=jac, _method=meth, _callback=callback, **options) else: raise ValueError('Unknown solver %s' % method) return sol def _warn_jac_unused(jac, method): if jac is not None: warn(f'Method {method} does not use the jacobian (jac).', RuntimeWarning) def _root_leastsq(fun, x0, args=(), jac=None, col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08, gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None, **unknown_options): """ Solve for least squares with Levenberg-Marquardt Options ------- col_deriv : bool non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). ftol : float Relative error desired in the sum of squares. xtol : float Relative error desired in the approximate solution. gtol : float Orthogonality desired between the function vector and the columns of the Jacobian. maxiter : int The maximum number of calls to the function. If zero, then 100*(N+1) is the maximum where N is the number of elements in x0. epsfcn : float A suitable step length for the forward-difference approximation of the Jacobian (for Dfun=None). If epsfcn is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor : float A parameter determining the initial step bound (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``. diag : sequence N positive entries that serve as a scale factors for the variables. 
""" _check_unknown_options(unknown_options) x, cov_x, info, msg, ier = leastsq(fun, x0, args=args, Dfun=jac, full_output=True, col_deriv=col_deriv, xtol=xtol, ftol=ftol, gtol=gtol, maxfev=maxiter, epsfcn=eps, factor=factor, diag=diag) sol = OptimizeResult(x=x, message=msg, status=ier, success=ier in (1, 2, 3, 4), cov_x=cov_x, fun=info.pop('fvec')) sol.update(info) return sol def _root_nonlin_solve(fun, x0, args=(), jac=None, _callback=None, _method=None, nit=None, disp=False, maxiter=None, ftol=None, fatol=None, xtol=None, xatol=None, tol_norm=None, line_search='armijo', jac_options=None, **unknown_options): _check_unknown_options(unknown_options) f_tol = fatol f_rtol = ftol x_tol = xatol x_rtol = xtol verbose = disp if jac_options is None: jac_options = dict() jacobian = {'broyden1': nonlin.BroydenFirst, 'broyden2': nonlin.BroydenSecond, 'anderson': nonlin.Anderson, 'linearmixing': nonlin.LinearMixing, 'diagbroyden': nonlin.DiagBroyden, 'excitingmixing': nonlin.ExcitingMixing, 'krylov': nonlin.KrylovJacobian }[_method] if args: if jac is True: def f(x): return fun(x, *args)[0] else: def f(x): return fun(x, *args) else: f = fun x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options), iter=nit, verbose=verbose, maxiter=maxiter, f_tol=f_tol, f_rtol=f_rtol, x_tol=x_tol, x_rtol=x_rtol, tol_norm=tol_norm, line_search=line_search, callback=_callback, full_output=True, raise_exception=False) sol = OptimizeResult(x=x) sol.update(info) return sol def _root_broyden1_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart`` Drop all matrix columns. Has no extra parameters. - ``simple`` Drop oldest matrix column. Has no extra parameters. - ``svd`` Keep only the most significant SVD components. Extra parameters: - ``to_retain`` Number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (i.e., no rank reduction). Examples -------- >>> def func(x): ... 
return np.cos(x) + x[::-1] - [1, 2, 3, 4] ... >>> from scipy import optimize >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-14) >>> x = res.x >>> x array([4.04674914, 3.91158389, 2.71791677, 1.61756251]) >>> np.cos(x) + x[::-1] array([1., 2., 3., 4.]) """ pass def _root_broyden2_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial guess for the Jacobian is (-1/alpha). reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart`` Drop all matrix columns. Has no extra parameters. - ``simple`` Drop oldest matrix column. Has no extra parameters. - ``svd`` Keep only the most significant SVD components. Extra parameters: - ``to_retain`` Number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (i.e., no rank reduction). """ pass def _root_anderson_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. 
alpha : float, optional Initial guess for the Jacobian is (-1/alpha). M : float, optional Number of previous vectors to retain. Defaults to 5. w0 : float, optional Regularization parameter for numerical stability. Compared to unity, good values of the order of 0.01. """ pass def _root_linearmixing_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, ``NoConvergence`` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional initial guess for the jacobian is (-1/alpha). """ pass def _root_diagbroyden_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional initial guess for the jacobian is (-1/alpha). """ pass def _root_excitingmixing_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. 
xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. alpha : float, optional Initial Jacobian approximation is (-1/alpha). alphamax : float, optional The entries of the diagonal Jacobian are kept in the range ``[alpha, alphamax]``. """ pass def _root_krylov_doc(): """ Options ------- nit : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. disp : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. ftol : float, optional Relative tolerance for the residual. If omitted, not used. fatol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. xtol : float, optional Relative minimum step size. If omitted, not used. xatol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. jac_options : dict, optional Options for the respective Jacobian approximation. rdiff : float, optional Relative step size to use in numerical differentiation. method : str or callable, optional Krylov method to use to approximate the Jacobian. Can be a string, or a function implementing the same interface as the iterative solvers in `scipy.sparse.linalg`. If a string, needs to be one of: ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``, ``'tfqmr'``. The default is `scipy.sparse.linalg.lgmres`. inner_M : LinearOperator or InverseJacobian Preconditioner for the inner Krylov iteration. Note that you can use also inverse Jacobians as (adaptive) preconditioners. For example, >>> jac = BroydenFirst() >>> kjac = KrylovJacobian(inner_M=jac.inverse). If the preconditioner has a method named 'update', it will be called as ``update(x, f)`` after each nonlinear step, with ``x`` giving the current point, and ``f`` the current function value. inner_tol, inner_maxiter, ... Parameters to pass on to the "inner" Krylov solver. See `scipy.sparse.linalg.gmres` for details. outer_k : int, optional Size of the subspace kept across LGMRES nonlinear iterations. See `scipy.sparse.linalg.lgmres` for details. """ pass
28,280
38.333797
81
py
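A short sketch of the jac=True convention handled by MemoizeJac in the code above: the callable returns both the residual and its Jacobian, and root() splits them for the 'hybr' solver. The system and its Jacobian are the ones from the docstring example; the zero starting point and the printed root match that example.

import numpy as np
from scipy.optimize import root

def fun_and_jac(x):
    f = np.array([x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                  0.5 * (x[1] - x[0])**3 + x[1]])
    jac = np.array([[1 + 1.5 * (x[0] - x[1])**2, -1.5 * (x[0] - x[1])**2],
                    [-1.5 * (x[1] - x[0])**2, 1 + 1.5 * (x[1] - x[0])**2]])
    return f, jac

# jac=True tells root() that fun returns (F, J); MemoizeJac splits the pair.
sol = root(fun_and_jac, [0, 0], jac=True, method='hybr')
print(sol.x)  # close to [0.8411639, 0.1588361]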
scipy
scipy-main/scipy/optimize/_shgo.py
"""shgo: The simplicial homology global optimisation algorithm.""" from collections import namedtuple import time import logging import warnings import sys import numpy as np from scipy import spatial from scipy.optimize import OptimizeResult, minimize, Bounds from scipy.optimize._optimize import MemoizeJac from scipy.optimize._constraints import new_bounds_to_old from scipy.optimize._minimize import standardize_constraints from scipy._lib._util import _FunctionWrapper from scipy.optimize._shgo_lib._complex import Complex __all__ = ['shgo'] def shgo( func, bounds, args=(), constraints=None, n=100, iters=1, callback=None, minimizer_kwargs=None, options=None, sampling_method='simplicial', *, workers=1 ): """ Finds the global minimum of a function using SHG optimization. SHGO stands for "simplicial homology global optimization". Parameters ---------- func : callable The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. bounds : sequence or `Bounds` Bounds for variables. There are two ways to specify the bounds: 1. Instance of `Bounds` class. 2. Sequence of ``(min, max)`` pairs for each element in `x`. args : tuple, optional Any additional fixed parameters needed to completely specify the objective function. constraints : {Constraint, dict} or List of {Constraint, dict}, optional Constraints definition. Only for COBYLA, SLSQP and trust-constr. See the tutorial [5]_ for further details on specifying constraints. .. note:: Only COBYLA, SLSQP, and trust-constr local minimize methods currently support constraint arguments. If the ``constraints`` sequence used in the local optimization problem is not defined in ``minimizer_kwargs`` and a constrained method is used then the global ``constraints`` will be used. (Defining a ``constraints`` sequence in ``minimizer_kwargs`` means that ``constraints`` will not be added so if equality constraints and so forth need to be added then the inequality functions in ``constraints`` need to be added to ``minimizer_kwargs`` too). COBYLA only supports inequality constraints. .. versionchanged:: 1.11.0 ``constraints`` accepts `NonlinearConstraint`, `LinearConstraint`. n : int, optional Number of sampling points used in the construction of the simplicial complex. For the default ``simplicial`` sampling method 2**dim + 1 sampling points are generated instead of the default `n=100`. For all other specified values `n` sampling points are generated. For ``sobol``, ``halton`` and other arbitrary `sampling_methods` `n=100` or another speciefied number of sampling points are generated. iters : int, optional Number of iterations used in the construction of the simplicial complex. Default is 1. callback : callable, optional Called after each iteration, as ``callback(xk)``, where ``xk`` is the current parameter vector. minimizer_kwargs : dict, optional Extra keyword arguments to be passed to the minimizer ``scipy.optimize.minimize`` Some important options could be: * method : str The minimization method. If not given, chosen to be one of BFGS, L-BFGS-B, SLSQP, depending on whether or not the problem has constraints or bounds. * args : tuple Extra arguments passed to the objective function (``func``) and its derivatives (Jacobian, Hessian). * options : dict, optional Note that by default the tolerance is specified as ``{ftol: 1e-12}`` options : dict, optional A dictionary of solver options. 
Many of the options specified for the global routine are also passed to the scipy.optimize.minimize routine. The options that are also passed to the local routine are marked with "(L)". Stopping criteria, the algorithm will terminate if any of the specified criteria are met. However, the default algorithm does not require any to be specified: * maxfev : int (L) Maximum number of function evaluations in the feasible domain. (Note only methods that support this option will terminate the routine at precisely exact specified value. Otherwise the criterion will only terminate during a global iteration) * f_min Specify the minimum objective function value, if it is known. * f_tol : float Precision goal for the value of f in the stopping criterion. Note that the global routine will also terminate if a sampling point in the global routine is within this tolerance. * maxiter : int Maximum number of iterations to perform. * maxev : int Maximum number of sampling evaluations to perform (includes searching in infeasible points). * maxtime : float Maximum processing runtime allowed * minhgrd : int Minimum homology group rank differential. The homology group of the objective function is calculated (approximately) during every iteration. The rank of this group has a one-to-one correspondence with the number of locally convex subdomains in the objective function (after adequate sampling points each of these subdomains contain a unique global minimum). If the difference in the hgr is 0 between iterations for ``maxhgrd`` specified iterations the algorithm will terminate. Objective function knowledge: * symmetry : list or bool Specify if the objective function contains symmetric variables. The search space (and therefore performance) is decreased by up to O(n!) times in the fully symmetric case. If `True` is specified then all variables will be set symmetric to the first variable. Default is set to False. E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 In this equation x_2 and x_3 are symmetric to x_1, while x_5 and x_6 are symmetric to x_4, this can be specified to the solver as: symmetry = [0, # Variable 1 0, # symmetric to variable 1 0, # symmetric to variable 1 3, # Variable 4 3, # symmetric to variable 4 3, # symmetric to variable 4 ] * jac : bool or callable, optional Jacobian (gradient) of objective function. Only for CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If ``jac`` is a boolean and is True, ``fun`` is assumed to return the gradient along with the objective function. If False, the gradient will be estimated numerically. ``jac`` can also be a callable returning the gradient of the objective. In this case, it must accept the same arguments as ``fun``. (Passed to `scipy.optimize.minmize` automatically) * hess, hessp : callable, optional Hessian (matrix of second-order derivatives) of objective function or Hessian of objective function times an arbitrary vector p. Only for Newton-CG, dogleg, trust-ncg. Only one of ``hessp`` or ``hess`` needs to be given. If ``hess`` is provided, then ``hessp`` will be ignored. If neither ``hess`` nor ``hessp`` is provided, then the Hessian product will be approximated using finite differences on ``jac``. ``hessp`` must compute the Hessian times an arbitrary vector. (Passed to `scipy.optimize.minmize` automatically) Algorithm settings: * minimize_every_iter : bool If True then promising global sampling points will be passed to a local minimization routine every iteration. If True then only the final minimizer pool will be run. 
Defaults to True. * local_iter : int Only evaluate a few of the best minimizer pool candidates every iteration. If False all potential points are passed to the local minimization routine. * infty_constraints : bool If True then any sampling points generated which are outside will the feasible domain will be saved and given an objective function value of ``inf``. If False then these points will be discarded. Using this functionality could lead to higher performance with respect to function evaluations before the global minimum is found, specifying False will use less memory at the cost of a slight decrease in performance. Defaults to True. Feedback: * disp : bool (L) Set to True to print convergence messages. sampling_method : str or function, optional Current built in sampling method options are ``halton``, ``sobol`` and ``simplicial``. The default ``simplicial`` provides the theoretical guarantee of convergence to the global minimum in finite time. ``halton`` and ``sobol`` method are faster in terms of sampling point generation at the cost of the loss of guaranteed convergence. It is more appropriate for most "easier" problems where the convergence is relatively fast. User defined sampling functions must accept two arguments of ``n`` sampling points of dimension ``dim`` per call and output an array of sampling points with shape `n x dim`. workers : int or map-like callable, optional Sample and run the local serial minimizations in parallel. Supply -1 to use all available CPU cores, or an int to use that many Processes (uses `multiprocessing.Pool <multiprocessing>`). Alternatively supply a map-like callable, such as `multiprocessing.Pool.map` for parallel evaluation. This evaluation is carried out as ``workers(func, iterable)``. Requires that `func` be pickleable. .. versionadded:: 1.11.0 Returns ------- res : OptimizeResult The optimization result represented as a `OptimizeResult` object. Important attributes are: ``x`` the solution array corresponding to the global minimum, ``fun`` the function output at the global solution, ``xl`` an ordered list of local minima solutions, ``funl`` the function output at the corresponding local solutions, ``success`` a Boolean flag indicating if the optimizer exited successfully, ``message`` which describes the cause of the termination, ``nfev`` the total number of objective function evaluations including the sampling calls, ``nlfev`` the total number of objective function evaluations culminating from all local search optimizations, ``nit`` number of iterations performed by the global routine. Notes ----- Global optimization using simplicial homology global optimization [1]_. Appropriate for solving general purpose NLP and blackbox optimization problems to global optimality (low-dimensional problems). In general, the optimization problems are of the form:: minimize f(x) subject to g_i(x) >= 0, i = 1,...,m h_j(x) = 0, j = 1,...,p where x is a vector of one or more variables. ``f(x)`` is the objective function ``R^n -> R``, ``g_i(x)`` are the inequality constraints, and ``h_j(x)`` are the equality constraints. Optionally, the lower and upper bounds for each element in x can also be specified using the `bounds` argument. While most of the theoretical advantages of SHGO are only proven for when ``f(x)`` is a Lipschitz smooth function, the algorithm is also proven to converge to the global optimum for the more general case where ``f(x)`` is non-continuous, non-convex and non-smooth, if the default sampling method is used [1]_. 
The local search method may be specified using the ``minimizer_kwargs`` parameter which is passed on to ``scipy.optimize.minimize``. By default, the ``SLSQP`` method is used. In general, it is recommended to use the ``SLSQP`` or ``COBYLA`` local minimization if inequality constraints are defined for the problem since the other methods do not use constraints. The ``halton`` and ``sobol`` method points are generated using `scipy.stats.qmc`. Any other QMC method could be used. References ---------- .. [1] Endres, SC, Sandrock, C, Focke, WW (2018) "A simplicial homology algorithm for lipschitz optimisation", Journal of Global Optimization. .. [2] Joe, SW and Kuo, FY (2008) "Constructing Sobol' sequences with better two-dimensional projections", SIAM J. Sci. Comput. 30, 2635-2654. .. [3] Hock, W and Schittkowski, K (1981) "Test examples for nonlinear programming codes", Lecture Notes in Economics and Mathematical Systems, 187. Springer-Verlag, New York. http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf .. [4] Wales, DJ (2015) "Perspective: Insight into reaction coordinates and dynamics from the potential energy landscape", Journal of Chemical Physics, 142(13), 2015. .. [5] https://docs.scipy.org/doc/scipy/tutorial/optimize.html#constrained-minimization-of-multivariate-scalar-functions-minimize Examples -------- First consider the problem of minimizing the Rosenbrock function, `rosen`: >>> from scipy.optimize import rosen, shgo >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)] >>> result = shgo(rosen, bounds) >>> result.x, result.fun (array([1., 1., 1., 1., 1.]), 2.920392374190081e-18) Note that bounds determine the dimensionality of the objective function and is therefore a required input, however you can specify empty bounds using ``None`` or objects like ``np.inf`` which will be converted to large float numbers. >>> bounds = [(None, None), ]*4 >>> result = shgo(rosen, bounds) >>> result.x array([0.99999851, 0.99999704, 0.99999411, 0.9999882 ]) Next, we consider the Eggholder function, a problem with several local minima and one global minimum. We will demonstrate the use of arguments and the capabilities of `shgo`. (https://en.wikipedia.org/wiki/Test_functions_for_optimization) >>> import numpy as np >>> def eggholder(x): ... return (-(x[1] + 47.0) ... * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0)))) ... - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))) ... ) ... >>> bounds = [(-512, 512), (-512, 512)] `shgo` has built-in low discrepancy sampling sequences. First, we will input 64 initial sampling points of the *Sobol'* sequence: >>> result = shgo(eggholder, bounds, n=64, sampling_method='sobol') >>> result.x, result.fun (array([512. , 404.23180824]), -959.6406627208397) `shgo` also has a return for any other local minima that was found, these can be called using: >>> result.xl array([[ 512. 
, 404.23180824], [ 283.0759062 , -487.12565635], [-294.66820039, -462.01964031], [-105.87688911, 423.15323845], [-242.97926 , 274.38030925], [-506.25823477, 6.3131022 ], [-408.71980731, -156.10116949], [ 150.23207937, 301.31376595], [ 91.00920901, -391.283763 ], [ 202.89662724, -269.38043241], [ 361.66623976, -106.96493868], [-219.40612786, -244.06020508]]) >>> result.funl array([-959.64066272, -718.16745962, -704.80659592, -565.99778097, -559.78685655, -557.36868733, -507.87385942, -493.9605115 , -426.48799655, -421.15571437, -419.31194957, -410.98477763]) These results are useful in applications where there are many global minima and the values of other global minima are desired or where the local minima can provide insight into the system (for example morphologies in physical chemistry [4]_). If we want to find a larger number of local minima, we can increase the number of sampling points or the number of iterations. We'll increase the number of sampling points to 64 and the number of iterations from the default of 1 to 3. Using ``simplicial`` this would have given us 64 x 3 = 192 initial sampling points. >>> result_2 = shgo(eggholder, ... bounds, n=64, iters=3, sampling_method='sobol') >>> len(result.xl), len(result_2.xl) (12, 23) Note the difference between, e.g., ``n=192, iters=1`` and ``n=64, iters=3``. In the first case the promising points contained in the minimiser pool are processed only once. In the latter case it is processed every 64 sampling points for a total of 3 times. To demonstrate solving problems with non-linear constraints consider the following example from Hock and Schittkowski problem 73 (cattle-feed) [3]_:: minimize: f = 24.55 * x_1 + 26.75 * x_2 + 39 * x_3 + 40.50 * x_4 subject to: 2.3 * x_1 + 5.6 * x_2 + 11.1 * x_3 + 1.3 * x_4 - 5 >= 0, 12 * x_1 + 11.9 * x_2 + 41.8 * x_3 + 52.1 * x_4 - 21 -1.645 * sqrt(0.28 * x_1**2 + 0.19 * x_2**2 + 20.5 * x_3**2 + 0.62 * x_4**2) >= 0, x_1 + x_2 + x_3 + x_4 - 1 == 0, 1 >= x_i >= 0 for all i The approximate answer given in [3]_ is:: f([0.6355216, -0.12e-11, 0.3127019, 0.05177655]) = 29.894378 >>> def f(x): # (cattle-feed) ... return 24.55*x[0] + 26.75*x[1] + 39*x[2] + 40.50*x[3] ... >>> def g1(x): ... return 2.3*x[0] + 5.6*x[1] + 11.1*x[2] + 1.3*x[3] - 5 # >=0 ... >>> def g2(x): ... return (12*x[0] + 11.9*x[1] +41.8*x[2] + 52.1*x[3] - 21 ... - 1.645 * np.sqrt(0.28*x[0]**2 + 0.19*x[1]**2 ... + 20.5*x[2]**2 + 0.62*x[3]**2) ... ) # >=0 ... >>> def h1(x): ... return x[0] + x[1] + x[2] + x[3] - 1 # == 0 ... >>> cons = ({'type': 'ineq', 'fun': g1}, ... {'type': 'ineq', 'fun': g2}, ... {'type': 'eq', 'fun': h1}) >>> bounds = [(0, 1.0),]*4 >>> res = shgo(f, bounds, n=150, constraints=cons) >>> res message: Optimization terminated successfully. success: True fun: 29.894378159142136 funl: [ 2.989e+01] x: [ 6.355e-01 1.137e-13 3.127e-01 5.178e-02] xl: [[ 6.355e-01 1.137e-13 3.127e-01 5.178e-02]] nit: 1 nfev: 142 nlfev: 35 nljev: 5 nlhev: 0 >>> g1(res.x), g2(res.x), h1(res.x) (-5.062616992290714e-14, -2.9594104944408173e-12, 0.0) """ # if necessary, convert bounds class to old bounds if isinstance(bounds, Bounds): bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)) # Initiate SHGO class # use in context manager to make sure that any parallelization # resources are freed. 
with SHGO(func, bounds, args=args, constraints=constraints, n=n, iters=iters, callback=callback, minimizer_kwargs=minimizer_kwargs, options=options, sampling_method=sampling_method, workers=workers) as shc: # Run the algorithm, process results and test success shc.iterate_all() if not shc.break_routine: if shc.disp: logging.info("Successfully completed construction of complex.") # Test post iterations success if len(shc.LMC.xl_maps) == 0: # If sampling failed to find pool, return lowest sampled point # with a warning shc.find_lowest_vertex() shc.break_routine = True shc.fail_routine(mes="Failed to find a feasible minimizer point. " "Lowest sampling point = {}".format(shc.f_lowest)) shc.res.fun = shc.f_lowest shc.res.x = shc.x_lowest shc.res.nfev = shc.fn shc.res.tnev = shc.n_sampled else: # Test that the optimal solutions do not violate any constraints pass # TODO # Confirm the routine ran successfully if not shc.break_routine: shc.res.message = 'Optimization terminated successfully.' shc.res.success = True # Return the final results return shc.res class SHGO: def __init__(self, func, bounds, args=(), constraints=None, n=None, iters=None, callback=None, minimizer_kwargs=None, options=None, sampling_method='simplicial', workers=1): from scipy.stats import qmc # Input checks methods = ['halton', 'sobol', 'simplicial'] if isinstance(sampling_method, str) and sampling_method not in methods: raise ValueError(("Unknown sampling_method specified." " Valid methods: {}").format(', '.join(methods))) # Split obj func if given with Jac try: if ((minimizer_kwargs['jac'] is True) and (not callable(minimizer_kwargs['jac']))): self.func = MemoizeJac(func) jac = self.func.derivative minimizer_kwargs['jac'] = jac func = self.func # .fun else: self.func = func # Normal definition of objective function except (TypeError, KeyError): self.func = func # Normal definition of objective function # Initiate class self.func = _FunctionWrapper(func, args) self.bounds = bounds self.args = args self.callback = callback # Bounds abound = np.array(bounds, float) self.dim = np.shape(abound)[0] # Dimensionality of problem # Set none finite values to large floats infind = ~np.isfinite(abound) abound[infind[:, 0], 0] = -1e50 abound[infind[:, 1], 1] = 1e50 # Check if bounds are correctly specified bnderr = abound[:, 0] > abound[:, 1] if bnderr.any(): raise ValueError('Error: lb > ub in bounds {}.' .format(', '.join(str(b) for b in bnderr))) self.bounds = abound # Constraints # Process constraint dict sequence: self.constraints = constraints if constraints is not None: self.min_cons = constraints self.g_cons = [] self.g_args = [] # shgo internals deals with old-style constraints # self.constraints is used to create Complex, so need # to be stored internally in old-style. # `minimize` takes care of normalising these constraints # for slsqp/cobyla/trust-constr. 
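        # An "old-style" constraint is a plain dict such as
        #     {'type': 'ineq', 'fun': g, 'args': ()}
        # (see the cattle-feed example in the docstring above); new-style
        # LinearConstraint/NonlinearConstraint objects are converted to that
        # form here so the Complex construction below can evaluate them.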
self.constraints = standardize_constraints( constraints, np.empty(self.dim, float), 'old' ) for cons in self.constraints: if cons['type'] in ('ineq'): self.g_cons.append(cons['fun']) try: self.g_args.append(cons['args']) except KeyError: self.g_args.append(()) self.g_cons = tuple(self.g_cons) self.g_args = tuple(self.g_args) else: self.g_cons = None self.g_args = None # Define local minimization keyword arguments # Start with defaults self.minimizer_kwargs = {'method': 'SLSQP', 'bounds': self.bounds, 'options': {}, 'callback': self.callback } if minimizer_kwargs is not None: # Overwrite with supplied values self.minimizer_kwargs.update(minimizer_kwargs) else: self.minimizer_kwargs['options'] = {'ftol': 1e-12} if ( self.minimizer_kwargs['method'].lower() in ('slsqp', 'cobyla', 'trust-constr') and ( minimizer_kwargs is not None and 'constraints' not in minimizer_kwargs and constraints is not None ) or (self.g_cons is not None) ): self.minimizer_kwargs['constraints'] = self.min_cons # Process options dict if options is not None: self.init_options(options) else: # Default settings: self.f_min_true = None self.minimize_every_iter = True # Algorithm limits self.maxiter = None self.maxfev = None self.maxev = None self.maxtime = None self.f_min_true = None self.minhgrd = None # Objective function knowledge self.symmetry = None # Algorithm functionality self.infty_cons_sampl = True self.local_iter = False # Feedback self.disp = False # Remove unknown arguments in self.minimizer_kwargs # Start with arguments all the solvers have in common self.min_solver_args = ['fun', 'x0', 'args', 'callback', 'options', 'method'] # then add the ones unique to specific solvers solver_args = { '_custom': ['jac', 'hess', 'hessp', 'bounds', 'constraints'], 'nelder-mead': [], 'powell': [], 'cg': ['jac'], 'bfgs': ['jac'], 'newton-cg': ['jac', 'hess', 'hessp'], 'l-bfgs-b': ['jac', 'bounds'], 'tnc': ['jac', 'bounds'], 'cobyla': ['constraints', 'catol'], 'slsqp': ['jac', 'bounds', 'constraints'], 'dogleg': ['jac', 'hess'], 'trust-ncg': ['jac', 'hess', 'hessp'], 'trust-krylov': ['jac', 'hess', 'hessp'], 'trust-exact': ['jac', 'hess'], 'trust-constr': ['jac', 'hess', 'hessp', 'constraints'], } method = self.minimizer_kwargs['method'] self.min_solver_args += solver_args[method.lower()] # Only retain the known arguments def _restrict_to_keys(dictionary, goodkeys): """Remove keys from dictionary if not in goodkeys - inplace""" existingkeys = set(dictionary) for key in existingkeys - set(goodkeys): dictionary.pop(key, None) _restrict_to_keys(self.minimizer_kwargs, self.min_solver_args) _restrict_to_keys(self.minimizer_kwargs['options'], self.min_solver_args + ['ftol']) # Algorithm controls # Global controls self.stop_global = False # Used in the stopping_criteria method self.break_routine = False # Break the algorithm globally self.iters = iters # Iterations to be ran self.iters_done = 0 # Iterations completed self.n = n # Sampling points per iteration self.nc = 0 # n # Sampling points to sample in current iteration self.n_prc = 0 # Processed points (used to track Delaunay iters) self.n_sampled = 0 # To track no. of sampling points already generated self.fn = 0 # Number of feasible sampling points evaluations performed self.hgr = 0 # Homology group rank # Initially attempt to build the triangulation incrementally: self.qhull_incremental = True # Default settings if no sampling criteria. 
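        # (For example, a 3-D problem run with ``sampling_method='simplicial'``
        # starts from 2**3 + 1 = 9 sampling points per iteration, while the
        # QMC samplers fall back to 100 points per iteration, as set below.)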
if (self.n is None) and (self.iters is None) \ and (sampling_method == 'simplicial'): self.n = 2 ** self.dim + 1 self.nc = 0 # self.n if self.iters is None: self.iters = 1 if (self.n is None) and not (sampling_method == 'simplicial'): self.n = self.n = 100 self.nc = 0 # self.n if (self.n == 100) and (sampling_method == 'simplicial'): self.n = 2 ** self.dim + 1 if not ((self.maxiter is None) and (self.maxfev is None) and ( self.maxev is None) and (self.minhgrd is None) and (self.f_min_true is None)): self.iters = None # Set complex construction mode based on a provided stopping criteria: # Initialise sampling Complex and function cache # Note that sfield_args=() since args are already wrapped in self.func # using the_FunctionWrapper class. self.HC = Complex(dim=self.dim, domain=self.bounds, sfield=self.func, sfield_args=(), symmetry=self.symmetry, constraints=self.constraints, workers=workers) # Choose complex constructor if sampling_method == 'simplicial': self.iterate_complex = self.iterate_hypercube self.sampling_method = sampling_method elif sampling_method in ['halton', 'sobol'] or \ not isinstance(sampling_method, str): self.iterate_complex = self.iterate_delaunay # Sampling method used if sampling_method in ['halton', 'sobol']: if sampling_method == 'sobol': self.n = int(2 ** np.ceil(np.log2(self.n))) # self.n #TODO: Should always be self.n, this is # unacceptable for shgo, check that nfev behaves as # expected. self.nc = 0 self.sampling_method = 'sobol' self.qmc_engine = qmc.Sobol(d=self.dim, scramble=False, seed=0) else: self.sampling_method = 'halton' self.qmc_engine = qmc.Halton(d=self.dim, scramble=True, seed=0) def sampling_method(n, d): return self.qmc_engine.random(n) else: # A user defined sampling method: self.sampling_method = 'custom' self.sampling = self.sampling_custom self.sampling_function = sampling_method # F(n, d) # Local controls self.stop_l_iter = False # Local minimisation iterations self.stop_complex_iter = False # Sampling iterations # Initiate storage objects used in algorithm classes self.minimizer_pool = [] # Cache of local minimizers mapped self.LMC = LMapCache() # Initialize return object self.res = OptimizeResult() # scipy.optimize.OptimizeResult object self.res.nfev = 0 # Includes each sampling point as func evaluation self.res.nlfev = 0 # Local function evals for all minimisers self.res.nljev = 0 # Local Jacobian evals for all minimisers self.res.nlhev = 0 # Local Hessian evals for all minimisers # Initiation aids def init_options(self, options): """ Initiates the options. Can also be useful to change parameters after class initiation. Parameters ---------- options : dict Returns ------- None """ # Update 'options' dict passed to optimize.minimize # Do this first so we don't mutate `options` below. self.minimizer_kwargs['options'].update(options) # Ensure that 'jac', 'hess', and 'hessp' are passed directly to # `minimize` as keywords, not as part of its 'options' dictionary. for opt in ['jac', 'hess', 'hessp']: if opt in self.minimizer_kwargs['options']: self.minimizer_kwargs[opt] = ( self.minimizer_kwargs['options'].pop(opt)) # Default settings: self.minimize_every_iter = options.get('minimize_every_iter', True) # Algorithm limits # Maximum number of iterations to perform. 
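        # (maxiter is the first of the stopping-criteria options read here;
        # the others handled below are maxfev, maxev, maxtime, f_min/f_tol
        # and minhgrd, while symmetry, local_iter, infty_constraints and disp
        # tune the behaviour of the algorithm rather than its termination.)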
self.maxiter = options.get('maxiter', None) # Maximum number of function evaluations in the feasible domain self.maxfev = options.get('maxfev', None) # Maximum number of sampling evaluations (includes searching in # infeasible points self.maxev = options.get('maxev', None) # Maximum processing runtime allowed self.init = time.time() self.maxtime = options.get('maxtime', None) if 'f_min' in options: # Specify the minimum objective function value, if it is known. self.f_min_true = options['f_min'] self.f_tol = options.get('f_tol', 1e-4) else: self.f_min_true = None self.minhgrd = options.get('minhgrd', None) # Objective function knowledge self.symmetry = options.get('symmetry', False) if self.symmetry: self.symmetry = [0, ]*len(self.bounds) else: self.symmetry = None # Algorithm functionality # Only evaluate a few of the best candiates self.local_iter = options.get('local_iter', False) self.infty_cons_sampl = options.get('infty_constraints', True) # Feedback self.disp = options.get('disp', False) def __enter__(self): return self def __exit__(self, *args): return self.HC.V._mapwrapper.__exit__(*args) # Iteration properties # Main construction loop: def iterate_all(self): """ Construct for `iters` iterations. If uniform sampling is used, every iteration adds 'n' sampling points. Iterations if a stopping criteria (e.g., sampling points or processing time) has been met. """ if self.disp: logging.info('Splitting first generation') while not self.stop_global: if self.break_routine: break # Iterate complex, process minimisers self.iterate() self.stopping_criteria() # Build minimiser pool # Final iteration only needed if pools weren't minimised every # iteration if not self.minimize_every_iter: if not self.break_routine: self.find_minima() self.res.nit = self.iters_done # + 1 self.fn = self.HC.V.nfev def find_minima(self): """ Construct the minimizer pool, map the minimizers to local minima and sort the results into a global return object. 
""" if self.disp: logging.info('Searching for minimizer pool...') self.minimizers() if len(self.X_min) != 0: # Minimize the pool of minimizers with local minimization methods # Note that if Options['local_iter'] is an `int` instead of default # value False then only that number of candidates will be minimized self.minimise_pool(self.local_iter) # Sort results and build the global return object self.sort_result() # Lowest values used to report in case of failures self.f_lowest = self.res.fun self.x_lowest = self.res.x else: self.find_lowest_vertex() if self.disp: logging.info(f"Minimiser pool = SHGO.X_min = {self.X_min}") def find_lowest_vertex(self): # Find the lowest objective function value on one of # the vertices of the simplicial complex self.f_lowest = np.inf for x in self.HC.V.cache: if self.HC.V[x].f < self.f_lowest: if self.disp: logging.info(f'self.HC.V[x].f = {self.HC.V[x].f}') self.f_lowest = self.HC.V[x].f self.x_lowest = self.HC.V[x].x_a for lmc in self.LMC.cache: if self.LMC[lmc].f_min < self.f_lowest: self.f_lowest = self.LMC[lmc].f_min self.x_lowest = self.LMC[lmc].x_l if self.f_lowest == np.inf: # no feasible point self.f_lowest = None self.x_lowest = None # Stopping criteria functions: def finite_iterations(self): mi = min(x for x in [self.iters, self.maxiter] if x is not None) if self.disp: logging.info(f'Iterations done = {self.iters_done} / {mi}') if self.iters is not None: if self.iters_done >= (self.iters): self.stop_global = True if self.maxiter is not None: # Stop for infeasible sampling if self.iters_done >= (self.maxiter): self.stop_global = True return self.stop_global def finite_fev(self): # Finite function evals in the feasible domain if self.disp: logging.info(f'Function evaluations done = {self.fn} / {self.maxfev}') if self.fn >= self.maxfev: self.stop_global = True return self.stop_global def finite_ev(self): # Finite evaluations including infeasible sampling points if self.disp: logging.info(f'Sampling evaluations done = {self.n_sampled} ' f'/ {self.maxev}') if self.n_sampled >= self.maxev: self.stop_global = True def finite_time(self): if self.disp: logging.info(f'Time elapsed = {time.time() - self.init} ' f'/ {self.maxtime}') if (time.time() - self.init) >= self.maxtime: self.stop_global = True def finite_precision(self): """ Stop the algorithm if the final function value is known Specify in options (with ``self.f_min_true = options['f_min']``) and the tolerance with ``f_tol = options['f_tol']`` """ # If no minimizer has been found use the lowest sampling value self.find_lowest_vertex() if self.disp: logging.info(f'Lowest function evaluation = {self.f_lowest}') logging.info(f'Specified minimum = {self.f_min_true}') # If no feasible point was return from test if self.f_lowest is None: return self.stop_global # Function to stop algorithm at specified percentage error: if self.f_min_true == 0.0: if self.f_lowest <= self.f_tol: self.stop_global = True else: pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true) if self.f_lowest <= self.f_min_true: self.stop_global = True # 2if (pe - self.f_tol) <= abs(1.0 / abs(self.f_min_true)): if abs(pe) >= 2 * self.f_tol: warnings.warn("A much lower value than expected f* =" + f" {self.f_min_true} than" + " the was found f_lowest =" + f"{self.f_lowest} ") if pe <= self.f_tol: self.stop_global = True return self.stop_global def finite_homology_growth(self): """ Stop the algorithm if homology group rank did not grow in iteration. """ if self.LMC.size == 0: return # pass on no reason to stop yet. 
self.hgrd = self.LMC.size - self.hgr self.hgr = self.LMC.size if self.hgrd <= self.minhgrd: self.stop_global = True if self.disp: logging.info(f'Current homology growth = {self.hgrd} ' f' (minimum growth = {self.minhgrd})') return self.stop_global def stopping_criteria(self): """ Various stopping criteria ran every iteration Returns ------- stop : bool """ if self.maxiter is not None: self.finite_iterations() if self.iters is not None: self.finite_iterations() if self.maxfev is not None: self.finite_fev() if self.maxev is not None: self.finite_ev() if self.maxtime is not None: self.finite_time() if self.f_min_true is not None: self.finite_precision() if self.minhgrd is not None: self.finite_homology_growth() return self.stop_global def iterate(self): self.iterate_complex() # Build minimizer pool if self.minimize_every_iter: if not self.break_routine: self.find_minima() # Process minimizer pool # Algorithm updates self.iters_done += 1 def iterate_hypercube(self): """ Iterate a subdivision of the complex Note: called with ``self.iterate_complex()`` after class initiation """ # Iterate the complex if self.disp: logging.info('Constructing and refining simplicial complex graph ' 'structure') if self.n is None: self.HC.refine_all() self.n_sampled = self.HC.V.size() # nevs counted else: self.HC.refine(self.n) self.n_sampled += self.n if self.disp: logging.info('Triangulation completed, evaluating all contraints ' 'and objective function values.') # Readd minimisers to complex if len(self.LMC.xl_maps) > 0: for xl in self.LMC.cache: v = self.HC.V[xl] v_near = v.star() for v in v.nn: v_near = v_near.union(v.nn) # Reconnect vertices to complex # if self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l), # near=v_near): # continue # else: # If failure to find in v_near, then search all vertices # (very expensive operation: # self.HC.connect_vertex_non_symm(tuple(self.LMC[xl].x_l) # ) # Evaluate all constraints and functions self.HC.V.process_pools() if self.disp: logging.info('Evaluations completed.') # feasible sampling points counted by the triangulation.py routines self.fn = self.HC.V.nfev return def iterate_delaunay(self): """ Build a complex of Delaunay triangulated points Note: called with ``self.iterate_complex()`` after class initiation """ self.nc += self.n self.sampled_surface(infty_cons_sampl=self.infty_cons_sampl) # Add sampled points to a triangulation, construct self.Tri if self.disp: logging.info(f'self.n = {self.n}') logging.info(f'self.nc = {self.nc}') logging.info('Constructing and refining simplicial complex graph ' 'structure from sampling points.') if self.dim < 2: self.Ind_sorted = np.argsort(self.C, axis=0) self.Ind_sorted = self.Ind_sorted.flatten() tris = [] for ind, ind_s in enumerate(self.Ind_sorted): if ind > 0: tris.append(self.Ind_sorted[ind - 1:ind + 1]) tris = np.array(tris) # Store 1D triangulation: self.Tri = namedtuple('Tri', ['points', 'simplices'])(self.C, tris) self.points = {} else: if self.C.shape[0] > self.dim + 1: # Ensure a simplex can be built self.delaunay_triangulation(n_prc=self.n_prc) self.n_prc = self.C.shape[0] if self.disp: logging.info('Triangulation completed, evaluating all ' 'constraints and objective function values.') if hasattr(self, 'Tri'): self.HC.vf_to_vv(self.Tri.points, self.Tri.simplices) # Process all pools # Evaluate all constraints and functions if self.disp: logging.info('Triangulation completed, evaluating all contraints ' 'and objective function values.') # Evaluate all constraints and functions self.HC.V.process_pools() if 
self.disp: logging.info('Evaluations completed.') # feasible sampling points counted by the triangulation.py routines self.fn = self.HC.V.nfev self.n_sampled = self.nc # nevs counted in triangulation return # Hypercube minimizers def minimizers(self): """ Returns the indexes of all minimizers """ self.minimizer_pool = [] # Note: Can implement parallelization here for x in self.HC.V.cache: in_LMC = False if len(self.LMC.xl_maps) > 0: for xlmi in self.LMC.xl_maps: if np.all(np.array(x) == np.array(xlmi)): in_LMC = True if in_LMC: continue if self.HC.V[x].minimiser(): if self.disp: logging.info('=' * 60) logging.info(f'v.x = {self.HC.V[x].x_a} is minimizer') logging.info(f'v.f = {self.HC.V[x].f} is minimizer') logging.info('=' * 30) if self.HC.V[x] not in self.minimizer_pool: self.minimizer_pool.append(self.HC.V[x]) if self.disp: logging.info('Neighbors:') logging.info('=' * 30) for vn in self.HC.V[x].nn: logging.info(f'x = {vn.x} || f = {vn.f}') logging.info('=' * 60) self.minimizer_pool_F = [] self.X_min = [] # normalized tuple in the Vertex cache self.X_min_cache = {} # Cache used in hypercube sampling for v in self.minimizer_pool: self.X_min.append(v.x_a) self.minimizer_pool_F.append(v.f) self.X_min_cache[tuple(v.x_a)] = v.x self.minimizer_pool_F = np.array(self.minimizer_pool_F) self.X_min = np.array(self.X_min) # TODO: Only do this if global mode self.sort_min_pool() return self.X_min # Local minimisation # Minimiser pool processing def minimise_pool(self, force_iter=False): """ This processing method can optionally minimise only the best candidate solutions in the minimiser pool Parameters ---------- force_iter : int Number of starting minimizers to process (can be sepcified globally or locally) """ # Find first local minimum # NOTE: Since we always minimize this value regardless it is a waste to # build the topograph first before minimizing lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0]) # Trim minimized point from current minimizer set self.trim_min_pool(0) while not self.stop_l_iter: # Global stopping criteria: self.stopping_criteria() # Note first iteration is outside loop: if force_iter: force_iter -= 1 if force_iter == 0: self.stop_l_iter = True break if np.shape(self.X_min)[0] == 0: self.stop_l_iter = True break # Construct topograph from current minimizer set # (NOTE: This is a very small topograph using only the minizer pool # , it might be worth using some graph theory tools instead. 
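            # g_topograph (defined below) orders the remaining candidates by
            # Euclidean distance from the current best solution; the next
            # local search is then started from the farthest candidate,
            # self.Ss[-1, :], so successive minimisations are spread over the
            # feasible set.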
self.g_topograph(lres_f_min.x, self.X_min) # Find local minimum at the miniser with the greatest Euclidean # distance from the current solution ind_xmin_l = self.Z[:, -1] lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1]) # Trim minimised point from current minimizer set self.trim_min_pool(ind_xmin_l) # Reset controls self.stop_l_iter = False return def sort_min_pool(self): # Sort to find minimum func value in min_pool self.ind_f_min = np.argsort(self.minimizer_pool_F) self.minimizer_pool = np.array(self.minimizer_pool)[self.ind_f_min] self.minimizer_pool_F = np.array(self.minimizer_pool_F)[ self.ind_f_min] return def trim_min_pool(self, trim_ind): self.X_min = np.delete(self.X_min, trim_ind, axis=0) self.minimizer_pool_F = np.delete(self.minimizer_pool_F, trim_ind) self.minimizer_pool = np.delete(self.minimizer_pool, trim_ind) return def g_topograph(self, x_min, X_min): """ Returns the topographical vector stemming from the specified value ``x_min`` for the current feasible set ``X_min`` with True boolean values indicating positive entries and False values indicating negative entries. """ x_min = np.array([x_min]) self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean') # Find sorted indexes of spatial distances: self.Z = np.argsort(self.Y, axis=-1) self.Ss = X_min[self.Z][0] self.minimizer_pool = self.minimizer_pool[self.Z] self.minimizer_pool = self.minimizer_pool[0] return self.Ss # Local bound functions def construct_lcb_simplicial(self, v_min): """ Construct locally (approximately) convex bounds Parameters ---------- v_min : Vertex object The minimizer vertex Returns ------- cbounds : list of lists List of size dimension with length-2 list of bounds for each dimension. """ cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] # Loop over all bounds for vn in v_min.nn: for i, x_i in enumerate(vn.x_a): # Lower bound if (x_i < v_min.x_a[i]) and (x_i > cbounds[i][0]): cbounds[i][0] = x_i # Upper bound if (x_i > v_min.x_a[i]) and (x_i < cbounds[i][1]): cbounds[i][1] = x_i if self.disp: logging.info(f'cbounds found for v_min.x_a = {v_min.x_a}') logging.info(f'cbounds = {cbounds}') return cbounds def construct_lcb_delaunay(self, v_min, ind=None): """ Construct locally (approximately) convex bounds Parameters ---------- v_min : Vertex object The minimizer vertex Returns ------- cbounds : list of lists List of size dimension with length-2 list of bounds for each dimension. """ cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds] return cbounds # Minimize a starting point locally def minimize(self, x_min, ind=None): """ This function is used to calculate the local minima using the specified sampling point as a starting value. Parameters ---------- x_min : vector of floats Current starting point to minimize. Returns ------- lres : OptimizeResult The local optimization result represented as a `OptimizeResult` object. 
""" # Use minima maps if vertex was already run if self.disp: logging.info(f'Vertex minimiser maps = {self.LMC.v_maps}') if self.LMC[x_min].lres is not None: logging.info(f'Found self.LMC[x_min].lres = ' f'{self.LMC[x_min].lres}') return self.LMC[x_min].lres if self.callback is not None: logging.info('Callback for ' 'minimizer starting at {}:'.format(x_min)) if self.disp: logging.info('Starting ' 'minimization at {}...'.format(x_min)) if self.sampling_method == 'simplicial': x_min_t = tuple(x_min) # Find the normalized tuple in the Vertex cache: x_min_t_norm = self.X_min_cache[tuple(x_min_t)] x_min_t_norm = tuple(x_min_t_norm) g_bounds = self.construct_lcb_simplicial(self.HC.V[x_min_t_norm]) if 'bounds' in self.min_solver_args: self.minimizer_kwargs['bounds'] = g_bounds logging.info(self.minimizer_kwargs['bounds']) else: g_bounds = self.construct_lcb_delaunay(x_min, ind=ind) if 'bounds' in self.min_solver_args: self.minimizer_kwargs['bounds'] = g_bounds logging.info(self.minimizer_kwargs['bounds']) if self.disp and 'bounds' in self.minimizer_kwargs: logging.info('bounds in kwarg:') logging.info(self.minimizer_kwargs['bounds']) # Local minimization using scipy.optimize.minimize: lres = minimize(self.func, x_min, **self.minimizer_kwargs) if self.disp: logging.info(f'lres = {lres}') # Local function evals for all minimizers self.res.nlfev += lres.nfev if 'njev' in lres: self.res.nljev += lres.njev if 'nhev' in lres: self.res.nlhev += lres.nhev try: # Needed because of the brain dead 1x1 NumPy arrays lres.fun = lres.fun[0] except (IndexError, TypeError): lres.fun # Append minima maps self.LMC[x_min] self.LMC.add_res(x_min, lres, bounds=g_bounds) return lres # Post local minimization processing def sort_result(self): """ Sort results and build the global return object """ # Sort results in local minima cache results = self.LMC.sort_cache_result() self.res.xl = results['xl'] self.res.funl = results['funl'] self.res.x = results['x'] self.res.fun = results['fun'] # Add local func evals to sampling func evals # Count the number of feasible vertices and add to local func evals: self.res.nfev = self.fn + self.res.nlfev return self.res # Algorithm controls def fail_routine(self, mes=("Failed to converge")): self.break_routine = True self.res.success = False self.X_min = [None] self.res.message = mes def sampled_surface(self, infty_cons_sampl=False): """ Sample the function surface. There are 2 modes, if ``infty_cons_sampl`` is True then the sampled points that are generated outside the feasible domain will be assigned an ``inf`` value in accordance with SHGO rules. This guarantees convergence and usually requires less objective function evaluations at the computational costs of more Delaunay triangulation points. If ``infty_cons_sampl`` is False, then the infeasible points are discarded and only a subspace of the sampled points are used. This comes at the cost of the loss of guaranteed convergence and usually requires more objective function evaluations. 
""" # Generate sampling points if self.disp: logging.info('Generating sampling points') self.sampling(self.nc, self.dim) if len(self.LMC.xl_maps) > 0: self.C = np.vstack((self.C, np.array(self.LMC.xl_maps))) if not infty_cons_sampl: # Find subspace of feasible points if self.g_cons is not None: self.sampling_subspace() # Sort remaining samples self.sorted_samples() # Find objective function references self.n_sampled = self.nc def sampling_custom(self, n, dim): """ Generates uniform sampling points in a hypercube and scales the points to the bound limits. """ # Generate sampling points. # Generate uniform sample points in [0, 1]^m \subset R^m if self.n_sampled == 0: self.C = self.sampling_function(n, dim) else: self.C = self.sampling_function(n, dim) # Distribute over bounds for i in range(len(self.bounds)): self.C[:, i] = (self.C[:, i] * (self.bounds[i][1] - self.bounds[i][0]) + self.bounds[i][0]) return self.C def sampling_subspace(self): """Find subspace of feasible points from g_func definition""" # Subspace of feasible points. for ind, g in enumerate(self.g_cons): # C.shape = (Z, dim) where Z is the number of sampling points to # evaluate and dim is the dimensionality of the problem. # the constraint function may not be vectorised so have to step # through each sampling point sequentially. feasible = np.array( [np.all(g(x_C, *self.g_args[ind]) >= 0.0) for x_C in self.C], dtype=bool ) self.C = self.C[feasible] if self.C.size == 0: self.res.message = ('No sampling point found within the ' + 'feasible set. Increasing sampling ' + 'size.') # sampling correctly for both 1-D and >1-D cases if self.disp: logging.info(self.res.message) def sorted_samples(self): # Validated """Find indexes of the sorted sampling points""" self.Ind_sorted = np.argsort(self.C, axis=0) self.Xs = self.C[self.Ind_sorted] return self.Ind_sorted, self.Xs def delaunay_triangulation(self, n_prc=0): if hasattr(self, 'Tri') and self.qhull_incremental: # TODO: Uncertain if n_prc needs to add len(self.LMC.xl_maps) # in self.sampled_surface self.Tri.add_points(self.C[n_prc:, :]) else: try: self.Tri = spatial.Delaunay(self.C, incremental=self.qhull_incremental, ) except spatial.QhullError: if str(sys.exc_info()[1])[:6] == 'QH6239': logging.warning('QH6239 Qhull precision error detected, ' 'this usually occurs when no bounds are ' 'specified, Qhull can only run with ' 'handling cocircular/cospherical points' ' and in this case incremental mode is ' 'switched off. 
The performance of shgo ' 'will be reduced in this mode.') self.qhull_incremental = False self.Tri = spatial.Delaunay(self.C, incremental= self.qhull_incremental) else: raise return self.Tri class LMap: def __init__(self, v): self.v = v self.x_l = None self.lres = None self.f_min = None self.lbounds = [] class LMapCache: def __init__(self): self.cache = {} # Lists for search queries self.v_maps = [] self.xl_maps = [] self.xl_maps_set = set() self.f_maps = [] self.lbound_maps = [] self.size = 0 def __getitem__(self, v): try: v = np.ndarray.tolist(v) except TypeError: pass v = tuple(v) try: return self.cache[v] except KeyError: xval = LMap(v) self.cache[v] = xval return self.cache[v] def add_res(self, v, lres, bounds=None): v = np.ndarray.tolist(v) v = tuple(v) self.cache[v].x_l = lres.x self.cache[v].lres = lres self.cache[v].f_min = lres.fun self.cache[v].lbounds = bounds # Update cache size self.size += 1 # Cache lists for search queries self.v_maps.append(v) self.xl_maps.append(lres.x) self.xl_maps_set.add(tuple(lres.x)) self.f_maps.append(lres.fun) self.lbound_maps.append(bounds) def sort_cache_result(self): """ Sort results and build the global return object """ results = {} # Sort results and save self.xl_maps = np.array(self.xl_maps) self.f_maps = np.array(self.f_maps) # Sorted indexes in Func_min ind_sorted = np.argsort(self.f_maps) # Save ordered list of minima results['xl'] = self.xl_maps[ind_sorted] # Ordered x vals self.f_maps = np.array(self.f_maps) results['funl'] = self.f_maps[ind_sorted] results['funl'] = results['funl'].T # Find global of all minimizers results['x'] = self.xl_maps[ind_sorted[0]] # Save global minima results['fun'] = self.f_maps[ind_sorted[0]] # Save global fun value self.xl_maps = np.ndarray.tolist(self.xl_maps) self.f_maps = np.ndarray.tolist(self.f_maps) return results
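# Usage sketch (illustrative only, values chosen arbitrarily): the stopping
# options handled by SHGO.init_options above are supplied through the
# ``options`` dict of the top-level ``shgo`` call, e.g.
#
#     from scipy.optimize import rosen, shgo
#     bounds = [(0, 2)] * 3
#     # stop once a value within f_tol of the known minimum f* = 0 is found,
#     # printing progress along the way
#     res = shgo(rosen, bounds, options={'f_min': 0.0, 'f_tol': 1e-6,
#                                        'disp': True})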
62233
37.993734
133
py
scipy
scipy-main/scipy/optimize/_linprog_doc.py
""" Created on Sat Aug 22 19:49:17 2020 @author: matth """ def _linprog_highs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='highs', callback=None, maxiter=None, disp=False, presolve=True, time_limit=None, dual_feasibility_tolerance=None, primal_feasibility_tolerance=None, ipm_optimality_tolerance=None, simplex_dual_edge_weight_strategy=None, mip_rel_gap=None, **unknown_options): r""" Linear programming: minimize a linear objective function subject to linear equality and inequality constraints using one of the HiGHS solvers. Linear programming solves problems of the following form: .. math:: \min_x \ & c^T x \\ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ & A_{eq} x = b_{eq},\\ & l \leq x \leq u , where :math:`x` is a vector of decision variables; :math:`c`, :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and :math:`A_{ub}` and :math:`A_{eq}` are matrices. Alternatively, that's: minimize:: c @ x such that:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub Note that by default ``lb = 0`` and ``ub = None`` unless specified with ``bounds``. Parameters ---------- c : 1-D array The coefficients of the linear objective function to be minimized. A_ub : 2-D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1-D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2-D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1-D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : sequence, optional A sequence of ``(min, max)`` pairs for each element in ``x``, defining the minimum and maximum values of that decision variable. Use ``None`` to indicate that there is no bound. By default, bounds are ``(0, None)`` (all decision variables are non-negative). If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` will serve as bounds for all decision variables. method : str This is the method-specific documentation for 'highs', which chooses automatically between :ref:`'highs-ds' <optimize.linprog-highs-ds>` and :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`. :ref:`'interior-point' <optimize.linprog-interior-point>` (default), :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are also available. integrality : 1-D array or int, optional Indicates the type of integrality constraint on each decision variable. ``0`` : Continuous variable; no integrality constraint. ``1`` : Integer variable; decision variable must be an integer within `bounds`. ``2`` : Semi-continuous variable; decision variable must be within `bounds` or take value ``0``. ``3`` : Semi-integer variable; decision variable must be an integer within `bounds` or take value ``0``. By default, all variables are continuous. For mixed integrality constraints, supply an array of shape `c.shape`. To infer a constraint on each decision variable from shorter inputs, the argument will be broadcasted to `c.shape` using `np.broadcast_to`. This argument is currently used only by the ``'highs'`` method and ignored otherwise. Options ------- maxiter : int The maximum number of iterations to perform in either phase. 
For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not include the number of crossover iterations. Default is the largest possible value for an ``int`` on the platform. disp : bool (default: ``False``) Set to ``True`` if indicators of optimization status are to be printed to the console during optimization. presolve : bool (default: ``True``) Presolve attempts to identify trivial infeasibilities, identify trivial unboundedness, and simplify the problem before sending it to the main solver. It is generally recommended to keep the default setting ``True``; set to ``False`` if presolve is to be disabled. time_limit : float The maximum time in seconds allotted to solve the problem; default is the largest possible value for a ``double`` on the platform. dual_feasibility_tolerance : double (default: 1e-07) Dual feasibility tolerance for :ref:`'highs-ds' <optimize.linprog-highs-ds>`. The minimum of this and ``primal_feasibility_tolerance`` is used for the feasibility tolerance of :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`. primal_feasibility_tolerance : double (default: 1e-07) Primal feasibility tolerance for :ref:`'highs-ds' <optimize.linprog-highs-ds>`. The minimum of this and ``dual_feasibility_tolerance`` is used for the feasibility tolerance of :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`. ipm_optimality_tolerance : double (default: ``1e-08``) Optimality tolerance for :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`. Minimum allowable value is 1e-12. simplex_dual_edge_weight_strategy : str (default: None) Strategy for simplex dual edge weights. The default, ``None``, automatically selects one of the following. ``'dantzig'`` uses Dantzig's original strategy of choosing the most negative reduced cost. ``'devex'`` uses the strategy described in [15]_. ``steepest`` uses the exact steepest edge strategy as described in [16]_. ``'steepest-devex'`` begins with the exact steepest edge strategy until the computation is too costly or inexact and then switches to the devex method. Curently, ``None`` always selects ``'steepest-devex'``, but this may change as new options become available. mip_rel_gap : double (default: None) Termination criterion for MIP solver: solver will terminate when the gap between the primal objective value and the dual objective bound, scaled by the primal objective value, is <= mip_rel_gap. unknown_options : dict Optional arguments not used by this particular solver. If ``unknown_options`` is non-empty, a warning is issued listing all unused options. Returns ------- res : OptimizeResult A :class:`scipy.optimize.OptimizeResult` consisting of the fields: x : 1D array The values of the decision variables that minimizes the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. slack : 1D array The (nominally positive) values of the slack, ``b_ub - A_ub @ x``. con : 1D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. success : bool ``True`` when the algorithm succeeds in finding an optimal solution. status : int An integer representing the exit status of the algorithm. ``0`` : Optimization terminated successfully. ``1`` : Iteration or time limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : The HiGHS solver ran into a problem. message : str A string descriptor of the exit status of the algorithm. nit : int The total number of iterations performed. 
For the HiGHS simplex method, this includes iterations in all phases. For the HiGHS interior-point method, this does not include crossover iterations. crossover_nit : int The number of primal/dual pushes performed during the crossover routine for the HiGHS interior-point method. This is ``0`` for the HiGHS simplex method. ineqlin : OptimizeResult Solution and sensitivity information corresponding to the inequality constraints, `b_ub`. A dictionary consisting of the fields: residual : np.ndnarray The (nominally positive) values of the slack variables, ``b_ub - A_ub @ x``. This quantity is also commonly referred to as "slack". marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the right-hand side of the inequality constraints, `b_ub`. eqlin : OptimizeResult Solution and sensitivity information corresponding to the equality constraints, `b_eq`. A dictionary consisting of the fields: residual : np.ndarray The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the right-hand side of the equality constraints, `b_eq`. lower, upper : OptimizeResult Solution and sensitivity information corresponding to the lower and upper bounds on decision variables, `bounds`. residual : np.ndarray The (nominally positive) values of the quantity ``x - lb`` (lower) or ``ub - x`` (upper). marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the lower and upper `bounds`. Notes ----- Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper of the C++ high performance dual revised simplex implementation (HSOL) [13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>` is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint **m**\ ethod [13]_; it features a crossover routine, so it is as accurate as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses between the two automatically. For new code involving `linprog`, we recommend explicitly choosing one of these three method values instead of :ref:`'interior-point' <optimize.linprog-interior-point>` (default), :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and :ref:`'simplex' <optimize.linprog-simplex>` (legacy). The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain `marginals`, or partial derivatives of the objective function with respect to the right-hand side of each constraint. These partial derivatives are also referred to as "Lagrange multipliers", "dual values", and "shadow prices". The sign convention of `marginals` is opposite that of Lagrange multipliers produced by many nonlinear solvers. References ---------- .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. "HiGHS - high performance software for linear optimization." https://highs.dev/ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised simplex method." Mathematical Programming Computation, 10 (1), 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." Mathematical programming 5.1 (1973): 1-28. .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. 
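    Examples
    --------
    A small worked example, mirroring the generic `linprog` example (the
    numbers are purely illustrative):

    >>> from scipy.optimize import linprog
    >>> c = [-1, 4]
    >>> A_ub = [[-3, 1], [1, 2]]
    >>> b_ub = [6, 4]
    >>> bounds = [(None, None), (-3, None)]
    >>> res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='highs')
    >>> res.fun
    -22.0
    >>> res.x
    array([10., -3.])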
""" pass def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='highs-ds', callback=None, maxiter=None, disp=False, presolve=True, time_limit=None, dual_feasibility_tolerance=None, primal_feasibility_tolerance=None, simplex_dual_edge_weight_strategy=None, **unknown_options): r""" Linear programming: minimize a linear objective function subject to linear equality and inequality constraints using the HiGHS dual simplex solver. Linear programming solves problems of the following form: .. math:: \min_x \ & c^T x \\ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ & A_{eq} x = b_{eq},\\ & l \leq x \leq u , where :math:`x` is a vector of decision variables; :math:`c`, :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and :math:`A_{ub}` and :math:`A_{eq}` are matrices. Alternatively, that's: minimize:: c @ x such that:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub Note that by default ``lb = 0`` and ``ub = None`` unless specified with ``bounds``. Parameters ---------- c : 1-D array The coefficients of the linear objective function to be minimized. A_ub : 2-D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1-D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2-D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1-D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : sequence, optional A sequence of ``(min, max)`` pairs for each element in ``x``, defining the minimum and maximum values of that decision variable. Use ``None`` to indicate that there is no bound. By default, bounds are ``(0, None)`` (all decision variables are non-negative). If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` will serve as bounds for all decision variables. method : str This is the method-specific documentation for 'highs-ds'. :ref:`'highs' <optimize.linprog-highs>`, :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, :ref:`'interior-point' <optimize.linprog-interior-point>` (default), :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are also available. Options ------- maxiter : int The maximum number of iterations to perform in either phase. Default is the largest possible value for an ``int`` on the platform. disp : bool (default: ``False``) Set to ``True`` if indicators of optimization status are to be printed to the console during optimization. presolve : bool (default: ``True``) Presolve attempts to identify trivial infeasibilities, identify trivial unboundedness, and simplify the problem before sending it to the main solver. It is generally recommended to keep the default setting ``True``; set to ``False`` if presolve is to be disabled. time_limit : float The maximum time in seconds allotted to solve the problem; default is the largest possible value for a ``double`` on the platform. dual_feasibility_tolerance : double (default: 1e-07) Dual feasibility tolerance for :ref:`'highs-ds' <optimize.linprog-highs-ds>`. primal_feasibility_tolerance : double (default: 1e-07) Primal feasibility tolerance for :ref:`'highs-ds' <optimize.linprog-highs-ds>`. 
simplex_dual_edge_weight_strategy : str (default: None) Strategy for simplex dual edge weights. The default, ``None``, automatically selects one of the following. ``'dantzig'`` uses Dantzig's original strategy of choosing the most negative reduced cost. ``'devex'`` uses the strategy described in [15]_. ``steepest`` uses the exact steepest edge strategy as described in [16]_. ``'steepest-devex'`` begins with the exact steepest edge strategy until the computation is too costly or inexact and then switches to the devex method. Curently, ``None`` always selects ``'steepest-devex'``, but this may change as new options become available. unknown_options : dict Optional arguments not used by this particular solver. If ``unknown_options`` is non-empty, a warning is issued listing all unused options. Returns ------- res : OptimizeResult A :class:`scipy.optimize.OptimizeResult` consisting of the fields: x : 1D array The values of the decision variables that minimizes the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. slack : 1D array The (nominally positive) values of the slack, ``b_ub - A_ub @ x``. con : 1D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. success : bool ``True`` when the algorithm succeeds in finding an optimal solution. status : int An integer representing the exit status of the algorithm. ``0`` : Optimization terminated successfully. ``1`` : Iteration or time limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : The HiGHS solver ran into a problem. message : str A string descriptor of the exit status of the algorithm. nit : int The total number of iterations performed. This includes iterations in all phases. crossover_nit : int This is always ``0`` for the HiGHS simplex method. For the HiGHS interior-point method, this is the number of primal/dual pushes performed during the crossover routine. ineqlin : OptimizeResult Solution and sensitivity information corresponding to the inequality constraints, `b_ub`. A dictionary consisting of the fields: residual : np.ndnarray The (nominally positive) values of the slack variables, ``b_ub - A_ub @ x``. This quantity is also commonly referred to as "slack". marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the right-hand side of the inequality constraints, `b_ub`. eqlin : OptimizeResult Solution and sensitivity information corresponding to the equality constraints, `b_eq`. A dictionary consisting of the fields: residual : np.ndarray The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the right-hand side of the equality constraints, `b_eq`. lower, upper : OptimizeResult Solution and sensitivity information corresponding to the lower and upper bounds on decision variables, `bounds`. residual : np.ndarray The (nominally positive) values of the quantity ``x - lb`` (lower) or ``ub - x`` (upper). marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the lower and upper `bounds`. Notes ----- Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper of the C++ high performance dual revised simplex implementation (HSOL) [13]_, [14]_. 
Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>` is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint **m**\ ethod [13]_; it features a crossover routine, so it is as accurate as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses between the two automatically. For new code involving `linprog`, we recommend explicitly choosing one of these three method values instead of :ref:`'interior-point' <optimize.linprog-interior-point>` (default), :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and :ref:`'simplex' <optimize.linprog-simplex>` (legacy). The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain `marginals`, or partial derivatives of the objective function with respect to the right-hand side of each constraint. These partial derivatives are also referred to as "Lagrange multipliers", "dual values", and "shadow prices". The sign convention of `marginals` is opposite that of Lagrange multipliers produced by many nonlinear solvers. References ---------- .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. "HiGHS - high performance software for linear optimization." https://highs.dev/ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised simplex method." Mathematical Programming Computation, 10 (1), 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." Mathematical programming 5.1 (1973): 1-28. .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. """ pass def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='highs-ipm', callback=None, maxiter=None, disp=False, presolve=True, time_limit=None, dual_feasibility_tolerance=None, primal_feasibility_tolerance=None, ipm_optimality_tolerance=None, **unknown_options): r""" Linear programming: minimize a linear objective function subject to linear equality and inequality constraints using the HiGHS interior point solver. Linear programming solves problems of the following form: .. math:: \min_x \ & c^T x \\ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ & A_{eq} x = b_{eq},\\ & l \leq x \leq u , where :math:`x` is a vector of decision variables; :math:`c`, :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and :math:`A_{ub}` and :math:`A_{eq}` are matrices. Alternatively, that's: minimize:: c @ x such that:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub Note that by default ``lb = 0`` and ``ub = None`` unless specified with ``bounds``. Parameters ---------- c : 1-D array The coefficients of the linear objective function to be minimized. A_ub : 2-D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1-D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2-D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1-D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : sequence, optional A sequence of ``(min, max)`` pairs for each element in ``x``, defining the minimum and maximum values of that decision variable. Use ``None`` to indicate that there is no bound. 
By default, bounds are ``(0, None)`` (all decision variables are non-negative). If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` will serve as bounds for all decision variables. method : str This is the method-specific documentation for 'highs-ipm'. :ref:`'highs-ipm' <optimize.linprog-highs>`, :ref:`'highs-ds' <optimize.linprog-highs-ds>`, :ref:`'interior-point' <optimize.linprog-interior-point>` (default), :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are also available. Options ------- maxiter : int The maximum number of iterations to perform in either phase. For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not include the number of crossover iterations. Default is the largest possible value for an ``int`` on the platform. disp : bool (default: ``False``) Set to ``True`` if indicators of optimization status are to be printed to the console during optimization. presolve : bool (default: ``True``) Presolve attempts to identify trivial infeasibilities, identify trivial unboundedness, and simplify the problem before sending it to the main solver. It is generally recommended to keep the default setting ``True``; set to ``False`` if presolve is to be disabled. time_limit : float The maximum time in seconds allotted to solve the problem; default is the largest possible value for a ``double`` on the platform. dual_feasibility_tolerance : double (default: 1e-07) The minimum of this and ``primal_feasibility_tolerance`` is used for the feasibility tolerance of :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`. primal_feasibility_tolerance : double (default: 1e-07) The minimum of this and ``dual_feasibility_tolerance`` is used for the feasibility tolerance of :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`. ipm_optimality_tolerance : double (default: ``1e-08``) Optimality tolerance for :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`. Minimum allowable value is 1e-12. unknown_options : dict Optional arguments not used by this particular solver. If ``unknown_options`` is non-empty, a warning is issued listing all unused options. Returns ------- res : OptimizeResult A :class:`scipy.optimize.OptimizeResult` consisting of the fields: x : 1D array The values of the decision variables that minimizes the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. slack : 1D array The (nominally positive) values of the slack, ``b_ub - A_ub @ x``. con : 1D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. success : bool ``True`` when the algorithm succeeds in finding an optimal solution. status : int An integer representing the exit status of the algorithm. ``0`` : Optimization terminated successfully. ``1`` : Iteration or time limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : The HiGHS solver ran into a problem. message : str A string descriptor of the exit status of the algorithm. nit : int The total number of iterations performed. For the HiGHS interior-point method, this does not include crossover iterations. crossover_nit : int The number of primal/dual pushes performed during the crossover routine for the HiGHS interior-point method. ineqlin : OptimizeResult Solution and sensitivity information corresponding to the inequality constraints, `b_ub`. 
A dictionary consisting of the fields: residual : np.ndnarray The (nominally positive) values of the slack variables, ``b_ub - A_ub @ x``. This quantity is also commonly referred to as "slack". marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the right-hand side of the inequality constraints, `b_ub`. eqlin : OptimizeResult Solution and sensitivity information corresponding to the equality constraints, `b_eq`. A dictionary consisting of the fields: residual : np.ndarray The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the right-hand side of the equality constraints, `b_eq`. lower, upper : OptimizeResult Solution and sensitivity information corresponding to the lower and upper bounds on decision variables, `bounds`. residual : np.ndarray The (nominally positive) values of the quantity ``x - lb`` (lower) or ``ub - x`` (upper). marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the lower and upper `bounds`. Notes ----- Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>` is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint **m**\ ethod [13]_; it features a crossover routine, so it is as accurate as a simplex solver. Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper of the C++ high performance dual revised simplex implementation (HSOL) [13]_, [14]_. Method :ref:`'highs' <optimize.linprog-highs>` chooses between the two automatically. For new code involving `linprog`, we recommend explicitly choosing one of these three method values instead of :ref:`'interior-point' <optimize.linprog-interior-point>` (default), :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and :ref:`'simplex' <optimize.linprog-simplex>` (legacy). The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain `marginals`, or partial derivatives of the objective function with respect to the right-hand side of each constraint. These partial derivatives are also referred to as "Lagrange multipliers", "dual values", and "shadow prices". The sign convention of `marginals` is opposite that of Lagrange multipliers produced by many nonlinear solvers. References ---------- .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. "HiGHS - high performance software for linear optimization." https://highs.dev/ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised simplex method." Mathematical Programming Computation, 10 (1), 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 """ pass def _linprog_ip_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='interior-point', callback=None, maxiter=1000, disp=False, presolve=True, tol=1e-8, autoscale=False, rr=True, alpha0=.99995, beta=0.1, sparse=False, lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options): r""" Linear programming: minimize a linear objective function subject to linear equality and inequality constraints using the interior-point method of [4]_. .. deprecated:: 1.9.0 `method='interior-point'` will be removed in SciPy 1.11.0. It is replaced by `method='highs'` because the latter is faster and more robust. Linear programming solves problems of the following form: .. 
math:: \min_x \ & c^T x \\ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ & A_{eq} x = b_{eq},\\ & l \leq x \leq u , where :math:`x` is a vector of decision variables; :math:`c`, :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and :math:`A_{ub}` and :math:`A_{eq}` are matrices. Alternatively, that's: minimize:: c @ x such that:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub Note that by default ``lb = 0`` and ``ub = None`` unless specified with ``bounds``. Parameters ---------- c : 1-D array The coefficients of the linear objective function to be minimized. A_ub : 2-D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1-D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2-D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1-D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : sequence, optional A sequence of ``(min, max)`` pairs for each element in ``x``, defining the minimum and maximum values of that decision variable. Use ``None`` to indicate that there is no bound. By default, bounds are ``(0, None)`` (all decision variables are non-negative). If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` will serve as bounds for all decision variables. method : str This is the method-specific documentation for 'interior-point'. :ref:`'highs' <optimize.linprog-highs>`, :ref:`'highs-ds' <optimize.linprog-highs-ds>`, :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are also available. callback : callable, optional Callback function to be executed once per iteration. Options ------- maxiter : int (default: 1000) The maximum number of iterations of the algorithm. disp : bool (default: False) Set to ``True`` if indicators of optimization status are to be printed to the console each iteration. presolve : bool (default: True) Presolve attempts to identify trivial infeasibilities, identify trivial unboundedness, and simplify the problem before sending it to the main solver. It is generally recommended to keep the default setting ``True``; set to ``False`` if presolve is to be disabled. tol : float (default: 1e-8) Termination tolerance to be used for all termination criteria; see [4]_ Section 4.5. autoscale : bool (default: False) Set to ``True`` to automatically perform equilibration. Consider using this option if the numerical values in the constraints are separated by several orders of magnitude. rr : bool (default: True) Set to ``False`` to disable automatic redundancy removal. alpha0 : float (default: 0.99995) The maximal step size for Mehrota's predictor-corrector search direction; see :math:`\beta_{3}` of [4]_ Table 8.1. beta : float (default: 0.1) The desired reduction of the path parameter :math:`\mu` (see [6]_) when Mehrota's predictor-corrector is not in use (uncommon). sparse : bool (default: False) Set to ``True`` if the problem is to be treated as sparse after presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix, this option will automatically be set ``True``, and the problem will be treated as sparse even during presolve. 
If your constraint matrices contain mostly zeros and the problem is not very small (less than about 100 constraints or variables), consider setting ``True`` or providing ``A_eq`` and ``A_ub`` as sparse matrices. lstsq : bool (default: ``False``) Set to ``True`` if the problem is expected to be very poorly conditioned. This should always be left ``False`` unless severe numerical difficulties are encountered. Leave this at the default unless you receive a warning message suggesting otherwise. sym_pos : bool (default: True) Leave ``True`` if the problem is expected to yield a well conditioned symmetric positive definite normal equation matrix (almost always). Leave this at the default unless you receive a warning message suggesting otherwise. cholesky : bool (default: True) Set to ``True`` if the normal equations are to be solved by explicit Cholesky decomposition followed by explicit forward/backward substitution. This is typically faster for problems that are numerically well-behaved. pc : bool (default: True) Leave ``True`` if the predictor-corrector method of Mehrota is to be used. This is almost always (if not always) beneficial. ip : bool (default: False) Set to ``True`` if the improved initial point suggestion due to [4]_ Section 4.3 is desired. Whether this is beneficial or not depends on the problem. permc_spec : str (default: 'MMD_AT_PLUS_A') (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = True``, and no SuiteSparse.) A matrix is factorized in each iteration of the algorithm. This option specifies how to permute the columns of the matrix for sparsity preservation. Acceptable values are: - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering. This option can impact the convergence of the interior point algorithm; test different values to determine which performs best for your problem. For more information, refer to ``scipy.sparse.linalg.splu``. unknown_options : dict Optional arguments not used by this particular solver. If `unknown_options` is non-empty a warning is issued listing all unused options. Returns ------- res : OptimizeResult A :class:`scipy.optimize.OptimizeResult` consisting of the fields: x : 1-D array The values of the decision variables that minimizes the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. slack : 1-D array The (nominally positive) values of the slack variables, ``b_ub - A_ub @ x``. con : 1-D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. success : bool ``True`` when the algorithm succeeds in finding an optimal solution. status : int An integer representing the exit status of the algorithm. ``0`` : Optimization terminated successfully. ``1`` : Iteration limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : Numerical difficulties encountered. message : str A string descriptor of the exit status of the algorithm. nit : int The total number of iterations performed in all phases. Notes ----- This method implements the algorithm outlined in [4]_ with ideas from [8]_ and a structure inspired by the simpler methods of [6]_. 
The primal-dual path following method begins with initial 'guesses' of the primal and dual variables of the standard form problem and iteratively attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the problem with a gradually reduced logarithmic barrier term added to the objective. This particular implementation uses a homogeneous self-dual formulation, which provides certificates of infeasibility or unboundedness where applicable. The default initial point for the primal and dual variables is that defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial point option ``ip=True``), an alternate (potentially improved) starting point can be calculated according to the additional recommendations of [4]_ Section 4.4. A search direction is calculated using the predictor-corrector method (single correction) proposed by Mehrota and detailed in [4]_ Section 4.1. (A potential improvement would be to implement the method of multiple corrections described in [4]_ Section 4.2.) In practice, this is accomplished by solving the normal equations, [4]_ Section 5.1 Equations 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of solving the normal equations rather than 8.25 directly is that the matrices involved are symmetric positive definite, so Cholesky decomposition can be used rather than the more expensive LU factorization. With default options, the solver used to perform the factorization depends on third-party software availability and the conditioning of the problem. For dense problems, solvers are tried in the following order: 1. ``scipy.linalg.cho_factor`` 2. ``scipy.linalg.solve`` with option ``sym_pos=True`` 3. ``scipy.linalg.solve`` with option ``sym_pos=False`` 4. ``scipy.linalg.lstsq`` For sparse problems: 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed) 2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse are installed) 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy) 4. ``scipy.sparse.linalg.lsqr`` If the solver fails for any reason, successively more robust (but slower) solvers are attempted in the order indicated. Attempting, failing, and re-starting factorization can be time consuming, so if the problem is numerically challenging, options can be set to bypass solvers that are failing. Setting ``cholesky=False`` skips to solver 2, ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips to solver 4 for both sparse and dense problems. Potential improvements for combatting issues associated with dense columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and [10]_ Section 4.1-4.2; the latter also discusses the alleviation of accuracy issues associated with the substitution approach to free variables. After calculating the search direction, the maximum possible step size that does not activate the non-negativity constraints is calculated, and the smaller of this step size and unity is applied (as in [4]_ Section 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size. The new point is tested according to the termination conditions of [4]_ Section 4.5. The same tolerance, which can be set using the ``tol`` option, is used for all checks. (A potential improvement would be to expose the different tolerances to be set independently.) If optimality, unboundedness, or infeasibility is detected, the solve procedure terminates; otherwise it repeats. 
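For orientation only (this is a schematic sketch in generic primal-dual notation; it deliberately omits the homogeneous self-dual embedding used by this implementation and need not match the equation numbering of [4]_): the standard-form problem is approached through a sequence of barrier subproblems

.. math:: \min_x \ c^T x - \mu \sum_j \ln x_j \quad \mbox{such that} \quad A x = b, \ x > 0,

whose optimality conditions couple the primal variables :math:`x`, the dual variables :math:`y`, and the dual slacks :math:`s = c - A^T y` through :math:`x_j s_j = \mu`. Eliminating the primal and slack updates from the Newton step on these conditions leaves a symmetric positive definite system of the form :math:`(A D A^T)\,\Delta y = r` with a diagonal :math:`D`, which is the kind of system the solvers listed above factorize.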
Whereas the top level ``linprog`` module expects a problem of form: Minimize:: c @ x Subject to:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The problem is automatically converted to the form: Minimize:: c @ x Subject to:: A @ x == b x >= 0 for solution. That is, the original problem contains equality, upper-bound and variable constraints whereas the method specific solver requires equality constraints and variable non-negativity. ``linprog`` converts the original problem to standard form by converting the simple bounds to upper bound constraints, introducing non-negative slack variables for inequality constraints, and expressing unbounded variables as the difference between two non-negative variables. The problem is converted back to the original form before results are reported. References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear Programming based on Newton's Method." Unpublished Course Notes, March 2004. Available 2/25/2017 at https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear programming." Mathematical Programming 71.2 (1995): 221-245. .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. .. [10] Andersen, Erling D., et al. Implementation of interior point methods for large scale linear programming. HEC/Universite de Geneve, 1996. """ pass def _linprog_rs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='interior-point', callback=None, x0=None, maxiter=5000, disp=False, presolve=True, tol=1e-12, autoscale=False, rr=True, maxupdate=10, mast=False, pivot="mrc", **unknown_options): r""" Linear programming: minimize a linear objective function subject to linear equality and inequality constraints using the revised simplex method. .. deprecated:: 1.9.0 `method='revised simplex'` will be removed in SciPy 1.11.0. It is replaced by `method='highs'` because the latter is faster and more robust. Linear programming solves problems of the following form: .. math:: \min_x \ & c^T x \\ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ & A_{eq} x = b_{eq},\\ & l \leq x \leq u , where :math:`x` is a vector of decision variables; :math:`c`, :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and :math:`A_{ub}` and :math:`A_{eq}` are matrices. Alternatively, that's: minimize:: c @ x such that:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub Note that by default ``lb = 0`` and ``ub = None`` unless specified with ``bounds``. Parameters ---------- c : 1-D array The coefficients of the linear objective function to be minimized. A_ub : 2-D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1-D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2-D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1-D array, optional The equality constraint vector. 
Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : sequence, optional A sequence of ``(min, max)`` pairs for each element in ``x``, defining the minimum and maximum values of that decision variable. Use ``None`` to indicate that there is no bound. By default, bounds are ``(0, None)`` (all decision variables are non-negative). If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` will serve as bounds for all decision variables. method : str This is the method-specific documentation for 'revised simplex'. :ref:`'highs' <optimize.linprog-highs>`, :ref:`'highs-ds' <optimize.linprog-highs-ds>`, :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, :ref:`'interior-point' <optimize.linprog-interior-point>` (default), and :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are also available. callback : callable, optional Callback function to be executed once per iteration. x0 : 1-D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. Options ------- maxiter : int (default: 5000) The maximum number of iterations to perform in either phase. disp : bool (default: False) Set to ``True`` if indicators of optimization status are to be printed to the console each iteration. presolve : bool (default: True) Presolve attempts to identify trivial infeasibilities, identify trivial unboundedness, and simplify the problem before sending it to the main solver. It is generally recommended to keep the default setting ``True``; set to ``False`` if presolve is to be disabled. tol : float (default: 1e-12) The tolerance which determines when a solution is "close enough" to zero in Phase 1 to be considered a basic feasible solution or close enough to positive to serve as an optimal solution. autoscale : bool (default: False) Set to ``True`` to automatically perform equilibration. Consider using this option if the numerical values in the constraints are separated by several orders of magnitude. rr : bool (default: True) Set to ``False`` to disable automatic redundancy removal. maxupdate : int (default: 10) The maximum number of updates performed on the LU factorization. After this many updates is reached, the basis matrix is factorized from scratch. mast : bool (default: False) Minimize Amortized Solve Time. If enabled, the average time to solve a linear system using the basis factorization is measured. Typically, the average solve time will decrease with each successive solve after initial factorization, as factorization takes much more time than the solve operation (and updates). Eventually, however, the updated factorization becomes sufficiently complex that the average solve time begins to increase. When this is detected, the basis is refactorized from scratch. Enable this option to maximize speed at the risk of nondeterministic behavior. Ignored if ``maxupdate`` is 0. pivot : "mrc" or "bland" (default: "mrc") Pivot rule: Minimum Reduced Cost ("mrc") or Bland's rule ("bland"). Choose Bland's rule if iteration limit is reached and cycling is suspected. unknown_options : dict Optional arguments not used by this particular solver. If `unknown_options` is non-empty a warning is issued listing all unused options. 
Returns ------- res : OptimizeResult A :class:`scipy.optimize.OptimizeResult` consisting of the fields: x : 1-D array The values of the decision variables that minimizes the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. slack : 1-D array The (nominally positive) values of the slack variables, ``b_ub - A_ub @ x``. con : 1-D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. success : bool ``True`` when the algorithm succeeds in finding an optimal solution. status : int An integer representing the exit status of the algorithm. ``0`` : Optimization terminated successfully. ``1`` : Iteration limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : Numerical difficulties encountered. ``5`` : Problem has no constraints; turn presolve on. ``6`` : Invalid guess provided. message : str A string descriptor of the exit status of the algorithm. nit : int The total number of iterations performed in all phases. Notes ----- Method *revised simplex* uses the revised simplex method as described in [9]_, except that a factorization [11]_ of the basis matrix, rather than its inverse, is efficiently maintained and used to solve the linear systems at each iteration of the algorithm. References ---------- .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. .. [11] Bartels, Richard H. "A stabilization of the simplex method." Journal in Numerische Mathematik 16.5 (1971): 414-434. """ pass def _linprog_simplex_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='interior-point', callback=None, maxiter=5000, disp=False, presolve=True, tol=1e-12, autoscale=False, rr=True, bland=False, **unknown_options): r""" Linear programming: minimize a linear objective function subject to linear equality and inequality constraints using the tableau-based simplex method. .. deprecated:: 1.9.0 `method='simplex'` will be removed in SciPy 1.11.0. It is replaced by `method='highs'` because the latter is faster and more robust. Linear programming solves problems of the following form: .. math:: \min_x \ & c^T x \\ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ & A_{eq} x = b_{eq},\\ & l \leq x \leq u , where :math:`x` is a vector of decision variables; :math:`c`, :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and :math:`A_{ub}` and :math:`A_{eq}` are matrices. Alternatively, that's: minimize:: c @ x such that:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub Note that by default ``lb = 0`` and ``ub = None`` unless specified with ``bounds``. Parameters ---------- c : 1-D array The coefficients of the linear objective function to be minimized. A_ub : 2-D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1-D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2-D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1-D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : sequence, optional A sequence of ``(min, max)`` pairs for each element in ``x``, defining the minimum and maximum values of that decision variable. 
Use ``None`` to indicate that there is no bound. By default, bounds are ``(0, None)`` (all decision variables are non-negative). If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` will serve as bounds for all decision variables. method : str This is the method-specific documentation for 'simplex'. :ref:`'highs' <optimize.linprog-highs>`, :ref:`'highs-ds' <optimize.linprog-highs-ds>`, :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, :ref:`'interior-point' <optimize.linprog-interior-point>` (default), and :ref:`'revised simplex' <optimize.linprog-revised_simplex>` are also available. callback : callable, optional Callback function to be executed once per iteration. Options ------- maxiter : int (default: 5000) The maximum number of iterations to perform in either phase. disp : bool (default: False) Set to ``True`` if indicators of optimization status are to be printed to the console each iteration. presolve : bool (default: True) Presolve attempts to identify trivial infeasibilities, identify trivial unboundedness, and simplify the problem before sending it to the main solver. It is generally recommended to keep the default setting ``True``; set to ``False`` if presolve is to be disabled. tol : float (default: 1e-12) The tolerance which determines when a solution is "close enough" to zero in Phase 1 to be considered a basic feasible solution or close enough to positive to serve as an optimal solution. autoscale : bool (default: False) Set to ``True`` to automatically perform equilibration. Consider using this option if the numerical values in the constraints are separated by several orders of magnitude. rr : bool (default: True) Set to ``False`` to disable automatic redundancy removal. bland : bool If True, use Bland's anti-cycling rule [3]_ to choose pivots to prevent cycling. If False, choose pivots which should lead to a converged solution more quickly. The latter method is subject to cycling (non-convergence) in rare instances. unknown_options : dict Optional arguments not used by this particular solver. If `unknown_options` is non-empty a warning is issued listing all unused options. Returns ------- res : OptimizeResult A :class:`scipy.optimize.OptimizeResult` consisting of the fields: x : 1-D array The values of the decision variables that minimizes the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. slack : 1-D array The (nominally positive) values of the slack variables, ``b_ub - A_ub @ x``. con : 1-D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. success : bool ``True`` when the algorithm succeeds in finding an optimal solution. status : int An integer representing the exit status of the algorithm. ``0`` : Optimization terminated successfully. ``1`` : Iteration limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : Numerical difficulties encountered. message : str A string descriptor of the exit status of the algorithm. nit : int The total number of iterations performed in all phases. References ---------- .. [1] Dantzig, George B., Linear programming and extensions. Rand Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963 .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to Mathematical Programming", McGraw-Hill, Chapter 4. .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. Mathematics of Operations Research (2), 1977: pp. 103-107. """ pass
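# ---------------------------------------------------------------------------
# Illustrative usage (an addition for exposition, not part of SciPy): the
# doc-only stubs above hold the text that `scipy.optimize.show_options`
# renders for each `linprog` method, e.g. the 'highs-ipm' documentation.
if __name__ == "__main__":  # pragma: no cover
    from scipy.optimize import show_options
    print(show_options(solver='linprog', method='highs-ipm', disp=False))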
61,943
42.166551
143
py
scipy
scipy-main/scipy/optimize/_linprog.py
""" A top-level linear programming interface. .. versionadded:: 0.15.0 Functions --------- .. autosummary:: :toctree: generated/ linprog linprog_verbose_callback linprog_terse_callback """ import numpy as np from ._optimize import OptimizeResult, OptimizeWarning from warnings import warn from ._linprog_highs import _linprog_highs from ._linprog_ip import _linprog_ip from ._linprog_simplex import _linprog_simplex from ._linprog_rs import _linprog_rs from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc, _linprog_rs_doc, _linprog_simplex_doc, _linprog_highs_ipm_doc, _linprog_highs_ds_doc) from ._linprog_util import ( _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale, _postsolve, _check_result, _display_summary) from copy import deepcopy __all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback'] __docformat__ = "restructuredtext en" LINPROG_METHODS = ['simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm'] def linprog_verbose_callback(res): """ A sample callback function demonstrating the linprog callback interface. This callback produces detailed output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. Parameters ---------- res : A `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1-D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. success : bool True if the algorithm succeeded in finding an optimal solution. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` phase : int The phase of the optimization being executed. In phase 1 a basic feasible solution is sought and the T has an additional row representing an alternate objective function. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. """ x = res['x'] fun = res['fun'] phase = res['phase'] status = res['status'] nit = res['nit'] message = res['message'] complete = res['complete'] saved_printoptions = np.get_printoptions() np.set_printoptions(linewidth=500, formatter={'float': lambda x: f"{x: 12.4f}"}) if status: print('--------- Simplex Early Exit -------\n') print(f'The simplex method exited early with status {status:d}') print(message) elif complete: print('--------- Simplex Complete --------\n') print(f'Iterations required: {nit}') else: print(f'--------- Iteration {nit:d} ---------\n') if nit > 0: if phase == 1: print('Current Pseudo-Objective Value:') else: print('Current Objective Value:') print('f = ', fun) print() print('Current Solution Vector:') print('x = ', x) print() np.set_printoptions(**saved_printoptions) def linprog_terse_callback(res): """ A sample callback function demonstrating the linprog callback interface. This callback produces brief output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. 
Parameters ---------- res : A `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1-D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. success : bool True if the algorithm succeeded in finding an optimal solution. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x``. phase : int The phase of the optimization being executed. In phase 1 a basic feasible solution is sought and the T has an additional row representing an alternate objective function. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. """ nit = res['nit'] x = res['x'] if nit == 0: print("Iter: X:") print(f"{nit: <5d} ", end="") print(x) def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='highs', callback=None, options=None, x0=None, integrality=None): r""" Linear programming: minimize a linear objective function subject to linear equality and inequality constraints. Linear programming solves problems of the following form: .. math:: \min_x \ & c^T x \\ \mbox{such that} \ & A_{ub} x \leq b_{ub},\\ & A_{eq} x = b_{eq},\\ & l \leq x \leq u , where :math:`x` is a vector of decision variables; :math:`c`, :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and :math:`A_{ub}` and :math:`A_{eq}` are matrices. Alternatively, that's: - minimize :: c @ x - such that :: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub Note that by default ``lb = 0`` and ``ub = None``. Other bounds can be specified with ``bounds``. Parameters ---------- c : 1-D array The coefficients of the linear objective function to be minimized. A_ub : 2-D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1-D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2-D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1-D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : sequence, optional A sequence of ``(min, max)`` pairs for each element in ``x``, defining the minimum and maximum values of that decision variable. If a single tuple ``(min, max)`` is provided, then ``min`` and ``max`` will serve as bounds for all decision variables. Use ``None`` to indicate that there is no bound. For instance, the default bound ``(0, None)`` means that all decision variables are non-negative, and the pair ``(None, None)`` means no bounds at all, i.e. all variables are allowed to be any real. method : str, optional The algorithm used to solve the standard form problem. 
:ref:`'highs' <optimize.linprog-highs>` (default), :ref:`'highs-ds' <optimize.linprog-highs-ds>`, :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, :ref:`'interior-point' <optimize.linprog-interior-point>` (legacy), :ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy), and :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported. The legacy methods are deprecated and will be removed in SciPy 1.11.0. callback : callable, optional If a callback function is provided, it will be called at least once per iteration of the algorithm. The callback function must accept a single `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1-D array The current solution vector. fun : float The current value of the objective function ``c @ x``. success : bool ``True`` when the algorithm has completed successfully. slack : 1-D array The (nominally positive) values of the slack, ``b_ub - A_ub @ x``. con : 1-D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. phase : int The phase of the algorithm being executed. status : int An integer representing the status of the algorithm. ``0`` : Optimization proceeding nominally. ``1`` : Iteration limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : Numerical difficulties encountered. nit : int The current iteration number. message : str A string descriptor of the algorithm status. Callback functions are not currently supported by the HiGHS methods. options : dict, optional A dictionary of solver options. All methods accept the following options: maxiter : int Maximum number of iterations to perform. Default: see method-specific documentation. disp : bool Set to ``True`` to print convergence messages. Default: ``False``. presolve : bool Set to ``False`` to disable automatic presolve. Default: ``True``. All methods except the HiGHS solvers also accept: tol : float A tolerance which determines when a residual is "close enough" to zero to be considered exactly zero. autoscale : bool Set to ``True`` to automatically perform equilibration. Consider using this option if the numerical values in the constraints are separated by several orders of magnitude. Default: ``False``. rr : bool Set to ``False`` to disable automatic redundancy removal. Default: ``True``. rr_method : string Method used to identify and remove redundant rows from the equality constraint matrix after presolve. For problems with dense input, the available methods for redundancy removal are: "SVD": Repeatedly performs singular value decomposition on the matrix, detecting redundant rows based on nonzeros in the left singular vectors that correspond with zero singular values. May be fast when the matrix is nearly full rank. "pivot": Uses the algorithm presented in [5]_ to identify redundant rows. "ID": Uses a randomized interpolative decomposition. Identifies columns of the matrix transpose not used in a full-rank interpolative decomposition of the matrix. None: Uses "svd" if the matrix is nearly full rank, that is, the difference between the matrix rank and the number of rows is less than five. If not, uses "pivot". The behavior of this default is subject to change without prior notice. Default: None. For problems with sparse input, this option is ignored, and the pivot-based algorithm presented in [5]_ is used. For method-specific options, see :func:`show_options('linprog') <show_options>`. 
x0 : 1-D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. integrality : 1-D array or int, optional Indicates the type of integrality constraint on each decision variable. ``0`` : Continuous variable; no integrality constraint. ``1`` : Integer variable; decision variable must be an integer within `bounds`. ``2`` : Semi-continuous variable; decision variable must be within `bounds` or take value ``0``. ``3`` : Semi-integer variable; decision variable must be an integer within `bounds` or take value ``0``. By default, all variables are continuous. For mixed integrality constraints, supply an array of shape `c.shape`. To infer a constraint on each decision variable from shorter inputs, the argument will be broadcasted to `c.shape` using `np.broadcast_to`. This argument is currently used only by the ``'highs'`` method and ignored otherwise. Returns ------- res : OptimizeResult A :class:`scipy.optimize.OptimizeResult` consisting of the fields below. Note that the return types of the fields may depend on whether the optimization was successful, therefore it is recommended to check `OptimizeResult.status` before relying on the other fields: x : 1-D array The values of the decision variables that minimizes the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. slack : 1-D array The (nominally positive) values of the slack variables, ``b_ub - A_ub @ x``. con : 1-D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. success : bool ``True`` when the algorithm succeeds in finding an optimal solution. status : int An integer representing the exit status of the algorithm. ``0`` : Optimization terminated successfully. ``1`` : Iteration limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : Numerical difficulties encountered. nit : int The total number of iterations performed in all phases. message : str A string descriptor of the exit status of the algorithm. See Also -------- show_options : Additional options accepted by the solvers. Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. `'highs-ds'` and `'highs-ipm'` are interfaces to the HiGHS simplex and interior-point method solvers [13]_, respectively. `'highs'` (default) chooses between the two automatically. These are the fastest linear programming solvers in SciPy, especially for large, sparse problems; which of these two is faster is problem-dependent. The other solvers (`'interior-point'`, `'revised simplex'`, and `'simplex'`) are legacy methods and will be removed in SciPy 1.11.0. Method *highs-ds* is a wrapper of the C++ high performance dual revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm* is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint **m**\ ethod [13]_; it features a crossover routine, so it is as accurate as a simplex solver. Method *highs* chooses between the two automatically. For new code involving `linprog`, we recommend explicitly choosing one of these three method values. .. versionadded:: 1.6.0 Method *interior-point* uses the primal-dual path following algorithm as outlined in [4]_. 
This algorithm supports sparse constraint matrices and is typically faster than the simplex methods, especially for large, sparse problems. Note, however, that the solution returned may be slightly less accurate than those of the simplex methods and will not, in general, correspond with a vertex of the polytope defined by the constraints. .. versionadded:: 1.0.0 Method *revised simplex* uses the revised simplex method as described in [9]_, except that a factorization [11]_ of the basis matrix, rather than its inverse, is efficiently maintained and used to solve the linear systems at each iteration of the algorithm. .. versionadded:: 1.3.0 Method *simplex* uses a traditional, full-tableau implementation of Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex). This algorithm is included for backwards compatibility and educational purposes. .. versionadded:: 0.15.0 Before applying *interior-point*, *revised simplex*, or *simplex*, a presolve procedure based on [8]_ attempts to identify trivial infeasibilities, trivial unboundedness, and potential problem simplifications. Specifically, it checks for: - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints; - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained variables; - column singletons in ``A_eq``, representing fixed variables; and - column singletons in ``A_ub``, representing simple bounds. If presolve reveals that the problem is unbounded (e.g. an unconstrained and unbounded variable has negative cost) or infeasible (e.g., a row of zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver terminates with the appropriate status code. Note that presolve terminates as soon as any sign of unboundedness is detected; consequently, a problem may be reported as unbounded when in reality the problem is infeasible (but infeasibility has not been detected yet). Therefore, if it is important to know whether the problem is actually infeasible, solve the problem again with option ``presolve=False``. If neither infeasibility nor unboundedness are detected in a single pass of the presolve, bounds are tightened where possible and fixed variables are removed from the problem. Then, linearly dependent rows of the ``A_eq`` matrix are removed, (unless they represent an infeasibility) to avoid numerical difficulties in the primary solve routine. Note that rows that are nearly linearly dependent (within a prescribed tolerance) may also be removed, which can change the optimal solution in rare cases. If this is a concern, eliminate redundancy from your problem formulation and run with option ``rr=False`` or ``presolve=False``. Several potential improvements can be made here: additional presolve checks outlined in [8]_ should be implemented, the presolve routine should be run multiple times (until no further simplifications can be made), and more of the efficiency improvements from [5]_ should be implemented in the redundancy removal routines. After presolve, the problem is transformed to standard form by converting the (tightened) simple bounds to upper bound constraints, introducing non-negative slack variables for inequality constraints, and expressing unbounded variables as the difference between two non-negative variables. Optionally, the problem is automatically scaled via equilibration [12]_. The selected algorithm solves the standard form problem, and a postprocessing routine converts the result to a solution to the original problem. References ---------- .. 
[1] Dantzig, George B., Linear programming and extensions. Rand Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963 .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to Mathematical Programming", McGraw-Hill, Chapter 4. .. [3] Bland, Robert G. New finite pivoting rules for the simplex method. Mathematics of Operations Research (2), 1977: pp. 103-107. .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. .. [5] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear Programming based on Newton's Method." Unpublished Course Notes, March 2004. Available 2/25/2017 at https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods." Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at http://www.4er.org/CourseNotes/Book%20B/B-III.pdf .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear programming." Mathematical Programming 71.2 (1995): 221-245. .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. .. [10] Andersen, Erling D., et al. Implementation of interior point methods for large scale linear programming. HEC/Universite de Geneve, 1996. .. [11] Bartels, Richard H. "A stabilization of the simplex method." Journal in Numerische Mathematik 16.5 (1971): 414-434. .. [12] Tomlin, J. A. "On scaling linear programming problems." Mathematical Programming Study 4 (1975): 146-166. .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J. "HiGHS - high performance software for linear optimization." https://highs.dev/ .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised simplex method." Mathematical Programming Computation, 10 (1), 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 Examples -------- Consider the following problem: .. math:: \min_{x_0, x_1} \ -x_0 + 4x_1 & \\ \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\ -x_0 - 2x_1 & \geq -4,\\ x_1 & \geq -3. The problem is not presented in the form accepted by `linprog`. This is easily remedied by converting the "greater than" inequality constraint to a "less than" inequality constraint by multiplying both sides by a factor of :math:`-1`. Note also that the last constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`. Finally, since there are no bounds on :math:`x_0`, we must explicitly specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the default is for variables to be non-negative. After collecting coeffecients into arrays and tuples, the input for this problem is: >>> from scipy.optimize import linprog >>> c = [-1, 4] >>> A = [[-3, 1], [1, 2]] >>> b = [6, 4] >>> x0_bounds = (None, None) >>> x1_bounds = (-3, None) >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]) >>> res.fun -22.0 >>> res.x array([10., -3.]) >>> res.message 'Optimization terminated successfully. (HiGHS Status 7: Optimal)' The marginals (AKA dual values / shadow prices / Lagrange multipliers) and residuals (slacks) are also available. 
>>> res.ineqlin residual: [ 3.900e+01 0.000e+00] marginals: [-0.000e+00 -1.000e+00] For example, because the marginal associated with the second inequality constraint is -1, we expect the optimal value of the objective function to decrease by ``eps`` if we add a small amount ``eps`` to the right hand side of the second inequality constraint: >>> eps = 0.05 >>> b[1] += eps >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun -22.05 Also, because the residual on the first inequality constraint is 39, we can decrease the right hand side of the first constraint by 39 without affecting the optimal solution. >>> b = [6, 4] # reset to original values >>> b[0] -= 39 >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun -22.0 """ meth = method.lower() methods = {"highs", "highs-ds", "highs-ipm", "simplex", "revised simplex", "interior-point"} if meth not in methods: raise ValueError(f"Unknown solver '{method}'") if x0 is not None and meth != "revised simplex": warning_message = "x0 is used only when method is 'revised simplex'. " warn(warning_message, OptimizeWarning) if np.any(integrality) and not meth == "highs": integrality = None warning_message = ("Only `method='highs'` supports integer " "constraints. Ignoring `integrality`.") warn(warning_message, OptimizeWarning) elif np.any(integrality): integrality = np.broadcast_to(integrality, np.shape(c)) lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality) lp, solver_options = _parse_linprog(lp, options, meth) tol = solver_options.get('tol', 1e-9) # Give unmodified problem to HiGHS if meth.startswith('highs'): if callback is not None: raise NotImplementedError("HiGHS solvers do not support the " "callback interface.") highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex', 'highs': None} sol = _linprog_highs(lp, solver=highs_solvers[meth], **solver_options) sol['status'], sol['message'] = ( _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'], sol['con'], lp.bounds, tol, sol['message'], integrality)) sol['success'] = sol['status'] == 0 return OptimizeResult(sol) warn(f"`method='{meth}'` is deprecated and will be removed in SciPy " "1.11.0. Please use one of the HiGHS solvers (e.g. " "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2) iteration = 0 complete = False # will become True if solved in presolve undo = [] # Keep the original arrays to calculate slack/residuals for original # problem. lp_o = deepcopy(lp) # Solve trivial problem, eliminate variables, tighten bounds, etc. 
rr_method = solver_options.pop('rr_method', None) # need to pop these; rr = solver_options.pop('rr', True) # they're not passed to methods c0 = 0 # we might get a constant term in the objective if solver_options.pop('presolve', True): (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, rr_method, tol) C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale) if not complete: A, b, c, c0, x0 = _get_Abc(lp, c0) if solver_options.pop('autoscale', False): A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0) postsolve_args = postsolve_args[:-2] + (C, b_scale) if meth == 'simplex': x, status, message, iteration = _linprog_simplex( c, c0=c0, A=A, b=b, callback=callback, postsolve_args=postsolve_args, **solver_options) elif meth == 'interior-point': x, status, message, iteration = _linprog_ip( c, c0=c0, A=A, b=b, callback=callback, postsolve_args=postsolve_args, **solver_options) elif meth == 'revised simplex': x, status, message, iteration = _linprog_rs( c, c0=c0, A=A, b=b, x0=x0, callback=callback, postsolve_args=postsolve_args, **solver_options) # Eliminate artificial variables, re-introduce presolved variables, etc. disp = solver_options.get('disp', False) x, fun, slack, con = _postsolve(x, postsolve_args, complete) status, message = _check_result(x, fun, status, slack, con, lp_o.bounds, tol, message, integrality) if disp: _display_summary(message, status, fun, iteration) sol = { 'x': x, 'fun': fun, 'slack': slack, 'con': con, 'status': status, 'message': message, 'nit': iteration, 'success': status == 0} return OptimizeResult(sol)
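# ---------------------------------------------------------------------------
# Illustrative usage (an addition for exposition, not shipped with SciPy):
# a minimal exercise of the callback interface documented above. Callbacks
# are only supported by the legacy methods, so the deprecated 'simplex'
# method is used here; it emits a DeprecationWarning when called.
if __name__ == "__main__":  # pragma: no cover
    c_demo = [1.0, 2.0]          # minimize x0 + 2*x1
    A_ub_demo = [[-1.0, -1.0]]   # x0 + x1 >= 1, written as -x0 - x1 <= -1
    b_ub_demo = [-1.0]
    res_demo = linprog(c_demo, A_ub=A_ub_demo, b_ub=b_ub_demo,
                       method='simplex', callback=linprog_terse_callback)
    # optimal solution is x = (1, 0) with objective value 1
    print(res_demo.x, res_demo.fun)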
29,666
40.608696
143
py
scipy
scipy-main/scipy/optimize/tnc.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.optimize` namespace for importing the functions # included below. import warnings from . import _tnc __all__ = [ # noqa: F822 'CONSTANT', 'FCONVERGED', 'INFEASIBLE', 'LOCALMINIMUM', 'LSFAIL', 'MAXFUN', 'MSGS', 'MSG_ALL', 'MSG_EXIT', 'MSG_INFO', 'MSG_ITER', 'MSG_NONE', 'MSG_VERS', 'MemoizeJac', 'NOPROGRESS', 'OptimizeResult', 'RCSTRINGS', 'USERABORT', 'XCONVERGED', 'array', 'asfarray', 'fmin_tnc', 'inf', 'moduleTNC', 'old_bound_to_new', 'zeros', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.optimize.tnc is deprecated and has no attribute " f"{name}. Try looking in scipy.optimize instead.") warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, " "the `scipy.optimize.tnc` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_tnc, name)
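# Illustrative check (an addition for exposition, not part of SciPy): any
# attribute access through this deprecated shim emits a DeprecationWarning
# and forwards the lookup to the private `scipy.optimize._tnc` module.
if __name__ == "__main__":  # pragma: no cover
    import scipy.optimize.tnc as deprecated_tnc
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _ = deprecated_tnc.fmin_tnc  # resolved via the module __getattr__ above
    print([str(w.message) for w in caught])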
1,148
20.277778
78
py
scipy
scipy-main/scipy/optimize/_direct_py.py
from __future__ import annotations from typing import ( # noqa: UP035 Any, Callable, Iterable, TYPE_CHECKING ) import numpy as np from scipy.optimize import OptimizeResult from ._constraints import old_bound_to_new, Bounds from ._direct import direct as _direct # type: ignore if TYPE_CHECKING: import numpy.typing as npt __all__ = ['direct'] ERROR_MESSAGES = ( "Number of function evaluations done is larger than maxfun={}", "Number of iterations is larger than maxiter={}", "u[i] < l[i] for some i", "maxfun is too large", "Initialization failed", "There was an error in the creation of the sample points", "An error occurred while the function was sampled", "Maximum number of levels has been reached.", "Forced stop", "Invalid arguments", "Out of memory", ) SUCCESS_MESSAGES = ( ("The best function value found is within a relative error={} " "of the (known) global optimum f_min"), ("The volume of the hyperrectangle containing the lowest function value " "found is below vol_tol={}"), ("The side length measure of the hyperrectangle containing the lowest " "function value found is below len_tol={}"), ) def direct( func: Callable[[npt.ArrayLike, tuple[Any]], float], bounds: Iterable | Bounds, *, args: tuple = (), eps: float = 1e-4, maxfun: int | None = None, maxiter: int = 1000, locally_biased: bool = True, f_min: float = -np.inf, f_min_rtol: float = 1e-4, vol_tol: float = 1e-16, len_tol: float = 1e-6, callback: Callable[[npt.ArrayLike], None] | None = None ) -> OptimizeResult: """ Finds the global minimum of a function using the DIRECT algorithm. Parameters ---------- func : callable The objective function to be minimized. ``func(x, *args) -> float`` where ``x`` is an 1-D array with shape (n,) and ``args`` is a tuple of the fixed parameters needed to completely specify the function. bounds : sequence or `Bounds` Bounds for variables. There are two ways to specify the bounds: 1. Instance of `Bounds` class. 2. ``(min, max)`` pairs for each element in ``x``. args : tuple, optional Any additional fixed parameters needed to completely specify the objective function. eps : float, optional Minimal required difference of the objective function values between the current best hyperrectangle and the next potentially optimal hyperrectangle to be divided. In consequence, `eps` serves as a tradeoff between local and global search: the smaller, the more local the search becomes. Default is 1e-4. maxfun : int or None, optional Approximate upper bound on objective function evaluations. If `None`, will be automatically set to ``1000 * N`` where ``N`` represents the number of dimensions. Will be capped if necessary to limit DIRECT's RAM usage to app. 1GiB. This will only occur for very high dimensional problems and excessive `max_fun`. Default is `None`. maxiter : int, optional Maximum number of iterations. Default is 1000. locally_biased : bool, optional If `True` (default), use the locally biased variant of the algorithm known as DIRECT_L. If `False`, use the original unbiased DIRECT algorithm. For hard problems with many local minima, `False` is recommended. f_min : float, optional Function value of the global optimum. Set this value only if the global optimum is known. Default is ``-np.inf``, so that this termination criterion is deactivated. f_min_rtol : float, optional Terminate the optimization once the relative error between the current best minimum `f` and the supplied global minimum `f_min` is smaller than `f_min_rtol`. This parameter is only used if `f_min` is also set. Must lie between 0 and 1. 
Default is 1e-4. vol_tol : float, optional Terminate the optimization once the volume of the hyperrectangle containing the lowest function value is smaller than `vol_tol` of the complete search space. Must lie between 0 and 1. Default is 1e-16. len_tol : float, optional If `locally_biased=True`, terminate the optimization once half of the normalized maximal side length of the hyperrectangle containing the lowest function value is smaller than `len_tol`. If `locally_biased=False`, terminate the optimization once half of the normalized diagonal of the hyperrectangle containing the lowest function value is smaller than `len_tol`. Must lie between 0 and 1. Default is 1e-6. callback : callable, optional A callback function with signature ``callback(xk)`` where ``xk`` represents the best function value found so far. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. Notes ----- DIviding RECTangles (DIRECT) is a deterministic global optimization algorithm capable of minimizing a black box function with its variables subject to lower and upper bound constraints by sampling potential solutions in the search space [1]_. The algorithm starts by normalising the search space to an n-dimensional unit hypercube. It samples the function at the center of this hypercube and at 2n (n is the number of variables) more points, 2 in each coordinate direction. Using these function values, DIRECT then divides the domain into hyperrectangles, each having exactly one of the sampling points as its center. In each iteration, DIRECT chooses, using the `eps` parameter which defaults to 1e-4, some of the existing hyperrectangles to be further divided. This division process continues until either the maximum number of iterations or maximum function evaluations allowed are exceeded, or the hyperrectangle containing the minimal value found so far becomes small enough. If `f_min` is specified, the optimization will stop once this function value is reached within a relative tolerance. The locally biased variant of DIRECT (originally called DIRECT_L) [2]_ is used by default. It makes the search more locally biased and more efficient for cases with only a few local minima. A note about termination criteria: `vol_tol` refers to the volume of the hyperrectangle containing the lowest function value found so far. This volume decreases exponentially with increasing dimensionality of the problem. Therefore `vol_tol` should be decreased to avoid premature termination of the algorithm for higher dimensions. This does not hold for `len_tol`: it refers either to half of the maximal side length (for ``locally_biased=True``) or half of the diagonal of the hyperrectangle (for ``locally_biased=False``). This code is based on the DIRECT 2.0.4 Fortran code by Gablonsky et al. at https://ctk.math.ncsu.edu/SOFTWARE/DIRECTv204.tar.gz . This original version was initially converted via f2c and then cleaned up and reorganized by Steven G. Johnson, August 2007, for the NLopt project. The `direct` function wraps the C implementation. .. versionadded:: 1.9.0 References ---------- .. [1] Jones, D.R., Perttunen, C.D. & Stuckman, B.E. Lipschitzian optimization without the Lipschitz constant. J Optim Theory Appl 79, 157-181 (1993). .. 
[2] Gablonsky, J., Kelley, C. A Locally-Biased form of the DIRECT Algorithm. Journal of Global Optimization 21, 27-37 (2001). Examples -------- The following example is a 2-D problem with four local minima: minimizing the Styblinski-Tang function (https://en.wikipedia.org/wiki/Test_functions_for_optimization). >>> from scipy.optimize import direct, Bounds >>> def styblinski_tang(pos): ... x, y = pos ... return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y) >>> bounds = Bounds([-4., -4.], [4., 4.]) >>> result = direct(styblinski_tang, bounds) >>> result.x, result.fun, result.nfev array([-2.90321597, -2.90321597]), -78.3323279095383, 2011 The correct global minimum was found but with a huge number of function evaluations (2011). Loosening the termination tolerances `vol_tol` and `len_tol` can be used to stop DIRECT earlier. >>> result = direct(styblinski_tang, bounds, len_tol=1e-3) >>> result.x, result.fun, result.nfev array([-2.9044353, -2.9044353]), -78.33230330754142, 207 """ # convert bounds to new Bounds class if necessary if not isinstance(bounds, Bounds): if isinstance(bounds, list) or isinstance(bounds, tuple): lb, ub = old_bound_to_new(bounds) bounds = Bounds(lb, ub) else: message = ("bounds must be a sequence or " "instance of Bounds class") raise ValueError(message) lb = np.ascontiguousarray(bounds.lb, dtype=np.float64) ub = np.ascontiguousarray(bounds.ub, dtype=np.float64) # validate bounds # check that lower bounds are smaller than upper bounds if not np.all(lb < ub): raise ValueError('Bounds are not consistent min < max') # check for infs if (np.any(np.isinf(lb)) or np.any(np.isinf(ub))): raise ValueError("Bounds must not be inf.") # validate tolerances if (vol_tol < 0 or vol_tol > 1): raise ValueError("vol_tol must be between 0 and 1.") if (len_tol < 0 or len_tol > 1): raise ValueError("len_tol must be between 0 and 1.") if (f_min_rtol < 0 or f_min_rtol > 1): raise ValueError("f_min_rtol must be between 0 and 1.") # validate maxfun and maxiter if maxfun is None: maxfun = 1000 * lb.shape[0] if not isinstance(maxfun, int): raise ValueError("maxfun must be of type int.") if maxfun < 0: raise ValueError("maxfun must be > 0.") if not isinstance(maxiter, int): raise ValueError("maxiter must be of type int.") if maxiter < 0: raise ValueError("maxiter must be > 0.") # validate boolean parameters if not isinstance(locally_biased, bool): raise ValueError("locally_biased must be True or False.") def _func_wrap(x, args=None): x = np.asarray(x) if args is None: f = func(x) else: f = func(x, *args) # always return a float return np.asarray(f).item() # TODO: fix disp argument x, fun, ret_code, nfev, nit = _direct( _func_wrap, np.asarray(lb), np.asarray(ub), args, False, eps, maxfun, maxiter, locally_biased, f_min, f_min_rtol, vol_tol, len_tol, callback ) format_val = (maxfun, maxiter, f_min_rtol, vol_tol, len_tol) if ret_code > 2: message = SUCCESS_MESSAGES[ret_code - 3].format( format_val[ret_code - 1]) elif 0 < ret_code <= 2: message = ERROR_MESSAGES[ret_code - 1].format(format_val[ret_code - 1]) elif 0 > ret_code > -100: message = ERROR_MESSAGES[abs(ret_code) + 1] else: message = ERROR_MESSAGES[ret_code + 99] return OptimizeResult(x=np.asarray(x), fun=fun, status=ret_code, success=ret_code > 2, message=message, nfev=nfev, nit=nit)
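A minimal usage sketch of the `f_min` stopping criterion and the `callback` hook documented above, assuming SciPy >= 1.9 is installed. The target value is the Styblinski-Tang optimum quoted in the Examples section, and the helper names (`record`, `best_so_far`) are illustrative only:

import numpy as np
from scipy.optimize import direct, Bounds

def styblinski_tang(pos):
    x, y = pos
    return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y)

best_so_far = []

def record(xk):
    # store whatever the callback reports at each iteration
    best_so_far.append(np.copy(xk))

bounds = Bounds([-4., -4.], [4., 4.])
# Terminate once the best value found is within f_min_rtol of the supplied optimum.
result = direct(styblinski_tang, bounds, f_min=-78.33233,
                f_min_rtol=1e-3, callback=record)
print(result.message, result.fun, result.nfev)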
11,798
41.290323
79
py
scipy
scipy-main/scipy/optimize/optimize.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.optimize` namespace for importing the functions
# included below.

import warnings

from . import _optimize

__all__ = [  # noqa: F822
    'Brent',
    'FD_METHODS',
    'LineSearchWarning',
    'MapWrapper',
    'MemoizeJac',
    'OptimizeResult',
    'OptimizeWarning',
    'ScalarFunction',
    'approx_derivative',
    'approx_fhess_p',
    'approx_fprime',
    'argmin',
    'asarray',
    'asfarray',
    'atleast_1d',
    'bracket',
    'brent',
    'brute',
    'check_grad',
    'check_random_state',
    'eye',
    'fmin',
    'fmin_bfgs',
    'fmin_cg',
    'fmin_ncg',
    'fmin_powell',
    'fminbound',
    'golden',
    'is_array_scalar',
    'line_search',
    'line_search_wolfe1',
    'line_search_wolfe2',
    'main',
    'rosen',
    'rosen_der',
    'rosen_hess',
    'rosen_hess_prod',
    'shape',
    'show_options',
    'sqrt',
    'squeeze',
    'sys',
    'vecnorm',
    'zeros',
]


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        raise AttributeError(
            "scipy.optimize.optimize is deprecated and has no attribute "
            f"{name}. Try looking in scipy.optimize instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
                  "the `scipy.optimize.optimize` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_optimize, name)
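A short sketch, assuming SciPy is installed, of how the shim above behaves in practice: accessing any listed name through the deprecated `scipy.optimize.optimize` path triggers the module-level `__getattr__`, which emits a `DeprecationWarning` and forwards the lookup to the private `_optimize` module:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from scipy.optimize.optimize import rosen  # deprecated import path
    print(rosen([1.0, 1.0]))  # 0.0, the Rosenbrock minimum
print(any(issubclass(w.category, DeprecationWarning) for w in caught))  # True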
1,513
20.027778
78
py
scipy
scipy-main/scipy/optimize/_linprog_util.py
""" Method agnostic utility functions for linear progamming """ import numpy as np import scipy.sparse as sps from warnings import warn from ._optimize import OptimizeWarning from scipy.optimize._remove_redundancy import ( _remove_redundancy_svd, _remove_redundancy_pivot_sparse, _remove_redundancy_pivot_dense, _remove_redundancy_id ) from collections import namedtuple _LPProblem = namedtuple('_LPProblem', 'c A_ub b_ub A_eq b_eq bounds x0 integrality') _LPProblem.__new__.__defaults__ = (None,) * 7 # make c the only required arg _LPProblem.__doc__ = \ """ Represents a linear-programming problem. Attributes ---------- c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : various valid formats, optional The bounds of ``x``, as ``min`` and ``max`` pairs. If bounds are specified for all N variables separately, valid formats are: * a 2D array (N x 2); * a sequence of N sequences, each with 2 values. If all variables have the same bounds, the bounds can be specified as a 1-D or 2-D array or sequence with 2 scalar values. If all variables have a lower bound of 0 and no upper bound, the bounds parameter can be omitted (or given as None). Absent lower and/or upper bounds can be specified as -numpy.inf (no lower bound), numpy.inf (no upper bound) or None (both). x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. integrality : 1-D array or int, optional Indicates the type of integrality constraint on each decision variable. ``0`` : Continuous variable; no integrality constraint. ``1`` : Integer variable; decision variable must be an integer within `bounds`. ``2`` : Semi-continuous variable; decision variable must be within `bounds` or take value ``0``. ``3`` : Semi-integer variable; decision variable must be an integer within `bounds` or take value ``0``. By default, all variables are continuous. For mixed integrality constraints, supply an array of shape `c.shape`. To infer a constraint on each decision variable from shorter inputs, the argument will be broadcasted to `c.shape` using `np.broadcast_to`. This argument is currently used only by the ``'highs'`` method and ignored otherwise. Notes ----- This namedtuple supports 2 ways of initialization: >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4]) >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4]) Note that only ``c`` is a required argument here, whereas all other arguments ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are optional with default values of None. 
For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``: >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10]) """ def _check_sparse_inputs(options, meth, A_ub, A_eq): """ Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified optional sparsity variables. Parameters ---------- A_ub : 2-D array, optional 2-D array such that ``A_ub @ x`` gives the values of the upper-bound inequality constraints at ``x``. A_eq : 2-D array, optional 2-D array such that ``A_eq @ x`` gives the values of the equality constraints at ``x``. options : dict A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options('linprog')`. method : str, optional The algorithm used to solve the standard form problem. Returns ------- A_ub : 2-D array, optional 2-D array such that ``A_ub @ x`` gives the values of the upper-bound inequality constraints at ``x``. A_eq : 2-D array, optional 2-D array such that ``A_eq @ x`` gives the values of the equality constraints at ``x``. options : dict A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options('linprog')`. """ # This is an undocumented option for unit testing sparse presolve _sparse_presolve = options.pop('_sparse_presolve', False) if _sparse_presolve and A_eq is not None: A_eq = sps.coo_matrix(A_eq) if _sparse_presolve and A_ub is not None: A_ub = sps.coo_matrix(A_ub) sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub) preferred_methods = {"highs", "highs-ds", "highs-ipm"} dense_methods = {"simplex", "revised simplex"} if meth in dense_methods and sparse_constraint: raise ValueError(f"Method '{meth}' does not support sparse " "constraint matrices. Please consider using one of " f"{preferred_methods}.") sparse = options.get('sparse', False) if not sparse and sparse_constraint and meth == 'interior-point': options['sparse'] = True warn("Sparse constraint matrix detected; setting 'sparse':True.", OptimizeWarning, stacklevel=4) return options, A_ub, A_eq def _format_A_constraints(A, n_x, sparse_lhs=False): """Format the left hand side of the constraints to a 2-D array Parameters ---------- A : 2-D array 2-D array such that ``A @ x`` gives the values of the upper-bound (in)equality constraints at ``x``. n_x : int The number of variables in the linear programming problem. sparse_lhs : bool Whether either of `A_ub` or `A_eq` are sparse. If true return a coo_matrix instead of a numpy array. Returns ------- np.ndarray or sparse.coo_matrix 2-D array such that ``A @ x`` gives the values of the upper-bound (in)equality constraints at ``x``. """ if sparse_lhs: return sps.coo_matrix( (0, n_x) if A is None else A, dtype=float, copy=True ) elif A is None: return np.zeros((0, n_x), dtype=float) else: return np.array(A, dtype=float, copy=True) def _format_b_constraints(b): """Format the upper bounds of the constraints to a 1-D array Parameters ---------- b : 1-D array 1-D array of values representing the upper-bound of each (in)equality constraint (row) in ``A``. Returns ------- 1-D np.array 1-D array of values representing the upper-bound of each (in)equality constraint (row) in ``A``. 
""" if b is None: return np.array([], dtype=float) b = np.array(b, dtype=float, copy=True).squeeze() return b if b.size != 1 else b.reshape(-1) def _clean_inputs(lp): """ Given user inputs for a linear programming problem, return the objective vector, upper bound constraints, equality constraints, and simple bounds in a preferred format. Parameters ---------- lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : various valid formats, optional The bounds of ``x``, as ``min`` and ``max`` pairs. If bounds are specified for all N variables separately, valid formats are: * a 2D array (2 x N or N x 2); * a sequence of N sequences, each with 2 values. If all variables have the same bounds, a single pair of values can be specified. Valid formats are: * a sequence with 2 scalar values; * a sequence with a single element containing 2 scalar values. If all variables have a lower bound of 0 and no upper bound, the bounds parameter can be omitted (or given as None). x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. Returns ------- lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : 2D array The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N elements of ``x``. The N x 2 array contains lower bounds in the first column and upper bounds in the 2nd. Unbounded variables have lower bound -np.inf and/or upper bound np.inf. x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. """ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp if c is None: raise TypeError try: c = np.array(c, dtype=np.float64, copy=True).squeeze() except ValueError as e: raise TypeError( "Invalid input for linprog: c must be a 1-D array of numerical " "coefficients") from e else: # If c is a single value, convert it to a 1-D array. 
if c.size == 1: c = c.reshape(-1) n_x = len(c) if n_x == 0 or len(c.shape) != 1: raise ValueError( "Invalid input for linprog: c must be a 1-D array and must " "not have more than one non-singleton dimension") if not np.isfinite(c).all(): raise ValueError( "Invalid input for linprog: c must not contain values " "inf, nan, or None") sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub) try: A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs) except ValueError as e: raise TypeError( "Invalid input for linprog: A_ub must be a 2-D array " "of numerical values") from e else: n_ub = A_ub.shape[0] if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x: raise ValueError( "Invalid input for linprog: A_ub must have exactly two " "dimensions, and the number of columns in A_ub must be " "equal to the size of c") if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all() or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()): raise ValueError( "Invalid input for linprog: A_ub must not contain values " "inf, nan, or None") try: b_ub = _format_b_constraints(b_ub) except ValueError as e: raise TypeError( "Invalid input for linprog: b_ub must be a 1-D array of " "numerical values, each representing the upper bound of an " "inequality constraint (row) in A_ub") from e else: if b_ub.shape != (n_ub,): raise ValueError( "Invalid input for linprog: b_ub must be a 1-D array; b_ub " "must not have more than one non-singleton dimension and " "the number of rows in A_ub must equal the number of values " "in b_ub") if not np.isfinite(b_ub).all(): raise ValueError( "Invalid input for linprog: b_ub must not contain values " "inf, nan, or None") try: A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs) except ValueError as e: raise TypeError( "Invalid input for linprog: A_eq must be a 2-D array " "of numerical values") from e else: n_eq = A_eq.shape[0] if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x: raise ValueError( "Invalid input for linprog: A_eq must have exactly two " "dimensions, and the number of columns in A_eq must be " "equal to the size of c") if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all() or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()): raise ValueError( "Invalid input for linprog: A_eq must not contain values " "inf, nan, or None") try: b_eq = _format_b_constraints(b_eq) except ValueError as e: raise TypeError( "Invalid input for linprog: b_eq must be a dense, 1-D array of " "numerical values, each representing the right hand side of an " "equality constraint (row) in A_eq") from e else: if b_eq.shape != (n_eq,): raise ValueError( "Invalid input for linprog: b_eq must be a 1-D array; b_eq " "must not have more than one non-singleton dimension and " "the number of rows in A_eq must equal the number of values " "in b_eq") if not np.isfinite(b_eq).all(): raise ValueError( "Invalid input for linprog: b_eq must not contain values " "inf, nan, or None") # x0 gives a (optional) starting solution to the solver. If x0 is None, # skip the checks. Initial solution will be generated automatically. 
if x0 is not None: try: x0 = np.array(x0, dtype=float, copy=True).squeeze() except ValueError as e: raise TypeError( "Invalid input for linprog: x0 must be a 1-D array of " "numerical coefficients") from e if x0.ndim == 0: x0 = x0.reshape(-1) if len(x0) == 0 or x0.ndim != 1: raise ValueError( "Invalid input for linprog: x0 should be a 1-D array; it " "must not have more than one non-singleton dimension") if not x0.size == c.size: raise ValueError( "Invalid input for linprog: x0 and c should contain the " "same number of elements") if not np.isfinite(x0).all(): raise ValueError( "Invalid input for linprog: x0 must not contain values " "inf, nan, or None") # Bounds can be one of these formats: # (1) a 2-D array or sequence, with shape N x 2 # (2) a 1-D or 2-D sequence or array with 2 scalars # (3) None (or an empty sequence or array) # Unspecified bounds can be represented by None or (-)np.inf. # All formats are converted into a N x 2 np.array with (-)np.inf where # bounds are unspecified. # Prepare clean bounds array bounds_clean = np.zeros((n_x, 2), dtype=float) # Convert to a numpy array. # np.array(..,dtype=float) raises an error if dimensions are inconsistent # or if there are invalid data types in bounds. Just add a linprog prefix # to the error and re-raise. # Creating at least a 2-D array simplifies the cases to distinguish below. if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]): bounds = (0, np.inf) try: bounds_conv = np.atleast_2d(np.array(bounds, dtype=float)) except ValueError as e: raise ValueError( "Invalid input for linprog: unable to interpret bounds, " "check values and dimensions: " + e.args[0]) from e except TypeError as e: raise TypeError( "Invalid input for linprog: unable to interpret bounds, " "check values and dimensions: " + e.args[0]) from e # Check bounds options bsh = bounds_conv.shape if len(bsh) > 2: # Do not try to handle multidimensional bounds input raise ValueError( "Invalid input for linprog: provide a 2-D array for bounds, " "not a {:d}-D array.".format(len(bsh))) elif np.all(bsh == (n_x, 2)): # Regular N x 2 array bounds_clean = bounds_conv elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))): # 2 values: interpret as overall lower and upper bound bounds_flat = bounds_conv.flatten() bounds_clean[:, 0] = bounds_flat[0] bounds_clean[:, 1] = bounds_flat[1] elif np.all(bsh == (2, n_x)): # Reject a 2 x N array raise ValueError( "Invalid input for linprog: provide a {:d} x 2 array for bounds, " "not a 2 x {:d} array.".format(n_x, n_x)) else: raise ValueError( "Invalid input for linprog: unable to interpret bounds with this " "dimension tuple: {}.".format(bsh)) # The process above creates nan-s where the input specified None # Convert the nan-s in the 1st column to -np.inf and in the 2nd column # to np.inf i_none = np.isnan(bounds_clean[:, 0]) bounds_clean[i_none, 0] = -np.inf i_none = np.isnan(bounds_clean[:, 1]) bounds_clean[i_none, 1] = np.inf return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality) def _presolve(lp, rr, rr_method, tol=1e-9): """ Given inputs for a linear programming problem in preferred format, presolve the problem: identify trivial infeasibilities, redundancies, and unboundedness, tighten bounds where possible, and eliminate fixed variables. Parameters ---------- lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. 
Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : 2D array The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N elements of ``x``. The N x 2 array contains lower bounds in the first column and upper bounds in the 2nd. Unbounded variables have lower bound -np.inf and/or upper bound np.inf. x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. rr : bool If ``True`` attempts to eliminate any redundant rows in ``A_eq``. Set False if ``A_eq`` is known to be of full row rank, or if you are looking for a potential speedup (at the expense of reliability). rr_method : string Method used to identify and remove redundant rows from the equality constraint matrix after presolve. tol : float The tolerance which determines when a solution is "close enough" to zero in Phase 1 to be considered a basic feasible solution or close enough to positive to serve as an optimal solution. Returns ------- lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : 2D array The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened. x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. c0 : 1D array Constant term in objective function due to fixed (and eliminated) variables. 
x : 1D array Solution vector (when the solution is trivial and can be determined in presolve) revstack: list of functions the functions in the list reverse the operations of _presolve() the function signature is x_org = f(x_mod), where x_mod is the result of a presolve step and x_org the value at the start of the step (currently, the revstack contains only one function) complete: bool Whether the solution is complete (solved or determined to be infeasible or unbounded in presolve) status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered message : str A string descriptor of the exit status of the optimization. References ---------- .. [5] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear programming." Mathematical Programming 71.2 (1995): 221-245. """ # ideas from Reference [5] by Andersen and Andersen # however, unlike the reference, this is performed before converting # problem to standard form # There are a few advantages: # * artificial variables have not been added, so matrices are smaller # * bounds have not been converted to constraints yet. (It is better to # do that after presolve because presolve may adjust the simple bounds.) # There are many improvements that can be made, namely: # * implement remaining checks from [5] # * loop presolve until no additional changes are made # * implement additional efficiency improvements in redundancy removal [2] c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp revstack = [] # record of variables eliminated from problem # constant term in cost function may be added if variables are eliminated c0 = 0 complete = False # complete is True if detected infeasible/unbounded x = np.zeros(c.shape) # this is solution vector if completed in presolve status = 0 # all OK unless determined otherwise message = "" # Lower and upper bounds. Copy to prevent feedback. lb = bounds[:, 0].copy() ub = bounds[:, 1].copy() m_eq, n = A_eq.shape m_ub, n = A_ub.shape if (rr_method is not None and rr_method.lower() not in {"svd", "pivot", "id"}): message = ("'" + str(rr_method) + "' is not a valid option " "for redundancy removal. 
Valid options are 'SVD', " "'pivot', and 'ID'.") raise ValueError(message) if sps.issparse(A_eq): A_eq = A_eq.tocsr() A_ub = A_ub.tocsr() def where(A): return A.nonzero() vstack = sps.vstack else: where = np.where vstack = np.vstack # upper bounds > lower bounds if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf): status = 2 message = ("The problem is (trivially) infeasible since one " "or more upper bounds are smaller than the corresponding " "lower bounds, a lower bound is np.inf or an upper bound " "is -np.inf.") complete = True return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) # zero row in equality constraints zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten() if np.any(zero_row): if np.any( np.logical_and( zero_row, np.abs(b_eq) > tol)): # test_zero_row_1 # infeasible if RHS is not zero status = 2 message = ("The problem is (trivially) infeasible due to a row " "of zeros in the equality constraint matrix with a " "nonzero corresponding constraint value.") complete = True return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) else: # test_zero_row_2 # if RHS is zero, we can eliminate this equation entirely A_eq = A_eq[np.logical_not(zero_row), :] b_eq = b_eq[np.logical_not(zero_row)] # zero row in inequality constraints zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten() if np.any(zero_row): if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1 # infeasible if RHS is less than zero (because LHS is zero) status = 2 message = ("The problem is (trivially) infeasible due to a row " "of zeros in the equality constraint matrix with a " "nonzero corresponding constraint value.") complete = True return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) else: # test_zero_row_2 # if LHS is >= 0, we can eliminate this constraint entirely A_ub = A_ub[np.logical_not(zero_row), :] b_ub = b_ub[np.logical_not(zero_row)] # zero column in (both) constraints # this indicates that a variable isn't constrained and can be removed A = vstack((A_eq, A_ub)) if A.shape[0] > 0: zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten() # variable will be at upper or lower bound, depending on objective x[np.logical_and(zero_col, c < 0)] = ub[ np.logical_and(zero_col, c < 0)] x[np.logical_and(zero_col, c > 0)] = lb[ np.logical_and(zero_col, c > 0)] if np.any(np.isinf(x)): # if an unconstrained variable has no bound status = 3 message = ("If feasible, the problem is (trivially) unbounded " "due to a zero column in the constraint matrices. 
If " "you wish to check whether the problem is infeasible, " "turn presolve off.") complete = True return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) # variables will equal upper/lower bounds will be removed later lb[np.logical_and(zero_col, c < 0)] = ub[ np.logical_and(zero_col, c < 0)] ub[np.logical_and(zero_col, c > 0)] = lb[ np.logical_and(zero_col, c > 0)] # row singleton in equality constraints # this fixes a variable and removes the constraint singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten() rows = where(singleton_row)[0] cols = where(A_eq[rows, :])[1] if len(rows) > 0: for row, col in zip(rows, cols): val = b_eq[row] / A_eq[row, col] if not lb[col] - tol <= val <= ub[col] + tol: # infeasible if fixed value is not within bounds status = 2 message = ("The problem is (trivially) infeasible because a " "singleton row in the equality constraints is " "inconsistent with the bounds.") complete = True return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) else: # sets upper and lower bounds at that fixed value - variable # will be removed later lb[col] = val ub[col] = val A_eq = A_eq[np.logical_not(singleton_row), :] b_eq = b_eq[np.logical_not(singleton_row)] # row singleton in inequality constraints # this indicates a simple bound and the constraint can be removed # simple bounds may be adjusted here # After all of the simple bound information is combined here, get_Abc will # turn the simple bounds into constraints singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten() cols = where(A_ub[singleton_row, :])[1] rows = where(singleton_row)[0] if len(rows) > 0: for row, col in zip(rows, cols): val = b_ub[row] / A_ub[row, col] if A_ub[row, col] > 0: # upper bound if val < lb[col] - tol: # infeasible complete = True elif val < ub[col]: # new upper bound ub[col] = val else: # lower bound if val > ub[col] + tol: # infeasible complete = True elif val > lb[col]: # new lower bound lb[col] = val if complete: status = 2 message = ("The problem is (trivially) infeasible because a " "singleton row in the upper bound constraints is " "inconsistent with the bounds.") return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) A_ub = A_ub[np.logical_not(singleton_row), :] b_ub = b_ub[np.logical_not(singleton_row)] # identical bounds indicate that variable can be removed i_f = np.abs(lb - ub) < tol # indices of "fixed" variables i_nf = np.logical_not(i_f) # indices of "not fixed" variables # test_bounds_equal_but_infeasible if np.all(i_f): # if bounds define solution, check for consistency residual = b_eq - A_eq.dot(lb) slack = b_ub - A_ub.dot(lb) if ((A_ub.size > 0 and np.any(slack < 0)) or (A_eq.size > 0 and not np.allclose(residual, 0))): status = 2 message = ("The problem is (trivially) infeasible because the " "bounds fix all variables to values inconsistent with " "the constraints") complete = True return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) ub_mod = ub lb_mod = lb if np.any(i_f): c0 += c[i_f].dot(lb[i_f]) b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f]) b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f]) c = c[i_nf] x_undo = lb[i_f] # not x[i_f], x is just zeroes x = x[i_nf] # user guess x0 stays separate from presolve solution x if x0 is not None: x0 = x0[i_nf] A_eq = A_eq[:, i_nf] A_ub = A_ub[:, i_nf] # modify bounds lb_mod = lb[i_nf] ub_mod = ub[i_nf] def rev(x_mod): # Function to restore x: 
insert x_undo into x_mod. # When elements have been removed at positions k1, k2, k3, ... # then these must be replaced at (after) positions k1-1, k2-2, # k3-3, ... in the modified array to recreate the original i = np.flatnonzero(i_f) # Number of variables to restore N = len(i) index_offset = np.arange(N) # Create insert indices insert_indices = i - index_offset x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo) return x_rev # Use revstack as a list of functions, currently just this one. revstack.append(rev) # no constraints indicates that problem is trivial if A_eq.size == 0 and A_ub.size == 0: b_eq = np.array([]) b_ub = np.array([]) # test_empty_constraint_1 if c.size == 0: status = 0 message = ("The solution was determined in presolve as there are " "no non-trivial constraints.") elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or np.any(np.logical_and(c > 0, lb_mod == -np.inf))): # test_no_constraints() # test_unbounded_no_nontrivial_constraints_1 # test_unbounded_no_nontrivial_constraints_2 status = 3 message = ("The problem is (trivially) unbounded " "because there are no non-trivial constraints and " "a) at least one decision variable is unbounded " "above and its corresponding cost is negative, or " "b) at least one decision variable is unbounded below " "and its corresponding cost is positive. ") else: # test_empty_constraint_2 status = 0 message = ("The solution was determined in presolve as there are " "no non-trivial constraints.") complete = True x[c < 0] = ub_mod[c < 0] x[c > 0] = lb_mod[c > 0] # where c is zero, set x to a finite bound or zero x_zero_c = ub_mod[c == 0] x_zero_c[np.isinf(x_zero_c)] = ub_mod[c == 0][np.isinf(x_zero_c)] x_zero_c[np.isinf(x_zero_c)] = 0 x[c == 0] = x_zero_c # if this is not the last step of presolve, should convert bounds back # to array and return here # Convert modified lb and ub back into N x 2 bounds bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis])) # remove redundant (linearly dependent) rows from equality constraints n_rows_A = A_eq.shape[0] redundancy_warning = ("A_eq does not appear to be of full row rank. To " "improve performance, check the problem formulation " "for redundant equality constraints.") if (sps.issparse(A_eq)): if rr and A_eq.size > 0: # TODO: Fast sparse rank check? rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq) A_eq, b_eq, status, message = rr_res if A_eq.shape[0] < n_rows_A: warn(redundancy_warning, OptimizeWarning, stacklevel=1) if status != 0: complete = True return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) # This is a wild guess for which redundancy removal algorithm will be # faster. More testing would be good. 
small_nullspace = 5 if rr and A_eq.size > 0: try: # TODO: use results of first SVD in _remove_redundancy_svd rank = np.linalg.matrix_rank(A_eq) # oh well, we'll have to go with _remove_redundancy_pivot_dense except Exception: rank = 0 if rr and A_eq.size > 0 and rank < A_eq.shape[0]: warn(redundancy_warning, OptimizeWarning, stacklevel=3) dim_row_nullspace = A_eq.shape[0]-rank if rr_method is None: if dim_row_nullspace <= small_nullspace: rr_res = _remove_redundancy_svd(A_eq, b_eq) A_eq, b_eq, status, message = rr_res if dim_row_nullspace > small_nullspace or status == 4: rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq) A_eq, b_eq, status, message = rr_res else: rr_method = rr_method.lower() if rr_method == "svd": rr_res = _remove_redundancy_svd(A_eq, b_eq) A_eq, b_eq, status, message = rr_res elif rr_method == "pivot": rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq) A_eq, b_eq, status, message = rr_res elif rr_method == "id": rr_res = _remove_redundancy_id(A_eq, b_eq, rank) A_eq, b_eq, status, message = rr_res else: # shouldn't get here; option validity checked above pass if A_eq.shape[0] < rank: message = ("Due to numerical issues, redundant equality " "constraints could not be removed automatically. " "Try providing your constraint matrices as sparse " "matrices to activate sparse presolve, try turning " "off redundancy removal, or try turning off presolve " "altogether.") status = 4 if status != 0: complete = True return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0), c0, x, revstack, complete, status, message) def _parse_linprog(lp, options, meth): """ Parse the provided linear programming problem ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the provided constraints (``A_ub`` and ``A_eq) and if these match the provided sparsity optional values. ``_clean inputs`` checks of the provided inputs. If no violations are identified the objective vector, upper bound constraints, equality constraints, and simple bounds are returned in the expected format. Parameters ---------- lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : various valid formats, optional The bounds of ``x``, as ``min`` and ``max`` pairs. If bounds are specified for all N variables separately, valid formats are: * a 2D array (2 x N or N x 2); * a sequence of N sequences, each with 2 values. If all variables have the same bounds, a single pair of values can be specified. Valid formats are: * a sequence with 2 scalar values; * a sequence with a single element containing 2 scalar values. If all variables have a lower bound of 0 and no upper bound, the bounds parameter can be omitted (or given as None). x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. 
This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. options : dict A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options('linprog')`. Returns ------- lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : 2D array The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N elements of ``x``. The N x 2 array contains lower bounds in the first column and upper bounds in the 2nd. Unbounded variables have lower bound -np.inf and/or upper bound np.inf. x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. options : dict, optional A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options('linprog')`. """ if options is None: options = {} solver_options = {k: v for k, v in options.items()} solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth, lp.A_ub, lp.A_eq) # Convert lists to numpy arrays, etc... lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq)) return lp, solver_options def _get_Abc(lp, c0): """ Given a linear programming problem of the form: Minimize:: c @ x Subject to:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. Return the problem in standard form: Minimize:: c @ x Subject to:: A @ x == b x >= 0 by adding slack variables and making variable substitutions as necessary. Parameters ---------- lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : 2D array The bounds of ``x``, lower bounds in the 1st column, upper bounds in the 2nd column. 
The bounds are possibly tightened by the presolve procedure. x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. c0 : float Constant term in objective function due to fixed (and eliminated) variables. Returns ------- A : 2-D array 2-D array such that ``A`` @ ``x``, gives the values of the equality constraints at ``x``. b : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in A (for standard form problem). c : 1-D array Coefficients of the linear objective function to be minimized (for standard form problem). c0 : float Constant term in objective function due to fixed (and eliminated) variables. x0 : 1-D array Starting values of the independent variables, which will be refined by the optimization algorithm References ---------- .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. """ c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp if sps.issparse(A_eq): sparse = True A_eq = sps.csr_matrix(A_eq) A_ub = sps.csr_matrix(A_ub) def hstack(blocks): return sps.hstack(blocks, format="csr") def vstack(blocks): return sps.vstack(blocks, format="csr") zeros = sps.csr_matrix eye = sps.eye else: sparse = False hstack = np.hstack vstack = np.vstack zeros = np.zeros eye = np.eye # Variables lbs and ubs (see below) may be changed, which feeds back into # bounds, so copy. bounds = np.array(bounds, copy=True) # modify problem such that all variables have only non-negativity bounds lbs = bounds[:, 0] ubs = bounds[:, 1] m_ub, n_ub = A_ub.shape lb_none = np.equal(lbs, -np.inf) ub_none = np.equal(ubs, np.inf) lb_some = np.logical_not(lb_none) ub_some = np.logical_not(ub_none) # unbounded below: substitute xi = -xi' (unbounded above) # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds l_nolb_someub = np.logical_and(lb_none, ub_some) i_nolb = np.nonzero(l_nolb_someub)[0] lbs[l_nolb_someub], ubs[l_nolb_someub] = ( -ubs[l_nolb_someub], -lbs[l_nolb_someub]) lb_none = np.equal(lbs, -np.inf) ub_none = np.equal(ubs, np.inf) lb_some = np.logical_not(lb_none) ub_some = np.logical_not(ub_none) c[i_nolb] *= -1 if x0 is not None: x0[i_nolb] *= -1 if len(i_nolb) > 0: if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... 
weird A_ub[:, i_nolb] *= -1 if A_eq.shape[0] > 0: A_eq[:, i_nolb] *= -1 # upper bound: add inequality constraint i_newub, = ub_some.nonzero() ub_newub = ubs[ub_some] n_bounds = len(i_newub) if n_bounds > 0: shape = (n_bounds, A_ub.shape[1]) if sparse: idxs = (np.arange(n_bounds), i_newub) A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs), shape=shape))) else: A_ub = vstack((A_ub, np.zeros(shape))) A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1 b_ub = np.concatenate((b_ub, np.zeros(n_bounds))) b_ub[m_ub:] = ub_newub A1 = vstack((A_ub, A_eq)) b = np.concatenate((b_ub, b_eq)) c = np.concatenate((c, np.zeros((A_ub.shape[0],)))) if x0 is not None: x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],)))) # unbounded: substitute xi = xi+ + xi- l_free = np.logical_and(lb_none, ub_none) i_free = np.nonzero(l_free)[0] n_free = len(i_free) c = np.concatenate((c, np.zeros(n_free))) if x0 is not None: x0 = np.concatenate((x0, np.zeros(n_free))) A1 = hstack((A1[:, :n_ub], -A1[:, i_free])) c[n_ub:n_ub+n_free] = -c[i_free] if x0 is not None: i_free_neg = x0[i_free] < 0 x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]] x0[i_free[i_free_neg]] = 0 # add slack variables A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))]) A = hstack([A1, A2]) # lower bound: substitute xi = xi' + lb # now there is a constant term in objective i_shift = np.nonzero(lb_some)[0] lb_shift = lbs[lb_some].astype(float) c0 += np.sum(lb_shift * c[i_shift]) if sparse: b = b.reshape(-1, 1) A = A.tocsc() b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1) b = b.ravel() else: b -= (A[:, i_shift] * lb_shift).sum(axis=1) if x0 is not None: x0[i_shift] -= lb_shift return A, b, c, c0, x0 def _round_to_power_of_two(x): """ Round elements of the array to the nearest power of two. """ return 2**np.around(np.log2(x)) def _autoscale(A, b, c, x0): """ Scales the problem according to equilibration from [12]. Also normalizes the right hand side vector by its maximum element. """ m, n = A.shape C = 1 R = 1 if A.size > 0: R = np.max(np.abs(A), axis=1) if sps.issparse(A): R = R.toarray().flatten() R[R == 0] = 1 R = 1/_round_to_power_of_two(R) A = sps.diags(R)*A if sps.issparse(A) else A*R.reshape(m, 1) b = b*R C = np.max(np.abs(A), axis=0) if sps.issparse(A): C = C.toarray().flatten() C[C == 0] = 1 C = 1/_round_to_power_of_two(C) A = A*sps.diags(C) if sps.issparse(A) else A*C c = c*C b_scale = np.max(np.abs(b)) if b.size > 0 else 1 if b_scale == 0: b_scale = 1. b = b/b_scale if x0 is not None: x0 = x0/b_scale*(1/C) return A, b, c, x0, C, b_scale def _unscale(x, C, b_scale): """ Converts solution to _autoscale problem -> solution to original problem. """ try: n = len(C) # fails if sparse or scalar; that's OK. # this is only needed for original simplex (never sparse) except TypeError: n = len(x) return x[:n]*b_scale*C def _display_summary(message, status, fun, iteration): """ Print the termination summary of the linear program Parameters ---------- message : str A string descriptor of the exit status of the optimization. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered fun : float Value of the objective function. iteration : iteration The number of iterations performed. 
""" print(message) if status in (0, 1): print(f" Current function value: {fun: <12.6f}") print(f" Iterations: {iteration:d}") def _postsolve(x, postsolve_args, complete=False): """ Given solution x to presolved, standard form linear program x, add fixed variables back into the problem and undo the variable substitutions to get solution to original linear program. Also, calculate the objective function value, slack in original upper bound constraints, and residuals in original equality constraints. Parameters ---------- x : 1-D array Solution vector to the standard-form problem. postsolve_args : tuple Data needed by _postsolve to convert the solution to the standard-form problem into the solution to the original problem, including: lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields: c : 1D array The coefficients of the linear objective function to be minimized. A_ub : 2D array, optional The inequality constraint matrix. Each row of ``A_ub`` specifies the coefficients of a linear inequality constraint on ``x``. b_ub : 1D array, optional The inequality constraint vector. Each element represents an upper bound on the corresponding value of ``A_ub @ x``. A_eq : 2D array, optional The equality constraint matrix. Each row of ``A_eq`` specifies the coefficients of a linear equality constraint on ``x``. b_eq : 1D array, optional The equality constraint vector. Each element of ``A_eq @ x`` must equal the corresponding element of ``b_eq``. bounds : 2D array The bounds of ``x``, lower bounds in the 1st column, upper bounds in the 2nd column. The bounds are possibly tightened by the presolve procedure. x0 : 1D array, optional Guess values of the decision variables, which will be refined by the optimization algorithm. This argument is currently used only by the 'revised simplex' method, and can only be used if `x0` represents a basic feasible solution. 
revstack: list of functions the functions in the list reverse the operations of _presolve() the function signature is x_org = f(x_mod), where x_mod is the result of a presolve step and x_org the value at the start of the step complete : bool Whether the solution is was determined in presolve (``True`` if so) Returns ------- x : 1-D array Solution vector to original linear programming problem fun: float optimal objective value for original problem slack : 1-D array The (non-negative) slack in the upper bound constraints, that is, ``b_ub - A_ub @ x`` con : 1-D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` """ # note that all the inputs are the ORIGINAL, unmodified versions # no rows, columns have been removed c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0] revstack, C, b_scale = postsolve_args[1:] x = _unscale(x, C, b_scale) # Undo variable substitutions of _get_Abc() # if "complete", problem was solved in presolve; don't do anything here n_x = bounds.shape[0] if not complete and bounds is not None: # bounds are never none, probably n_unbounded = 0 for i, bi in enumerate(bounds): lbi = bi[0] ubi = bi[1] if lbi == -np.inf and ubi == np.inf: n_unbounded += 1 x[i] = x[i] - x[n_x + n_unbounded - 1] else: if lbi == -np.inf: x[i] = ubi - x[i] else: x[i] += lbi # all the rest of the variables were artificial x = x[:n_x] # If there were variables removed from the problem, add them back into the # solution vector # Apply the functions in revstack (reverse direction) for rev in reversed(revstack): x = rev(x) fun = x.dot(c) slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints # report residuals of ORIGINAL EQ constraints con = b_eq - A_eq.dot(x) return x, fun, slack, con def _check_result(x, fun, status, slack, con, bounds, tol, message, integrality): """ Check the validity of the provided solution. A valid (optimal) solution satisfies all bounds, all slack variables are negative and all equality constraint residuals are strictly non-zero. Further, the lower-bounds, upper-bounds, slack and residuals contain no nan values. Parameters ---------- x : 1-D array Solution vector to original linear programming problem fun: float optimal objective value for original problem status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered slack : 1-D array The (non-negative) slack in the upper bound constraints, that is, ``b_ub - A_ub @ x`` con : 1-D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` bounds : 2D array The bounds on the original variables ``x`` message : str A string descriptor of the exit status of the optimization. tol : float Termination tolerance; see [1]_ Section 4.5. Returns ------- status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered message : str A string descriptor of the exit status of the optimization. """ # Somewhat arbitrary tol = np.sqrt(tol) * 10 if x is None: # HiGHS does not provide x if infeasible/unbounded if status == 0: # Observed with HiGHS Simplex Primal status = 4 message = ("The solver did not provide a solution nor did it " "report a failure. 
Please submit a bug report.") return status, message contains_nans = ( np.isnan(x).any() or np.isnan(fun) or np.isnan(slack).any() or np.isnan(con).any() ) if contains_nans: is_feasible = False else: if integrality is None: integrality = 0 valid_bounds = (x >= bounds[:, 0] - tol) & (x <= bounds[:, 1] + tol) # When integrality is 2 or 3, x must be within bounds OR take value 0 valid_bounds |= (integrality > 1) & np.isclose(x, 0, atol=tol) invalid_bounds = not np.all(valid_bounds) invalid_slack = status != 3 and (slack < -tol).any() invalid_con = status != 3 and (np.abs(con) > tol).any() is_feasible = not (invalid_bounds or invalid_slack or invalid_con) if status == 0 and not is_feasible: status = 4 message = ("The solution does not satisfy the constraints within the " "required tolerance of " + f"{tol:.2E}" + ", yet " "no errors were raised and there is no certificate of " "infeasibility or unboundedness. Check whether " "the slack and constraint residuals are acceptable; " "if not, consider enabling presolve, adjusting the " "tolerance option(s), and/or using a different method. " "Please consider submitting a bug report.") elif status == 2 and is_feasible: # Occurs if the simplex method exits after phase one with a very # nearly basic feasible solution. Postsolving can make the solution # basic, however, this solution is NOT optimal status = 4 message = ("The solution is feasible, but the solver did not report " "that the solution was optimal. Please try a different " "method.") return status, message
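A small sketch, assuming SciPy is installed, of the bounds formats that `_clean_inputs` above normalises into an N x 2 array, exercised through the public `linprog` interface rather than the private helpers. The toy problem is the same one used in the `_LPProblem` docstring:

import numpy as np
from scipy.optimize import linprog

c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]

# One (min, max) pair per variable; None means "no bound" and becomes +/-inf.
res_pairs = linprog(c, A_ub=A_ub, b_ub=b_ub,
                    bounds=[(None, None), (-3, None)])
# A single pair that applies to every variable.
res_shared = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=(-10, 10))
print(res_pairs.x, res_shared.x)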
62,773
40.217334
88
py
scipy
scipy-main/scipy/optimize/_trustregion.py
"""Trust-region optimization.""" import math import warnings import numpy as np import scipy.linalg from ._optimize import (_check_unknown_options, _status_message, OptimizeResult, _prepare_scalar_function, _call_callback_maybe_halt) from scipy.optimize._hessian_update_strategy import HessianUpdateStrategy from scipy.optimize._differentiable_functions import FD_METHODS __all__ = [] def _wrap_function(function, args): # wraps a minimizer function to count number of evaluations # and to easily provide an args kwd. ncalls = [0] if function is None: return ncalls, None def function_wrapper(x, *wrapper_args): ncalls[0] += 1 # A copy of x is sent to the user function (gh13740) return function(np.copy(x), *(wrapper_args + args)) return ncalls, function_wrapper class BaseQuadraticSubproblem: """ Base/abstract class defining the quadratic model for trust-region minimization. Child classes must implement the ``solve`` method. Values of the objective function, Jacobian and Hessian (if provided) at the current iterate ``x`` are evaluated on demand and then stored as attributes ``fun``, ``jac``, ``hess``. """ def __init__(self, x, fun, jac, hess=None, hessp=None): self._x = x self._f = None self._g = None self._h = None self._g_mag = None self._cauchy_point = None self._newton_point = None self._fun = fun self._jac = jac self._hess = hess self._hessp = hessp def __call__(self, p): return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p)) @property def fun(self): """Value of objective function at current iteration.""" if self._f is None: self._f = self._fun(self._x) return self._f @property def jac(self): """Value of Jacobian of objective function at current iteration.""" if self._g is None: self._g = self._jac(self._x) return self._g @property def hess(self): """Value of Hessian of objective function at current iteration.""" if self._h is None: self._h = self._hess(self._x) return self._h def hessp(self, p): if self._hessp is not None: return self._hessp(self._x, p) else: return np.dot(self.hess, p) @property def jac_mag(self): """Magnitude of jacobian of objective function at current iteration.""" if self._g_mag is None: self._g_mag = scipy.linalg.norm(self.jac) return self._g_mag def get_boundaries_intersections(self, z, d, trust_radius): """ Solve the scalar quadratic equation ||z + t d|| == trust_radius. This is like a line-sphere intersection. Return the two values of t, sorted from low to high. """ a = np.dot(d, d) b = 2 * np.dot(z, d) c = np.dot(z, z) - trust_radius**2 sqrt_discriminant = math.sqrt(b*b - 4*a*c) # The following calculation is mathematically # equivalent to: # ta = (-b - sqrt_discriminant) / (2*a) # tb = (-b + sqrt_discriminant) / (2*a) # but produce smaller round off errors. # Look at Matrix Computation p.97 # for a better justification. aux = b + math.copysign(sqrt_discriminant, b) ta = -aux / (2*a) tb = -2*c / aux return sorted([ta, tb]) def solve(self, trust_radius): raise NotImplementedError('The solve method should be implemented by ' 'the child class') def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None, subproblem=None, initial_trust_radius=1.0, max_trust_radius=1000.0, eta=0.15, gtol=1e-4, maxiter=None, disp=False, return_all=False, callback=None, inexact=True, **unknown_options): """ Minimization of scalar function of one or more variables using a trust-region algorithm. Options for the trust-region algorithm are: initial_trust_radius : float Initial trust radius. 
max_trust_radius : float Never propose steps that are longer than this value. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `gtol` before successful termination. maxiter : int Maximum number of iterations to perform. disp : bool If True, print convergence message. inexact : bool Accuracy to solve subproblems. If True requires less nonlinear iterations, but more vector products. Only effective for method trust-krylov. This function is called by the `minimize` function. It is not supposed to be called directly. """ _check_unknown_options(unknown_options) if jac is None: raise ValueError('Jacobian is currently required for trust-region ' 'methods') if hess is None and hessp is None: raise ValueError('Either the Hessian or the Hessian-vector product ' 'is currently required for trust-region methods') if subproblem is None: raise ValueError('A subproblem solving strategy is required for ' 'trust-region methods') if not (0 <= eta < 0.25): raise Exception('invalid acceptance stringency') if max_trust_radius <= 0: raise Exception('the max trust radius must be positive') if initial_trust_radius <= 0: raise ValueError('the initial trust radius must be positive') if initial_trust_radius >= max_trust_radius: raise ValueError('the initial trust radius must be less than the ' 'max trust radius') # force the initial guess into a nice format x0 = np.asarray(x0).flatten() # A ScalarFunction representing the problem. This caches calls to fun, jac, # hess. sf = _prepare_scalar_function(fun, x0, jac=jac, hess=hess, args=args) fun = sf.fun jac = sf.grad if callable(hess): hess = sf.hess elif callable(hessp): # this elif statement must come before examining whether hess # is estimated by FD methods or a HessianUpdateStrategy pass elif (hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)): # If the Hessian is being estimated by finite differences or a # Hessian update strategy then ScalarFunction.hess returns a # LinearOperator or a HessianUpdateStrategy. This enables the # calculation/creation of a hessp. BUT you only want to do this # if the user *hasn't* provided a callable(hessp) function. hess = None def hessp(x, p, *args): return sf.hess(x).dot(p) else: raise ValueError('Either the Hessian or the Hessian-vector product ' 'is currently required for trust-region methods') # ScalarFunction doesn't represent hessp nhessp, hessp = _wrap_function(hessp, args) # limit the number of iterations if maxiter is None: maxiter = len(x0)*200 # init the search status warnflag = 0 # initialize the search trust_radius = initial_trust_radius x = x0 if return_all: allvecs = [x] m = subproblem(x, fun, jac, hess, hessp) k = 0 # search for the function min # do not even start if the gradient is small enough while m.jac_mag >= gtol: # Solve the sub-problem. # This gives us the proposed step relative to the current position # and it tells us whether the proposed step # has reached the trust region boundary or not. 
try: p, hits_boundary = m.solve(trust_radius) except np.linalg.LinAlgError: warnflag = 3 break # calculate the predicted value at the proposed point predicted_value = m(p) # define the local approximation at the proposed point x_proposed = x + p m_proposed = subproblem(x_proposed, fun, jac, hess, hessp) # evaluate the ratio defined in equation (4.4) actual_reduction = m.fun - m_proposed.fun predicted_reduction = m.fun - predicted_value if predicted_reduction <= 0: warnflag = 2 break rho = actual_reduction / predicted_reduction # update the trust radius according to the actual/predicted ratio if rho < 0.25: trust_radius *= 0.25 elif rho > 0.75 and hits_boundary: trust_radius = min(2*trust_radius, max_trust_radius) # if the ratio is high enough then accept the proposed step if rho > eta: x = x_proposed m = m_proposed # append the best guess, call back, increment the iteration count if return_all: allvecs.append(np.copy(x)) k += 1 intermediate_result = OptimizeResult(x=x, fun=m.fun) if _call_callback_maybe_halt(callback, intermediate_result): break # check if the gradient is small enough to stop if m.jac_mag < gtol: warnflag = 0 break # check if we have looked at enough iterations if k >= maxiter: warnflag = 1 break # print some stuff if requested status_messages = ( _status_message['success'], _status_message['maxiter'], 'A bad approximation caused failure to predict improvement.', 'A linalg error occurred, such as a non-psd Hessian.', ) if disp: if warnflag == 0: print(status_messages[warnflag]) else: warnings.warn(status_messages[warnflag], RuntimeWarning, 3) print(" Current function value: %f" % m.fun) print(" Iterations: %d" % k) print(" Function evaluations: %d" % sf.nfev) print(" Gradient evaluations: %d" % sf.ngev) print(" Hessian evaluations: %d" % (sf.nhev + nhessp[0])) result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag, fun=m.fun, jac=m.jac, nfev=sf.nfev, njev=sf.ngev, nhev=sf.nhev + nhessp[0], nit=k, message=status_messages[warnflag]) if hess is not None: result['hess'] = m.hess if return_all: result['allvecs'] = allvecs return result
10,786
34.367213
79
py
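A minimal usage sketch for the trust-region driver in the record above, exercised through the public `scipy.optimize.minimize` interface: the driver requires a gradient (`jac`) plus either a Hessian (`hess`) or a Hessian-vector product (`hessp`). The sketch is illustrative only (it is not part of the module source) and uses only the public helpers `rosen`, `rosen_der`, `rosen_hess`, and `rosen_hess_prod`.

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess, rosen_hess_prod

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])

# Dense Hessian: the quadratic subproblem forms hessp(p) = hess(x) @ p internally.
res = minimize(rosen, x0, method='trust-ncg', jac=rosen_der, hess=rosen_hess,
               options={'gtol': 1e-8})

# Hessian-vector product only: the dense Hessian is never formed.
res_hp = minimize(rosen, x0, method='trust-ncg', jac=rosen_der,
                  hessp=rosen_hess_prod, options={'gtol': 1e-8})
print(res.x, res_hp.x)  # both should approach [1, 1, 1, 1, 1]

Supplying only `hessp` is what makes 'trust-ncg' and 'trust-krylov' suitable for large problems, since the subproblem then needs only matrix-vector products.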
scipy
scipy-main/scipy/optimize/_minimize.py
""" Unified interfaces to minimization algorithms. Functions --------- - minimize : minimization of a function of several variables. - minimize_scalar : minimization of a function of one variable. """ __all__ = ['minimize', 'minimize_scalar'] from warnings import warn import numpy as np # unconstrained minimization from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, _minimize_bfgs, _minimize_newtoncg, _minimize_scalar_brent, _minimize_scalar_bounded, _minimize_scalar_golden, MemoizeJac, OptimizeResult, _wrap_callback, _recover_from_bracket_error) from ._trustregion_dogleg import _minimize_dogleg from ._trustregion_ncg import _minimize_trust_ncg from ._trustregion_krylov import _minimize_trust_krylov from ._trustregion_exact import _minimize_trustregion_exact from ._trustregion_constr import _minimize_trustregion_constr # constrained minimization from ._lbfgsb_py import _minimize_lbfgsb from ._tnc import _minimize_tnc from ._cobyla_py import _minimize_cobyla from ._slsqp_py import _minimize_slsqp from ._constraints import (old_bound_to_new, new_bounds_to_old, old_constraint_to_new, new_constraint_to_old, NonlinearConstraint, LinearConstraint, Bounds, PreparedConstraint) from ._differentiable_functions import FD_METHODS MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov'] # These methods support the new callback interface (passed an OptimizeResult) MINIMIZE_METHODS_NEW_CB = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'l-bfgs-b', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov'] MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden'] def minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None): """Minimization of scalar function of one or more variables. Parameters ---------- fun : callable The objective function to be minimized. ``fun(x, *args) -> float`` where ``x`` is a 1-D array with shape (n,) and ``args`` is a tuple of the fixed parameters needed to completely specify the function. x0 : ndarray, shape (n,) Initial guess. Array of real elements of size (n,), where ``n`` is the number of independent variables. args : tuple, optional Extra arguments passed to the objective function and its derivatives (`fun`, `jac` and `hess` functions). method : str or callable, optional Type of solver. Should be one of - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>` - 'Powell' :ref:`(see here) <optimize.minimize-powell>` - 'CG' :ref:`(see here) <optimize.minimize-cg>` - 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>` - 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>` - 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>` - 'TNC' :ref:`(see here) <optimize.minimize-tnc>` - 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>` - 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>` - 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>` - 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>` - 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>` - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>` - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>` - custom - a callable object, see below for description. If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``, depending on whether or not the problem has constraints or bounds. 
jac : {callable, '2-point', '3-point', 'cs', bool}, optional Method for computing the gradient vector. Only for CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, trust-exact and trust-constr. If it is a callable, it should be a function that returns the gradient vector: ``jac(x, *args) -> array_like, shape (n,)`` where ``x`` is an array with shape (n,) and ``args`` is a tuple with the fixed parameters. If `jac` is a Boolean and is True, `fun` is assumed to return a tuple ``(f, g)`` containing the objective function and the gradient. Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and 'trust-krylov' require that either a callable be supplied, or that `fun` return the objective and gradient. If None or False, the gradient will be estimated using 2-point finite difference estimation with an absolute step size. Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used to select a finite difference scheme for numerical estimation of the gradient with a relative step size. These finite difference schemes obey any specified `bounds`. hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional Method for computing the Hessian matrix. Only for Newton-CG, dogleg, trust-ncg, trust-krylov, trust-exact and trust-constr. If it is callable, it should return the Hessian matrix: ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed parameters. The keywords {'2-point', '3-point', 'cs'} can also be used to select a finite difference scheme for numerical estimation of the hessian. Alternatively, objects implementing the `HessianUpdateStrategy` interface can be used to approximate the Hessian. Available quasi-Newton methods implementing this interface are: - `BFGS`; - `SR1`. Not all of the options are available for each of the methods; for availability refer to the notes. hessp : callable, optional Hessian of objective function times an arbitrary vector p. Only for Newton-CG, trust-ncg, trust-krylov, trust-constr. Only one of `hessp` or `hess` needs to be given. If `hess` is provided, then `hessp` will be ignored. `hessp` must compute the Hessian times an arbitrary vector: ``hessp(x, p, *args) -> ndarray shape (n,)`` where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with dimension (n,) and ``args`` is a tuple with the fixed parameters. bounds : sequence or `Bounds`, optional Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell, trust-constr, and COBYLA methods. There are two ways to specify the bounds: 1. Instance of `Bounds` class. 2. Sequence of ``(min, max)`` pairs for each element in `x`. None is used to specify no bound. constraints : {Constraint, dict} or List of {Constraint, dict}, optional Constraints definition. Only for COBYLA, SLSQP and trust-constr. Constraints for 'trust-constr' are defined as a single object or a list of objects specifying constraints to the optimization problem. Available constraints are: - `LinearConstraint` - `NonlinearConstraint` Constraints for COBYLA, SLSQP are defined as a list of dictionaries. Each dictionary with fields: type : str Constraint type: 'eq' for equality, 'ineq' for inequality. fun : callable The function defining the constraint. jac : callable, optional The Jacobian of `fun` (only for SLSQP). args : sequence, optional Extra arguments to be passed to the function and Jacobian. 
Equality constraint means that the constraint function result is to be zero whereas inequality means that it is to be non-negative. Note that COBYLA only supports inequality constraints. tol : float, optional Tolerance for termination. When `tol` is specified, the selected minimization algorithm sets some relevant solver-specific tolerance(s) equal to `tol`. For detailed control, use solver-specific options. options : dict, optional A dictionary of solver options. All methods except `TNC` accept the following generic options: maxiter : int Maximum number of iterations to perform. Depending on the method each iteration may use several function evaluations. For `TNC` use `maxfun` instead of `maxiter`. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options()`. callback : callable, optional A callable called after each iteration. All methods except TNC, SLSQP, and COBYLA support a callable with the signature: ``callback(intermediate_result: OptimizeResult)`` where ``intermediate_result`` is a keyword parameter containing an `OptimizeResult` with attributes ``x`` and ``fun``, the present values of the parameter vector and objective function. Note that the name of the parameter must be ``intermediate_result`` for the callback to be passed an `OptimizeResult`. These methods will also terminate if the callback raises ``StopIteration``. All methods except trust-constr (also) support a signature like: ``callback(xk)`` where ``xk`` is the current parameter vector. Introspection is used to determine which of the signatures above to invoke. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- minimize_scalar : Interface to minimization algorithms for scalar univariate functions show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *BFGS*. **Unconstrained minimization** Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate gradient algorithm by Polak and Ribiere, a variant of the Fletcher-Reeves method described in [5]_ pp. 120-122. Only the first derivatives are used. Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_ pp. 136. It uses the first derivatives only. BFGS has proven good performance even for non-smooth optimizations. This method also returns an approximation of the Hessian inverse, stored as `hess_inv` in the OptimizeResult object. Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a Newton-CG algorithm [5]_ pp. 168 (also known as the truncated Newton method). It uses a CG method to compute the search direction. See also the *TNC* method for a box-constrained minimization with a similar algorithm. Suitable for large-scale problems. Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg trust-region algorithm [5]_ for unconstrained minimization. This algorithm requires the gradient and Hessian; furthermore the Hessian is required to be positive definite. 
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the Newton conjugate gradient trust-region algorithm [5]_ for unconstrained minimization. This algorithm requires the gradient and either the Hessian or a function that computes the product of the Hessian with a given vector. Suitable for large-scale problems. Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained minimization. This algorithm requires the gradient and either the Hessian or a function that computes the product of the Hessian with a given vector. Suitable for large-scale problems. On indefinite problems it usually requires fewer iterations than the `trust-ncg` method and is recommended for medium and large-scale problems. Method :ref:`trust-exact <optimize.minimize-trustexact>` is a trust-region method for unconstrained minimization in which quadratic subproblems are solved almost exactly [13]_. This algorithm requires the gradient and the Hessian (which is *not* required to be positive definite). In many situations it is the Newton-type method that converges in the fewest iterations, and it is the most recommended for small and medium-size problems. **Bound-Constrained minimization** Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the Simplex algorithm [1]_, [2]_. This algorithm is robust in many applications. However, if the numerical computation of derivatives can be trusted, other algorithms that use first and/or second derivative information might be preferred for their generally better performance. Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B algorithm [6]_, [7]_ for bound constrained minimization. Method :ref:`Powell <optimize.minimize-powell>` is a modification of Powell's method [3]_, [4]_ which is a conjugate direction method. It performs sequential one-dimensional minimizations along each vector of the directions set (`direc` field in `options` and `info`), which is updated at each iteration of the main minimization loop. The function need not be differentiable, and no derivatives are taken. If bounds are not provided, then an unbounded line search will be used. If bounds are provided and the initial guess is within the bounds, then every function evaluation throughout the minimization procedure will be within the bounds. If bounds are provided, the initial guess is outside the bounds, and `direc` is full rank (default has full rank), then some function evaluations during the first iteration may be outside the bounds, but every function evaluation after the first iteration will be within the bounds. If `direc` is not full rank, then some parameters may not be optimized and the solution is not guaranteed to be within the bounds. Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton algorithm [5]_, [8]_ to minimize a function with variables subject to bounds. This algorithm uses gradient information; it is also called Newton Conjugate-Gradient. It differs from the *Newton-CG* method described above as it wraps a C implementation and allows each variable to be given upper and lower bounds. **Constrained Minimization** Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the Constrained Optimization BY Linear Approximation (COBYLA) method [9]_, [10]_, [11]_. The algorithm is based on linear approximations to the objective function and each constraint. The method wraps a FORTRAN implementation of the algorithm. The constraint functions 'fun' may return either a single number or an array or list of numbers. 
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential Least SQuares Programming to minimize a function of several variables with any combination of bounds, equality and inequality constraints. The method wraps the SLSQP Optimization subroutine originally implemented by Dieter Kraft [12]_. Note that the wrapper handles infinite values in bounds by converting them into large floating values. Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a trust-region algorithm for constrained optimization. It switches between two implementations depending on the problem definition. It is the most versatile constrained minimization algorithm implemented in SciPy and the most appropriate for large-scale problems. For equality constrained problems it is an implementation of the Byrd-Omojokun Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When inequality constraints are imposed as well, it switches to the trust-region interior point method described in [16]_. This interior point algorithm, in turn, solves inequality constraints by introducing slack variables and solving a sequence of equality-constrained barrier problems for progressively smaller values of the barrier parameter. The previously described equality constrained SQP method is used to solve the subproblems with increasing levels of accuracy as the iterate gets closer to a solution. **Finite-Difference Options** For Method :ref:`trust-constr <optimize.minimize-trustconstr>` the gradient and the Hessian may be approximated using three finite-difference schemes: {'2-point', '3-point', 'cs'}. The scheme 'cs' is, potentially, the most accurate but it requires the function to correctly handle complex inputs and to be differentiable in the complex plane. The scheme '3-point' is more accurate than '2-point' but requires twice as many operations. If the gradient is estimated via finite differences, the Hessian must be estimated using one of the quasi-Newton strategies. **Method specific options for the** `hess` **keyword**

+--------------+------+----------+--------------------------+-----+
| method/Hess  | None | callable | '2-point'/'3-point'/'cs' | HUS |
+==============+======+==========+==========================+=====+
| Newton-CG    | x    | (n, n)   | x                        | x   |
|              |      | LO       |                          |     |
+--------------+------+----------+--------------------------+-----+
| dogleg       |      | (n, n)   |                          |     |
+--------------+------+----------+--------------------------+-----+
| trust-ncg    |      | (n, n)   | x                        | x   |
+--------------+------+----------+--------------------------+-----+
| trust-krylov |      | (n, n)   | x                        | x   |
+--------------+------+----------+--------------------------+-----+
| trust-exact  |      | (n, n)   |                          |     |
+--------------+------+----------+--------------------------+-----+
| trust-constr | x    | (n, n)   | x                        | x   |
|              |      | LO       |                          |     |
|              |      | sp       |                          |     |
+--------------+------+----------+--------------------------+-----+

where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy **Custom minimizers** It may be useful to pass a custom minimization method, for example when using a frontend to this method such as `scipy.optimize.basinhopping` or a different library. You can simply pass a callable as the ``method`` parameter. The callable is called as ``method(fun, x0, args, **kwargs, **options)`` where ``kwargs`` corresponds to any other parameters passed to `minimize` (such as `callback`, `hess`, etc.), except the `options` dict, which has its contents also passed as `method` parameters pair by pair. 
Also, if `jac` has been passed as a bool type, `jac` and `fun` are mangled so that `fun` returns just the function values and `jac` is converted to a function returning the Jacobian. The method shall return an `OptimizeResult` object. The provided `method` callable must be able to accept (and possibly ignore) arbitrary parameters; the set of parameters accepted by `minimize` may expand in future versions and then these parameters will be passed to the method. You can find an example in the scipy.optimize tutorial. References ---------- .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function Minimization. The Computer Journal 7: 308-13. .. [2] Wright M H. 1996. Direct search methods: Once scorned, now respectable, in Numerical Analysis 1995: Proceedings of the 1995 Dundee Biennial Conference in Numerical Analysis (Eds. D F Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK. 191-208. .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of a function of several variables without calculating derivatives. The Computer Journal 7: 155-162. .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery. Numerical Recipes (any edition), Cambridge University Press. .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization. Springer New York. .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory Algorithm for Bound Constrained Optimization. SIAM Journal on Scientific and Statistical Computing 16 (5): 1190-1208. .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization. ACM Transactions on Mathematical Software 23 (4): 550-560. .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method. 1984. SIAM Journal of Numerical Analysis 21: 770-778. .. [9] Powell, M J D. A direct search optimization method that models the objective and constraint functions by linear interpolation. 1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez and J-P Hennart, Kluwer Academic (Dordrecht), 51-67. .. [10] Powell M J D. Direct search algorithms for optimization calculations. 1998. Acta Numerica 7: 287-336. .. [11] Powell M J D. A view of algorithms for optimization without derivatives. 2007.Cambridge University Technical Report DAMTP 2007/NA03 .. [12] Kraft, D. A software package for sequential quadratic programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace Center -- Institute for Flight Mechanics, Koln, Germany. .. [13] Conn, A. R., Gould, N. I., and Toint, P. L. Trust region methods. 2000. Siam. pp. 169-200. .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free implementation of the GLTR method for iterative solution of the trust region problem", :arxiv:`1611.04718` .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the Trust-Region Subproblem using the Lanczos Method", SIAM J. Optim., 9(2), 504--525, (1999). .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999. An interior point algorithm for large-scale nonlinear programming. SIAM Journal on Optimization 9.4: 877-900. .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the implementation of an algorithm for large-scale equality constrained optimization. SIAM Journal on Optimization 8.3: 682-706. Examples -------- Let us consider the problem of minimizing the Rosenbrock function. This function (and its respective derivatives) is implemented in `rosen` (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`. 
>>> from scipy.optimize import minimize, rosen, rosen_der A simple application of the *Nelder-Mead* method is: >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6) >>> res.x array([ 1., 1., 1., 1., 1.]) Now using the *BFGS* algorithm, using the first derivative and a few options: >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der, ... options={'gtol': 1e-6, 'disp': True}) Optimization terminated successfully. Current function value: 0.000000 Iterations: 26 Function evaluations: 31 Gradient evaluations: 31 >>> res.x array([ 1., 1., 1., 1., 1.]) >>> print(res.message) Optimization terminated successfully. >>> res.hess_inv array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary [ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269], [ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151], [ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ], [ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]]) Next, consider a minimization problem with several constraints (namely Example 16.4 from [5]_). The objective function is: >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 There are three constraints defined as: >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, ... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, ... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) And variables must be positive, hence the following bounds: >>> bnds = ((0, None), (0, None)) The optimization problem is solved using the SLSQP method as: >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds, ... constraints=cons) It should converge to the theoretical solution (1.4 ,1.7). """ x0 = np.atleast_1d(np.asarray(x0)) if x0.ndim != 1: raise ValueError("'x0' must only have one dimension.") if x0.dtype.kind in np.typecodes["AllInteger"]: x0 = np.asarray(x0, dtype=float) if not isinstance(args, tuple): args = (args,) if method is None: # Select automatically if constraints: method = 'SLSQP' elif bounds is not None: method = 'L-BFGS-B' else: method = 'BFGS' if callable(method): meth = "_custom" else: meth = method.lower() if options is None: options = {} # check if optional parameters are supported by the selected method # - jac if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac): warn('Method %s does not use gradient information (jac).' % method, RuntimeWarning) # - hess if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr', 'trust-krylov', 'trust-exact', '_custom') and hess is not None: warn('Method %s does not use Hessian information (hess).' % method, RuntimeWarning) # - hessp if meth not in ('newton-cg', 'trust-ncg', 'trust-constr', 'trust-krylov', '_custom') \ and hessp is not None: warn('Method %s does not use Hessian-vector product ' 'information (hessp).' % method, RuntimeWarning) # - constraints or bounds if (meth not in ('cobyla', 'slsqp', 'trust-constr', '_custom') and np.any(constraints)): warn('Method %s cannot handle constraints.' % method, RuntimeWarning) if meth not in ('nelder-mead', 'powell', 'l-bfgs-b', 'cobyla', 'slsqp', 'tnc', 'trust-constr', '_custom') and bounds is not None: warn('Method %s cannot handle bounds.' % method, RuntimeWarning) # - return_all if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and options.get('return_all', False)): warn('Method %s does not support the return_all option.' 
% method, RuntimeWarning) # check gradient vector if callable(jac): pass elif jac is True: # fun returns func and grad fun = MemoizeJac(fun) jac = fun.derivative elif (jac in FD_METHODS and meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']): # finite differences with relative step pass elif meth in ['trust-constr']: # default jac calculation for this method jac = '2-point' elif jac is None or bool(jac) is False: # this will cause e.g. LBFGS to use forward difference, absolute step jac = None else: # default if jac option is not understood jac = None # set default tolerances if tol is not None: options = dict(options) if meth == 'nelder-mead': options.setdefault('xatol', tol) options.setdefault('fatol', tol) if meth in ('newton-cg', 'powell', 'tnc'): options.setdefault('xtol', tol) if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'): options.setdefault('ftol', tol) if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov'): options.setdefault('gtol', tol) if meth in ('cobyla', '_custom'): options.setdefault('tol', tol) if meth == 'trust-constr': options.setdefault('xtol', tol) options.setdefault('gtol', tol) options.setdefault('barrier_tol', tol) if meth == '_custom': # custom method called before bounds and constraints are 'standardised' # custom method should be able to accept whatever bounds/constraints # are provided to it. return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, bounds=bounds, constraints=constraints, callback=callback, **options) constraints = standardize_constraints(constraints, x0, meth) remove_vars = False if bounds is not None: # convert to new-style bounds so we only have to consider one case bounds = standardize_bounds(bounds, x0, 'new') bounds = _validate_bounds(bounds, x0, meth) if meth in {"tnc", "slsqp", "l-bfgs-b"}: # These methods can't take the finite-difference derivatives they # need when a variable is fixed by the bounds. To avoid this issue, # remove fixed variables from the problem. # NOTE: if this list is expanded, then be sure to update the # accompanying tests and test_optimize.eb_data. Consider also if # default OptimizeResult will need updating. # determine whether any variables are fixed i_fixed = (bounds.lb == bounds.ub) if np.all(i_fixed): # all the parameters are fixed, a minimizer is not able to do # anything return _optimize_result_for_equal_bounds( fun, bounds, meth, args=args, constraints=constraints ) # determine whether finite differences are needed for any grad/jac fd_needed = (not callable(jac)) for con in constraints: if not callable(con.get('jac', None)): fd_needed = True # If finite differences are ever used, remove all fixed variables # Always remove fixed variables for TNC; see gh-14565 remove_vars = i_fixed.any() and (fd_needed or meth == "tnc") if remove_vars: x_fixed = (bounds.lb)[i_fixed] x0 = x0[~i_fixed] bounds = _remove_from_bounds(bounds, i_fixed) fun = _remove_from_func(fun, i_fixed, x_fixed) if callable(callback): callback = _remove_from_func(callback, i_fixed, x_fixed) if callable(jac): jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1) # make a copy of the constraints so the user's version doesn't # get changed. 
(Shallow copy is ok) constraints = [con.copy() for con in constraints] for con in constraints: # yes, guaranteed to be a list con['fun'] = _remove_from_func(con['fun'], i_fixed, x_fixed, min_dim=1, remove=0) if callable(con.get('jac', None)): con['jac'] = _remove_from_func(con['jac'], i_fixed, x_fixed, min_dim=2, remove=1) bounds = standardize_bounds(bounds, x0, meth) callback = _wrap_callback(callback, meth) if meth == 'nelder-mead': res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds, **options) elif meth == 'powell': res = _minimize_powell(fun, x0, args, callback, bounds, **options) elif meth == 'cg': res = _minimize_cg(fun, x0, args, jac, callback, **options) elif meth == 'bfgs': res = _minimize_bfgs(fun, x0, args, jac, callback, **options) elif meth == 'newton-cg': res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback, **options) elif meth == 'l-bfgs-b': res = _minimize_lbfgsb(fun, x0, args, jac, bounds, callback=callback, **options) elif meth == 'tnc': res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **options) elif meth == 'cobyla': res = _minimize_cobyla(fun, x0, args, constraints, callback=callback, bounds=bounds, **options) elif meth == 'slsqp': res = _minimize_slsqp(fun, x0, args, jac, bounds, constraints, callback=callback, **options) elif meth == 'trust-constr': res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp, bounds, constraints, callback=callback, **options) elif meth == 'dogleg': res = _minimize_dogleg(fun, x0, args, jac, hess, callback=callback, **options) elif meth == 'trust-ncg': res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp, callback=callback, **options) elif meth == 'trust-krylov': res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp, callback=callback, **options) elif meth == 'trust-exact': res = _minimize_trustregion_exact(fun, x0, args, jac, hess, callback=callback, **options) else: raise ValueError('Unknown solver %s' % method) if remove_vars: res.x = _add_to_array(res.x, i_fixed, x_fixed) res.jac = _add_to_array(res.jac, i_fixed, np.nan) if "hess_inv" in res: res.hess_inv = None # unknown if getattr(callback, 'stop_iteration', False): res.success = False res.status = 99 res.message = "`callback` raised `StopIteration`." return res def minimize_scalar(fun, bracket=None, bounds=None, args=(), method=None, tol=None, options=None): """Minimization of scalar function of one variable. Parameters ---------- fun : callable Objective function. Scalar function, must return a scalar. bracket : sequence, optional For methods 'brent' and 'golden', `bracket` defines the bracketing interval and is required. Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair ``(xa, xb)`` to be used as initial points for a downhill bracket search (see `scipy.optimize.bracket`). The minimizer ``res.x`` will not necessarily satisfy ``xa <= res.x <= xb``. bounds : sequence, optional For method 'bounded', `bounds` is mandatory and must have two finite items corresponding to the optimization bounds. args : tuple, optional Extra arguments passed to the objective function. method : str or callable, optional Type of solver. Should be one of: - :ref:`Brent <optimize.minimize_scalar-brent>` - :ref:`Bounded <optimize.minimize_scalar-bounded>` - :ref:`Golden <optimize.minimize_scalar-golden>` - custom - a callable object (added in version 0.14.0), see below Default is "Bounded" if bounds are provided and "Brent" otherwise. 
See the 'Notes' section for details of each solver. tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. options : dict, optional A dictionary of solver options. maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. See :func:`show_options()` for solver-specific options. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- minimize : Interface to minimization algorithms for scalar multivariate functions show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is the ``"Bounded"`` Brent method if `bounds` are passed and unbounded ``"Brent"`` otherwise. Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's algorithm [1]_ to find a local minimum. The algorithm uses inverse parabolic interpolation when possible to speed up convergence of the golden section method. Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the golden section search technique [1]_. It uses analog of the bisection method to decrease the bracketed interval. It is usually preferable to use the *Brent* method. Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can perform bounded minimization [2]_ [3]_. It uses the Brent method to find a local minimum in the interval x1 < xopt < x2. **Custom minimizers** It may be useful to pass a custom minimization method, for example when using some library frontend to minimize_scalar. You can simply pass a callable as the ``method`` parameter. The callable is called as ``method(fun, args, **kwargs, **options)`` where ``kwargs`` corresponds to any other parameters passed to `minimize` (such as `bracket`, `tol`, etc.), except the `options` dict, which has its contents also passed as `method` parameters pair by pair. The method shall return an `OptimizeResult` object. The provided `method` callable must be able to accept (and possibly ignore) arbitrary parameters; the set of parameters accepted by `minimize` may expand in future versions and then these parameters will be passed to the method. You can find an example in the scipy.optimize tutorial. .. versionadded:: 0.11.0 References ---------- .. [1] Press, W., S.A. Teukolsky, W.T. Vetterling, and B.P. Flannery. Numerical Recipes in C. Cambridge University Press. .. [2] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods for Mathematical Computations." Prentice-Hall Series in Automatic Computation 259 (1977). .. [3] Brent, Richard P. Algorithms for Minimization Without Derivatives. Courier Corporation, 2013. Examples -------- Consider the problem of minimizing the following function. >>> def f(x): ... 
return (x - 2) * x * (x + 2)**2 Using the *Brent* method, we find the local minimum as: >>> from scipy.optimize import minimize_scalar >>> res = minimize_scalar(f) >>> res.fun -9.9149495908 The minimizer is: >>> res.x 1.28077640403 Using the *Bounded* method, we find a local minimum with specified bounds as: >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded') >>> res.fun # minimum 3.28365179850e-13 >>> res.x # minimizer -2.0000002026 """ if not isinstance(args, tuple): args = (args,) if callable(method): meth = "_custom" elif method is None: meth = 'brent' if bounds is None else 'bounded' else: meth = method.lower() if options is None: options = {} if bounds is not None and meth in {'brent', 'golden'}: message = f"Use of `bounds` is incompatible with 'method={method}'." raise ValueError(message) if tol is not None: options = dict(options) if meth == 'bounded' and 'xatol' not in options: warn("Method 'bounded' does not support relative tolerance in x; " "defaulting to absolute tolerance.", RuntimeWarning) options['xatol'] = tol elif meth == '_custom': options.setdefault('tol', tol) else: options.setdefault('xtol', tol) # replace boolean "disp" option, if specified, by an integer value. disp = options.get('disp') if isinstance(disp, bool): options['disp'] = 2 * int(disp) if meth == '_custom': res = method(fun, args=args, bracket=bracket, bounds=bounds, **options) elif meth == 'brent': res = _recover_from_bracket_error(_minimize_scalar_brent, fun, bracket, args, **options) elif meth == 'bounded': if bounds is None: raise ValueError('The `bounds` parameter is mandatory for ' 'method `bounded`.') res = _minimize_scalar_bounded(fun, bounds, args, **options) elif meth == 'golden': res = _recover_from_bracket_error(_minimize_scalar_golden, fun, bracket, args, **options) else: raise ValueError('Unknown solver %s' % method) # gh-16196 reported inconsistencies in the output shape of `res.x`. While # fixing this, future-proof it for when the function is vectorized: # the shape of `res.x` should match that of `res.fun`. res.fun = np.asarray(res.fun)[()] res.x = np.reshape(res.x, res.fun.shape)[()] return res def _remove_from_bounds(bounds, i_fixed): """Removes fixed variables from a `Bounds` instance""" lb = bounds.lb[~i_fixed] ub = bounds.ub[~i_fixed] return Bounds(lb, ub) # don't mutate original Bounds object def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0): """Wraps a function such that fixed variables need not be passed in""" def fun_out(x_in, *args, **kwargs): x_out = np.zeros_like(i_fixed, dtype=x_in.dtype) x_out[i_fixed] = x_fixed x_out[~i_fixed] = x_in y_out = fun_in(x_out, *args, **kwargs) y_out = np.array(y_out) if min_dim == 1: y_out = np.atleast_1d(y_out) elif min_dim == 2: y_out = np.atleast_2d(y_out) if remove == 1: y_out = y_out[..., ~i_fixed] elif remove == 2: y_out = y_out[~i_fixed, ~i_fixed] return y_out return fun_out def _add_to_array(x_in, i_fixed, x_fixed): """Adds fixed variables back to an array""" i_free = ~i_fixed if x_in.ndim == 2: i_free = i_free[:, None] @ i_free[None, :] x_out = np.zeros_like(i_free, dtype=x_in.dtype) x_out[~i_free] = x_fixed x_out[i_free] = x_in.ravel() return x_out def _validate_bounds(bounds, x0, meth): """Check that bounds are valid.""" msg = "An upper bound is less than the corresponding lower bound." if np.any(bounds.ub < bounds.lb): raise ValueError(msg) msg = "The number of bounds is not compatible with the length of `x0`." 
try: bounds.lb = np.broadcast_to(bounds.lb, x0.shape) bounds.ub = np.broadcast_to(bounds.ub, x0.shape) except Exception as e: raise ValueError(msg) from e return bounds def standardize_bounds(bounds, x0, meth): """Converts bounds to the form required by the solver.""" if meth in {'trust-constr', 'powell', 'nelder-mead', 'cobyla', 'new'}: if not isinstance(bounds, Bounds): lb, ub = old_bound_to_new(bounds) bounds = Bounds(lb, ub) elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'): if isinstance(bounds, Bounds): bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0]) return bounds def standardize_constraints(constraints, x0, meth): """Converts constraints to the form required by the solver.""" all_constraint_types = (NonlinearConstraint, LinearConstraint, dict) new_constraint_types = all_constraint_types[:-1] if constraints is None: constraints = [] elif isinstance(constraints, all_constraint_types): constraints = [constraints] else: constraints = list(constraints) # ensure it's a mutable sequence if meth in ['trust-constr', 'new']: for i, con in enumerate(constraints): if not isinstance(con, new_constraint_types): constraints[i] = old_constraint_to_new(i, con) else: # iterate over copy, changing original for i, con in enumerate(list(constraints)): if isinstance(con, new_constraint_types): old_constraints = new_constraint_to_old(con, x0) constraints[i] = old_constraints[0] constraints.extend(old_constraints[1:]) # appends 1 if present return constraints def _optimize_result_for_equal_bounds( fun, bounds, method, args=(), constraints=() ): """ Provides a default OptimizeResult for when a bounded minimization method has (lb == ub).all(). Parameters ---------- fun: callable bounds: Bounds method: str constraints: Constraint """ success = True message = 'All independent variables were fixed by bounds.' # bounds is new-style x0 = bounds.lb if constraints: message = ("All independent variables were fixed by bounds at values" " that satisfy the constraints.") constraints = standardize_constraints(constraints, x0, 'new') maxcv = 0 for c in constraints: pc = PreparedConstraint(c, x0) violation = pc.violation(x0) if np.sum(violation): maxcv = max(maxcv, np.max(violation)) success = False message = (f"All independent variables were fixed by bounds, but " f"the independent variables do not satisfy the " f"constraints exactly. (Maximum violation: {maxcv}).") return OptimizeResult( x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1, njev=0, nhev=0, )
47,713
42.976037
89
py
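The `minimize` docstring in the record above describes a new-style callback that receives an `OptimizeResult` and can halt the solver by raising `StopIteration`, after which the wrapper reports status 99. A short illustrative sketch (not part of the module source), assuming a SciPy version that implements this callback interface:

from scipy.optimize import minimize, rosen, rosen_der

def callback(intermediate_result):
    # The parameter must be named `intermediate_result` to receive an
    # OptimizeResult with the current `x` and `fun`.
    if intermediate_result.fun < 1e-3:
        raise StopIteration

res = minimize(rosen, [1.3, 0.7, 0.8, 1.9, 1.2], method='BFGS',
               jac=rosen_der, callback=callback)
print(res.status, res.message)  # expected: 99 and the StopIteration message, per the handling above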
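The fixed-variable handling in the same record also covers the degenerate case in which every variable is pinned by the bounds. A small sketch of that behaviour (illustrative only, not part of the module source):

import numpy as np
from scipy.optimize import minimize, Bounds, rosen

# With lb == ub for every variable, the 'l-bfgs-b'/'tnc'/'slsqp' path above
# short-circuits and returns the fixed point without running the solver.
bounds = Bounds(lb=np.array([1.0, 2.0]), ub=np.array([1.0, 2.0]))
res = minimize(rosen, x0=[0.5, 0.5], method='L-BFGS-B', bounds=bounds)
print(res.x, res.nfev, res.message)  # expected: [1. 2.], 1 evaluation, "fixed by bounds" message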
scipy
scipy-main/scipy/optimize/_linprog_ip.py
"""Interior-point method for linear programming The *interior-point* method uses the primal-dual path following algorithm outlined in [1]_. This algorithm supports sparse constraint matrices and is typically faster than the simplex methods, especially for large, sparse problems. Note, however, that the solution returned may be slightly less accurate than those of the simplex methods and will not, in general, correspond with a vertex of the polytope defined by the constraints. .. versionadded:: 1.0.0 References ---------- .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ # Author: Matt Haberland import numpy as np import scipy as sp import scipy.sparse as sps from warnings import warn from scipy.linalg import LinAlgError from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options from ._linprog_util import _postsolve has_umfpack = True has_cholmod = True try: import sksparse # noqa: F401 from sksparse.cholmod import cholesky as cholmod from sksparse.cholmod import analyze as cholmod_analyze except ImportError: has_cholmod = False try: import scikits.umfpack # test whether to use factorized except ImportError: has_umfpack = False def _get_solver(M, sparse=False, lstsq=False, sym_pos=True, cholesky=True, permc_spec='MMD_AT_PLUS_A'): """ Given solver options, return a handle to the appropriate linear system solver. Parameters ---------- M : 2-D array As defined in [4] Equation 8.31 sparse : bool (default = False) True if the system to be solved is sparse. This is typically set True when the original ``A_ub`` and ``A_eq`` arrays are sparse. lstsq : bool (default = False) True if the system is ill-conditioned and/or (nearly) singular and thus a more robust least-squares solver is desired. This is sometimes needed as the solution is approached. sym_pos : bool (default = True) True if the system matrix is symmetric positive definite Sometimes this needs to be set false as the solution is approached, even when the system should be symmetric positive definite, due to numerical difficulties. cholesky : bool (default = True) True if the system is to be solved by Cholesky, rather than LU, decomposition. This is typically faster unless the problem is very small or prone to numerical difficulties. permc_spec : str (default = 'MMD_AT_PLUS_A') Sparsity preservation strategy used by SuperLU. Acceptable values are: - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering. See SuperLU documentation. 
Returns ------- solve : function Handle to the appropriate solver function """ try: if sparse: if lstsq: def solve(r, sym_pos=False): return sps.linalg.lsqr(M, r)[0] elif cholesky: try: # Will raise an exception in the first call, # or when the matrix changes due to a new problem _get_solver.cholmod_factor.cholesky_inplace(M) except Exception: _get_solver.cholmod_factor = cholmod_analyze(M) _get_solver.cholmod_factor.cholesky_inplace(M) solve = _get_solver.cholmod_factor else: if has_umfpack and sym_pos: solve = sps.linalg.factorized(M) else: # factorized doesn't pass permc_spec solve = sps.linalg.splu(M, permc_spec=permc_spec).solve else: if lstsq: # sometimes necessary as solution is approached def solve(r): return sp.linalg.lstsq(M, r)[0] elif cholesky: L = sp.linalg.cho_factor(M) def solve(r): return sp.linalg.cho_solve(L, r) else: # this seems to cache the matrix factorization, so solving # with multiple right hand sides is much faster def solve(r, sym_pos=sym_pos): if sym_pos: return sp.linalg.solve(M, r, assume_a="pos") else: return sp.linalg.solve(M, r) # There are many things that can go wrong here, and it's hard to say # what all of them are. It doesn't really matter: if the matrix can't be # factorized, return None. get_solver will be called again with different # inputs, and a new routine will try to factorize the matrix. except KeyboardInterrupt: raise except Exception: return None return solve def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False, lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False, permc_spec='MMD_AT_PLUS_A'): """ Given standard form problem defined by ``A``, ``b``, and ``c``; current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``; algorithmic parameters ``gamma and ``eta; and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc`` (predictor-corrector), and ``ip`` (initial point improvement), get the search direction for increments to the variable estimates. Parameters ---------- As defined in [4], except: sparse : bool True if the system to be solved is sparse. This is typically set True when the original ``A_ub`` and ``A_eq`` arrays are sparse. lstsq : bool True if the system is ill-conditioned and/or (nearly) singular and thus a more robust least-squares solver is desired. This is sometimes needed as the solution is approached. sym_pos : bool True if the system matrix is symmetric positive definite Sometimes this needs to be set false as the solution is approached, even when the system should be symmetric positive definite, due to numerical difficulties. cholesky : bool True if the system is to be solved by Cholesky, rather than LU, decomposition. This is typically faster unless the problem is very small or prone to numerical difficulties. pc : bool True if the predictor-corrector method of Mehrota is to be used. This is almost always (if not always) beneficial. Even though it requires the solution of an additional linear system, the factorization is typically (implicitly) reused so solution is efficient, and the number of algorithm iterations is typically reduced. ip : bool True if the improved initial point suggestion due to [4] section 4.3 is desired. It's unclear whether this is beneficial. permc_spec : str (default = 'MMD_AT_PLUS_A') (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = True``.) A matrix is factorized in each iteration of the algorithm. This option specifies how to permute the columns of the matrix for sparsity preservation. 
Acceptable values are: - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering. This option can impact the convergence of the interior point algorithm; test different values to determine which performs best for your problem. For more information, refer to ``scipy.sparse.linalg.splu``. Returns ------- Search directions as defined in [4] References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ if A.shape[0] == 0: # If there are no constraints, some solvers fail (understandably) # rather than returning empty solution. This gets the job done. sparse, lstsq, sym_pos, cholesky = False, False, True, False n_x = len(x) # [4] Equation 8.8 r_P = b * tau - A.dot(x) r_D = c * tau - A.T.dot(y) - z r_G = c.dot(x) - b.transpose().dot(y) + kappa mu = (x.dot(z) + tau * kappa) / (n_x + 1) # Assemble M from [4] Equation 8.31 Dinv = x / z if sparse: M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T)) else: M = A.dot(Dinv.reshape(-1, 1) * A.T) solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec) # pc: "predictor-corrector" [4] Section 4.1 # In development this option could be turned off # but it always seems to improve performance substantially n_corrections = 1 if pc else 0 i = 0 alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0 while i <= n_corrections: # Reference [4] Eq. 8.6 rhatp = eta(gamma) * r_P rhatd = eta(gamma) * r_D rhatg = eta(gamma) * r_G # Reference [4] Eq. 8.7 rhatxs = gamma * mu - x * z rhattk = gamma * mu - tau * kappa if i == 1: if ip: # if the correction is to get "initial point" # Reference [4] Eq. 8.23 rhatxs = ((1 - alpha) * gamma * mu - x * z - alpha**2 * d_x * d_z) rhattk = ((1 - alpha) * gamma * mu - tau * kappa - alpha**2 * d_tau * d_kappa) else: # if the correction is for "predictor-corrector" # Reference [4] Eq. 8.13 rhatxs -= d_x * d_z rhattk -= d_tau * d_kappa # sometimes numerical difficulties arise as the solution is approached # this loop tries to solve the equations using a sequence of functions # for solve. For dense systems, the order is: # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve, # 2. scipy.linalg.solve w/ sym_pos = True, # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails # 4. scipy.linalg.lstsq # For sparse systems, the order is: # 1. sksparse.cholmod.cholesky (if available) # 2. scipy.sparse.linalg.factorized (if umfpack available) # 3. scipy.sparse.linalg.splu # 4. scipy.sparse.linalg.lsqr solved = False while not solved: try: # [4] Equation 8.28 p, q = _sym_solve(Dinv, A, c, b, solve) # [4] Equation 8.29 u, v = _sym_solve(Dinv, A, rhatd - (1 / x) * rhatxs, rhatp, solve) if np.any(np.isnan(p)) or np.any(np.isnan(q)): raise LinAlgError solved = True except (LinAlgError, ValueError, TypeError) as e: # Usually this doesn't happen. If it does, it happens when # there are redundant constraints or when approaching the # solution. If so, change solver. if cholesky: cholesky = False warn( "Solving system with option 'cholesky':True " "failed. It is normal for this to happen " "occasionally, especially as the solution is " "approached. 
However, if you see this frequently, " "consider setting option 'cholesky' to False.", OptimizeWarning, stacklevel=5) elif sym_pos: sym_pos = False warn( "Solving system with option 'sym_pos':True " "failed. It is normal for this to happen " "occasionally, especially as the solution is " "approached. However, if you see this frequently, " "consider setting option 'sym_pos' to False.", OptimizeWarning, stacklevel=5) elif not lstsq: lstsq = True warn( "Solving system with option 'sym_pos':False " "failed. This may happen occasionally, " "especially as the solution is " "approached. However, if you see this frequently, " "your problem may be numerically challenging. " "If you cannot improve the formulation, consider " "setting 'lstsq' to True. Consider also setting " "`presolve` to True, if it is not already.", OptimizeWarning, stacklevel=5) else: raise e solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec) # [4] Results after 8.29 d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) / (1 / tau * kappa + (-c.dot(p) + b.dot(q)))) d_x = u + p * d_tau d_y = v + q * d_tau # [4] Relations between after 8.25 and 8.26 d_z = (1 / x) * (rhatxs - z * d_x) d_kappa = 1 / tau * (rhattk - kappa * d_tau) # [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23 alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1) if ip: # initial point - see [4] 4.4 gamma = 10 else: # predictor-corrector, [4] definition after 8.12 beta1 = 0.1 # [4] pg. 220 (Table 8.1) gamma = (1 - alpha)**2 * min(beta1, (1 - alpha)) i += 1 return d_x, d_y, d_z, d_tau, d_kappa def _sym_solve(Dinv, A, r1, r2, solve): """ An implementation of [4] equation 8.31 and 8.32 References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ # [4] 8.31 r = r2 + A.dot(Dinv * r1) v = solve(r) # [4] 8.32 u = Dinv * (A.T.dot(v) - r1) return u, v def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0): """ An implementation of [4] equation 8.21 References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ # [4] 4.3 Equation 8.21, ignoring 8.20 requirement # same step is taken in primal and dual spaces # alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3 # the value 1 is used in Mehrota corrector and initial point correction i_x = d_x < 0 i_z = d_z < 0 alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1 alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1 alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1 alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1 alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa]) return alpha def _get_message(status): """ Given problem status code, return a more detailed message. Parameters ---------- status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered Returns ------- message : str A string descriptor of the exit status of the optimization. 
""" messages = ( ["Optimization terminated successfully.", "The iteration limit was reached before the algorithm converged.", "The algorithm terminated successfully and determined that the " "problem is infeasible.", "The algorithm terminated successfully and determined that the " "problem is unbounded.", "Numerical difficulties were encountered before the problem " "converged. Please check your problem formulation for errors, " "independence of linear equality constraints, and reasonable " "scaling and matrix condition numbers. If you continue to " "encounter this error, please submit a bug report." ]) return messages[status] def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha): """ An implementation of [4] Equation 8.9 References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ x = x + alpha * d_x tau = tau + alpha * d_tau z = z + alpha * d_z kappa = kappa + alpha * d_kappa y = y + alpha * d_y return x, y, z, tau, kappa def _get_blind_start(shape): """ Return the starting point from [4] 4.4 References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ m, n = shape x0 = np.ones(n) y0 = np.zeros(m) z0 = np.ones(n) tau0 = 1 kappa0 = 1 return x0, y0, z0, tau0, kappa0 def _indicators(A, b, c, c0, x, y, z, tau, kappa): """ Implementation of several equations from [4] used as indicators of the status of optimization. References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ # residuals for termination are relative to initial values x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape) # See [4], Section 4 - The Homogeneous Algorithm, Equation 8.8 def r_p(x, tau): return b * tau - A.dot(x) def r_d(y, z, tau): return c * tau - A.T.dot(y) - z def r_g(x, y, kappa): return kappa + c.dot(x) - b.dot(y) # np.dot unpacks if they are arrays of size one def mu(x, tau, z, kappa): return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1) obj = c.dot(x / tau) + c0 def norm(a): return np.linalg.norm(a) # See [4], Section 4.5 - The Stopping Criteria r_p0 = r_p(x0, tau0) r_d0 = r_d(y0, z0, tau0) r_g0 = r_g(x0, y0, kappa0) mu_0 = mu(x0, tau0, z0, kappa0) rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y))) rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0)) rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0)) rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0)) rho_mu = mu(x, tau, z, kappa) / mu_0 return rho_p, rho_d, rho_A, rho_g, rho_mu, obj def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False): """ Print indicators of optimization status to the console. Parameters ---------- rho_p : float The (normalized) primal feasibility, see [4] 4.5 rho_d : float The (normalized) dual feasibility, see [4] 4.5 rho_g : float The (normalized) duality gap, see [4] 4.5 alpha : float The step size, see [4] 4.3 rho_mu : float The (normalized) path parameter, see [4] 4.5 obj : float The objective function value of the current iterate header : bool True if a header is to be printed References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. 
"The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. """ if header: print("Primal Feasibility ", "Dual Feasibility ", "Duality Gap ", "Step ", "Path Parameter ", "Objective ") # no clue why this works fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}' print(fmt.format( float(rho_p), float(rho_d), float(rho_g), alpha if isinstance(alpha, str) else float(alpha), float(rho_mu), float(obj))) def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args): r""" Solve a linear programming problem in standard form: Minimize:: c @ x Subject to:: A @ x == b x >= 0 using the interior point method of [4]. Parameters ---------- A : 2-D array 2-D array such that ``A @ x``, gives the values of the equality constraints at ``x``. b : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in ``A`` (for standard form problem). c : 1-D array Coefficients of the linear objective function to be minimized (for standard form problem). c0 : float Constant term in objective function due to fixed (and eliminated) variables. (Purely for display.) alpha0 : float The maximal step size for Mehrota's predictor-corrector search direction; see :math:`\beta_3`of [4] Table 8.1 beta : float The desired reduction of the path parameter :math:`\mu` (see [6]_) maxiter : int The maximum number of iterations of the algorithm. disp : bool Set to ``True`` if indicators of optimization status are to be printed to the console each iteration. tol : float Termination tolerance; see [4]_ Section 4.5. sparse : bool Set to ``True`` if the problem is to be treated as sparse. However, the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as (dense) arrays rather than sparse matrices. lstsq : bool Set to ``True`` if the problem is expected to be very poorly conditioned. This should always be left as ``False`` unless severe numerical difficulties are frequently encountered, and a better option would be to improve the formulation of the problem. sym_pos : bool Leave ``True`` if the problem is expected to yield a well conditioned symmetric positive definite normal equation matrix (almost always). cholesky : bool Set to ``True`` if the normal equations are to be solved by explicit Cholesky decomposition followed by explicit forward/backward substitution. This is typically faster for moderate, dense problems that are numerically well-behaved. pc : bool Leave ``True`` if the predictor-corrector method of Mehrota is to be used. This is almost always (if not always) beneficial. ip : bool Set to ``True`` if the improved initial point suggestion due to [4]_ Section 4.3 is desired. It's unclear whether this is beneficial. permc_spec : str (default = 'MMD_AT_PLUS_A') (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = True``.) A matrix is factorized in each iteration of the algorithm. This option specifies how to permute the columns of the matrix for sparsity preservation. Acceptable values are: - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering. This option can impact the convergence of the interior point algorithm; test different values to determine which performs best for your problem. 
For more information, refer to ``scipy.sparse.linalg.splu``. callback : callable, optional If a callback function is provided, it will be called within each iteration of the algorithm. The callback function must accept a single `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1-D array Current solution vector fun : float Current value of the objective function success : bool True only when an algorithm has completed successfully, so this is always False as the callback function is called only while the algorithm is still iterating. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` phase : int The phase of the algorithm being executed. This is always 1 for the interior-point method because it has only one phase. status : int For revised simplex, this is always 0 because if a different status is detected, the algorithm terminates. nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. postsolve_args : tuple Data needed by _postsolve to convert the solution to the standard-form problem into the solution to the original problem. Returns ------- x_hat : float Solution vector (for standard form problem). status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered message : str A string descriptor of the exit status of the optimization. iteration : int The number of iterations taken to solve the problem References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear Programming based on Newton's Method." Unpublished Course Notes, March 2004. Available 2/25/2017 at: https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf """ iteration = 0 # default initial point x, y, z, tau, kappa = _get_blind_start(A.shape) # first iteration is special improvement of initial point ip = ip if pc else False # [4] 4.5 rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( A, b, c, c0, x, y, z, tau, kappa) go = rho_p > tol or rho_d > tol or rho_A > tol # we might get lucky : ) if disp: _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True) if callback is not None: x_o, fun, slack, con = _postsolve(x/tau, postsolve_args) res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, 'con': con, 'nit': iteration, 'phase': 1, 'complete': False, 'status': 0, 'message': "", 'success': False}) callback(res) status = 0 message = "Optimization terminated successfully." 
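# Added explanatory comment (summary of the loop below, for readability):
# each pass of the `while go` loop solves [4] 8.6/8.7 for a search direction
# via _get_delta, advances the iterate with _do_step, then re-evaluates the
# stopping indicators of [4] 4.5 until convergence, detection of
# infeasibility/unboundedness, or the iteration limit is reached.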
if sparse: A = sps.csc_matrix(A) while go: iteration += 1 if ip: # initial point # [4] Section 4.4 gamma = 1 def eta(g): return 1 else: # gamma = 0 in predictor step according to [4] 4.1 # if predictor/corrector is off, use mean of complementarity [6] # 5.1 / [4] Below Figure 10-4 gamma = 0 if pc else beta * np.mean(z * x) # [4] Section 4.1 def eta(g=gamma): return 1 - g try: # Solve [4] 8.6 and 8.7/8.13/8.23 d_x, d_y, d_z, d_tau, d_kappa = _get_delta( A, b, c, x, y, z, tau, kappa, gamma, eta, sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec) if ip: # initial point # [4] 4.4 # Formula after 8.23 takes a full step regardless if this will # take it negative alpha = 1.0 x, y, z, tau, kappa = _do_step( x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha) x[x < 1] = 1 z[z < 1] = 1 tau = max(1, tau) kappa = max(1, kappa) ip = False # done with initial point else: # [4] Section 4.3 alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0) # [4] Equation 8.9 x, y, z, tau, kappa = _do_step( x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha) except (LinAlgError, FloatingPointError, ValueError, ZeroDivisionError): # this can happen when sparse solver is used and presolve # is turned off. Also observed ValueError in AppVeyor Python 3.6 # Win32 build (PR #8676). I've never seen it otherwise. status = 4 message = _get_message(status) break # [4] 4.5 rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators( A, b, c, c0, x, y, z, tau, kappa) go = rho_p > tol or rho_d > tol or rho_A > tol if disp: _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj) if callback is not None: x_o, fun, slack, con = _postsolve(x/tau, postsolve_args) res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, 'con': con, 'nit': iteration, 'phase': 1, 'complete': False, 'status': 0, 'message': "", 'success': False}) callback(res) # [4] 4.5 inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol * max(1, kappa)) inf2 = rho_mu < tol and tau < tol * min(1, kappa) if inf1 or inf2: # [4] Lemma 8.4 / Theorem 8.3 if b.transpose().dot(y) > tol: status = 2 else: # elif c.T.dot(x) < tol: ? Probably not necessary. status = 3 message = _get_message(status) break elif iteration >= maxiter: status = 1 message = _get_message(status) break x_hat = x / tau # [4] Statement after Theorem 8.2 return x_hat, status, message, iteration def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8, disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False, sym_pos=True, cholesky=None, pc=True, ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options): r""" Minimize a linear objective function subject to linear equality and non-negativity constraints using the interior point method of [4]_. Linear programming is intended to solve problems of the following form: Minimize:: c @ x Subject to:: A @ x == b x >= 0 User-facing documentation is in _linprog_doc.py. Parameters ---------- c : 1-D array Coefficients of the linear objective function to be minimized. c0 : float Constant term in objective function due to fixed (and eliminated) variables. (Purely for display.) A : 2-D array 2-D array such that ``A @ x``, gives the values of the equality constraints at ``x``. b : 1-D array 1-D array of values representing the right hand side of each equality constraint (row) in ``A``. callback : callable, optional Callback function to be executed once per iteration. postsolve_args : tuple Data needed by _postsolve to convert the solution to the standard-form problem into the solution to the original problem. 
Options ------- maxiter : int (default = 1000) The maximum number of iterations of the algorithm. tol : float (default = 1e-8) Termination tolerance to be used for all termination criteria; see [4]_ Section 4.5. disp : bool (default = False) Set to ``True`` if indicators of optimization status are to be printed to the console each iteration. alpha0 : float (default = 0.99995) The maximal step size for Mehrota's predictor-corrector search direction; see :math:`\beta_{3}` of [4]_ Table 8.1. beta : float (default = 0.1) The desired reduction of the path parameter :math:`\mu` (see [6]_) when Mehrota's predictor-corrector is not in use (uncommon). sparse : bool (default = False) Set to ``True`` if the problem is to be treated as sparse after presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix, this option will automatically be set ``True``, and the problem will be treated as sparse even during presolve. If your constraint matrices contain mostly zeros and the problem is not very small (less than about 100 constraints or variables), consider setting ``True`` or providing ``A_eq`` and ``A_ub`` as sparse matrices. lstsq : bool (default = False) Set to ``True`` if the problem is expected to be very poorly conditioned. This should always be left ``False`` unless severe numerical difficulties are encountered. Leave this at the default unless you receive a warning message suggesting otherwise. sym_pos : bool (default = True) Leave ``True`` if the problem is expected to yield a well conditioned symmetric positive definite normal equation matrix (almost always). Leave this at the default unless you receive a warning message suggesting otherwise. cholesky : bool (default = True) Set to ``True`` if the normal equations are to be solved by explicit Cholesky decomposition followed by explicit forward/backward substitution. This is typically faster for problems that are numerically well-behaved. pc : bool (default = True) Leave ``True`` if the predictor-corrector method of Mehrota is to be used. This is almost always (if not always) beneficial. ip : bool (default = False) Set to ``True`` if the improved initial point suggestion due to [4]_ Section 4.3 is desired. Whether this is beneficial or not depends on the problem. permc_spec : str (default = 'MMD_AT_PLUS_A') (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos = True``, and no SuiteSparse.) A matrix is factorized in each iteration of the algorithm. This option specifies how to permute the columns of the matrix for sparsity preservation. Acceptable values are: - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering. This option can impact the convergence of the interior point algorithm; test different values to determine which performs best for your problem. For more information, refer to ``scipy.sparse.linalg.splu``. unknown_options : dict Optional arguments not used by this particular solver. If `unknown_options` is non-empty a warning is issued listing all unused options. Returns ------- x : 1-D array Solution vector. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered message : str A string descriptor of the exit status of the optimization. 
iteration : int The number of iterations taken to solve the problem. Notes ----- This method implements the algorithm outlined in [4]_ with ideas from [8]_ and a structure inspired by the simpler methods of [6]_. The primal-dual path following method begins with initial 'guesses' of the primal and dual variables of the standard form problem and iteratively attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the problem with a gradually reduced logarithmic barrier term added to the objective. This particular implementation uses a homogeneous self-dual formulation, which provides certificates of infeasibility or unboundedness where applicable. The default initial point for the primal and dual variables is that defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial point option ``ip=True``), an alternate (potentially improved) starting point can be calculated according to the additional recommendations of [4]_ Section 4.4. A search direction is calculated using the predictor-corrector method (single correction) proposed by Mehrota and detailed in [4]_ Section 4.1. (A potential improvement would be to implement the method of multiple corrections described in [4]_ Section 4.2.) In practice, this is accomplished by solving the normal equations, [4]_ Section 5.1 Equations 8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations 8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of solving the normal equations rather than 8.25 directly is that the matrices involved are symmetric positive definite, so Cholesky decomposition can be used rather than the more expensive LU factorization. With default options, the solver used to perform the factorization depends on third-party software availability and the conditioning of the problem. For dense problems, solvers are tried in the following order: 1. ``scipy.linalg.cho_factor`` 2. ``scipy.linalg.solve`` with option ``sym_pos=True`` 3. ``scipy.linalg.solve`` with option ``sym_pos=False`` 4. ``scipy.linalg.lstsq`` For sparse problems: 1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed) 2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse are installed) 3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy) 4. ``scipy.sparse.linalg.lsqr`` If the solver fails for any reason, successively more robust (but slower) solvers are attempted in the order indicated. Attempting, failing, and re-starting factorization can be time consuming, so if the problem is numerically challenging, options can be set to bypass solvers that are failing. Setting ``cholesky=False`` skips to solver 2, ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips to solver 4 for both sparse and dense problems. Potential improvements for combatting issues associated with dense columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and [10]_ Section 4.1-4.2; the latter also discusses the alleviation of accuracy issues associated with the substitution approach to free variables. After calculating the search direction, the maximum possible step size that does not activate the non-negativity constraints is calculated, and the smaller of this step size and unity is applied (as in [4]_ Section 4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size. The new point is tested according to the termination conditions of [4]_ Section 4.5. The same tolerance, which can be set using the ``tol`` option, is used for all checks. 
(A potential improvement would be to expose the different tolerances to be set independently.) If optimality, unboundedness, or infeasibility is detected, the solve procedure terminates; otherwise it repeats. The expected problem formulation differs between the top level ``linprog`` module and the method specific solvers. The method specific solvers expect a problem in standard form: Minimize:: c @ x Subject to:: A @ x == b x >= 0 Whereas the top level ``linprog`` module expects a problem of form: Minimize:: c @ x Subject to:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The original problem contains equality, upper-bound and variable constraints whereas the method specific solver requires equality constraints and variable non-negativity. ``linprog`` module converts the original problem to standard form by converting the simple bounds to upper bound constraints, introducing non-negative slack variables for inequality constraints, and expressing unbounded variables as the difference between two non-negative variables. References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear Programming based on Newton's Method." Unpublished Course Notes, March 2004. Available 2/25/2017 at https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear programming." Mathematical Programming 71.2 (1995): 221-245. .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. .. [10] Andersen, Erling D., et al. Implementation of interior point methods for large scale linear programming. HEC/Universite de Geneve, 1996. """ _check_unknown_options(unknown_options) # These should be warnings, not errors if (cholesky or cholesky is None) and sparse and not has_cholmod: if cholesky: warn("Sparse cholesky is only available with scikit-sparse. " "Setting `cholesky = False`", OptimizeWarning, stacklevel=3) cholesky = False if sparse and lstsq: warn("Option combination 'sparse':True and 'lstsq':True " "is not recommended.", OptimizeWarning, stacklevel=3) if lstsq and cholesky: warn("Invalid option combination 'lstsq':True " "and 'cholesky':True; option 'cholesky' has no effect when " "'lstsq' is set True.", OptimizeWarning, stacklevel=3) valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD') if permc_spec.upper() not in valid_permc_spec: warn("Invalid permc_spec option: '" + str(permc_spec) + "'. " "Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', " "and 'COLAMD'. Reverting to default.", OptimizeWarning, stacklevel=3) permc_spec = 'MMD_AT_PLUS_A' # This can be an error if not sym_pos and cholesky: raise ValueError( "Invalid option combination 'sym_pos':False " "and 'cholesky':True: Cholesky decomposition is only possible " "for symmetric positive definite matrices.") cholesky = cholesky or (cholesky is None and sym_pos and not lstsq) x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args) return x, status, message, iteration
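# --- Illustrative sketch (not part of this module) ---------------------------
# A minimal, self-contained demonstration of the normal-equations solve that
# _get_delta/_sym_solve perform on the dense 'cholesky' path each iteration:
# M = A @ diag(x/z) @ A.T is symmetric positive definite, so it is factorized
# once and reused for both right-hand sides ([4] Eq. 8.28/8.29 via 8.31/8.32).
# All numbers below are made-up toy data, not taken from any real problem.
import numpy as np
import scipy.linalg

A = np.array([[1., 1., 1., 0.],
              [1., 3., 0., 1.]])      # standard-form equality constraints
b = np.array([4., 6.])
c = np.array([-1., -2., 0., 0.])
x = np.ones(4)                        # current (strictly positive) primal iterate
z = np.ones(4)                        # current (strictly positive) dual slacks

Dinv = x / z
M = A.dot(Dinv.reshape(-1, 1) * A.T)           # dense branch of _get_delta
c_and_lower = scipy.linalg.cho_factor(M)       # cholesky=True branch of _get_solver

def sym_solve(r1, r2):
    # mirrors _sym_solve: [4] 8.31 then 8.32
    v = scipy.linalg.cho_solve(c_and_lower, r2 + A.dot(Dinv * r1))
    u = Dinv * (A.T.dot(v) - r1)
    return u, v

p, q = sym_solve(c, b)                         # [4] Equation 8.28
# ------------------------------------------------------------------------------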
45,749
39.630551
143
py
scipy
scipy-main/scipy/optimize/_cobyla_py.py
""" Interface to Constrained Optimization By Linear Approximation Functions --------- .. autosummary:: :toctree: generated/ fmin_cobyla """ import functools from threading import RLock import numpy as np from scipy.optimize import _cobyla as cobyla from ._optimize import OptimizeResult, _check_unknown_options try: from itertools import izip except ImportError: izip = zip __all__ = ['fmin_cobyla'] # Workarund as _cobyla.minimize is not threadsafe # due to an unknown f2py bug and can segfault, # see gh-9658. _module_lock = RLock() def synchronized(func): @functools.wraps(func) def wrapper(*args, **kwargs): with _module_lock: return func(*args, **kwargs) return wrapper @synchronized def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4, *, callback=None): """ Minimize a function using the Constrained Optimization By Linear Approximation (COBYLA) method. This method wraps a FORTRAN implementation of the algorithm. Parameters ---------- func : callable Function to minimize. In the form func(x, \\*args). x0 : ndarray Initial guess. cons : sequence Constraint functions; must all be ``>=0`` (a single function if only 1 constraint). Each function takes the parameters `x` as its first argument, and it can return either a single number or an array or list of numbers. args : tuple, optional Extra arguments to pass to function. consargs : tuple, optional Extra arguments to pass to constraint functions (default of None means use same extra arguments as those passed to func). Use ``()`` for no extra arguments. rhobeg : float, optional Reasonable initial changes to the variables. rhoend : float, optional Final accuracy in the optimization (not precisely guaranteed). This is a lower bound on the size of the trust region. disp : {0, 1, 2, 3}, optional Controls the frequency of output; 0 implies no output. maxfun : int, optional Maximum number of function evaluations. catol : float, optional Absolute tolerance for constraint violations. callback : callable, optional Called after each iteration, as ``callback(x)``, where ``x`` is the current parameter vector. Returns ------- x : ndarray The argument that minimises `f`. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'COBYLA' `method` in particular. Notes ----- This algorithm is based on linear approximations to the objective function and each constraint. We briefly describe the algorithm. Suppose the function is being minimized over k variables. At the jth iteration the algorithm has k+1 points v_1, ..., v_(k+1), an approximate solution x_j, and a radius RHO_j. (i.e., linear plus a constant) approximations to the objective function and constraint functions such that their function values agree with the linear approximation on the k+1 points v_1,.., v_(k+1). This gives a linear program to solve (where the linear approximations of the constraint functions are constrained to be non-negative). However, the linear approximations are likely only good approximations near the current simplex, so the linear program is given the further requirement that the solution, which will become x_(j+1), must be within RHO_j from x_j. RHO_j only decreases, never increases. The initial RHO_j is rhobeg and the final RHO_j is rhoend. In this way COBYLA's iterations behave like a trust region algorithm. Additionally, the linear program may be inconsistent, or the approximation may give poor improvement. 
For details about how these issues are resolved, as well as how the points v_i are updated, refer to the source code or the references below. References ---------- Powell M.J.D. (1994), "A direct search optimization method that models the objective and constraint functions by linear interpolation.", in Advances in Optimization and Numerical Analysis, eds. S. Gomez and J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67 Powell M.J.D. (1998), "Direct search algorithms for optimization calculations", Acta Numerica 7, 287-336 Powell M.J.D. (2007), "A view of algorithms for optimization without derivatives", Cambridge University Technical Report DAMTP 2007/NA03 Examples -------- Minimize the objective function f(x,y) = x*y subject to the constraints x**2 + y**2 < 1 and y > 0:: >>> def objective(x): ... return x[0]*x[1] ... >>> def constr1(x): ... return 1 - (x[0]**2 + x[1]**2) ... >>> def constr2(x): ... return x[1] ... >>> from scipy.optimize import fmin_cobyla >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7) array([-0.70710685, 0.70710671]) The exact solution is (-sqrt(2)/2, sqrt(2)/2). """ err = "cons must be a sequence of callable functions or a single"\ " callable function." try: len(cons) except TypeError as e: if callable(cons): cons = [cons] else: raise TypeError(err) from e else: for thisfunc in cons: if not callable(thisfunc): raise TypeError(err) if consargs is None: consargs = args # build constraints con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons) # options opts = {'rhobeg': rhobeg, 'tol': rhoend, 'disp': disp, 'maxiter': maxfun, 'catol': catol, 'callback': callback} sol = _minimize_cobyla(func, x0, args, constraints=con, **opts) if disp and not sol['success']: print(f"COBYLA failed to find a solution: {sol.message}") return sol['x'] @synchronized def _minimize_cobyla(fun, x0, args=(), constraints=(), rhobeg=1.0, tol=1e-4, maxiter=1000, disp=False, catol=2e-4, callback=None, bounds=None, **unknown_options): """ Minimize a scalar function of one or more variables using the Constrained Optimization BY Linear Approximation (COBYLA) algorithm. Options ------- rhobeg : float Reasonable initial changes to the variables. tol : float Final accuracy in the optimization (not precisely guaranteed). This is a lower bound on the size of the trust region. disp : bool Set to True to print convergence messages. If False, `verbosity` is ignored as set to 0. maxiter : int Maximum number of function evaluations. catol : float Tolerance (absolute) for constraint violations """ _check_unknown_options(unknown_options) maxfun = maxiter rhoend = tol iprint = int(bool(disp)) # check constraints if isinstance(constraints, dict): constraints = (constraints, ) if bounds: i_lb = np.isfinite(bounds.lb) if np.any(i_lb): def lb_constraint(x, *args, **kwargs): return x[i_lb] - bounds.lb[i_lb] constraints.append({'type': 'ineq', 'fun': lb_constraint}) i_ub = np.isfinite(bounds.ub) if np.any(i_ub): def ub_constraint(x): return bounds.ub[i_ub] - x[i_ub] constraints.append({'type': 'ineq', 'fun': ub_constraint}) for ic, con in enumerate(constraints): # check type try: ctype = con['type'].lower() except KeyError as e: raise KeyError('Constraint %d has no type defined.' % ic) from e except TypeError as e: raise TypeError('Constraints must be defined using a ' 'dictionary.') from e except AttributeError as e: raise TypeError("Constraint's type must be a string.") from e else: if ctype != 'ineq': raise ValueError("Constraints of type '%s' not handled by " "COBYLA." 
% con['type']) # check function if 'fun' not in con: raise KeyError('Constraint %d has no function defined.' % ic) # check extra arguments if 'args' not in con: con['args'] = () # m is the total number of constraint values # it takes into account that some constraints may be vector-valued cons_lengths = [] for c in constraints: f = c['fun'](x0, *c['args']) try: cons_length = len(f) except TypeError: cons_length = 1 cons_lengths.append(cons_length) m = sum(cons_lengths) def calcfc(x, con): f = fun(np.copy(x), *args) i = 0 for size, c in izip(cons_lengths, constraints): con[i: i + size] = c['fun'](x, *c['args']) i += size return f def wrapped_callback(x): if callback is not None: callback(np.copy(x)) info = np.zeros(4, np.float64) xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg, rhoend=rhoend, iprint=iprint, maxfun=maxfun, dinfo=info, callback=wrapped_callback) if info[3] > catol: # Check constraint violation info[0] = 4 return OptimizeResult(x=xopt, status=int(info[0]), success=info[0] == 1, message={1: 'Optimization terminated successfully.', 2: 'Maximum number of function evaluations ' 'has been exceeded.', 3: 'Rounding errors are becoming damaging ' 'in COBYLA subroutine.', 4: 'Did not converge to a solution ' 'satisfying the constraints. See ' '`maxcv` for magnitude of violation.', 5: 'NaN result encountered.' }.get(info[0], 'Unknown exit status.'), nfev=int(info[1]), fun=info[2], maxcv=info[3])
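# --- Illustrative usage sketch (not part of this module) ---------------------
# The objective and constraints from the fmin_cobyla docstring example above,
# expressed through the public scipy.optimize.minimize interface, which
# dispatches to _minimize_cobyla when method='COBYLA'. The option values are
# arbitrary illustrative choices.
from scipy.optimize import minimize

def objective(x):
    return x[0] * x[1]

constraints = [{'type': 'ineq', 'fun': lambda x: 1 - (x[0]**2 + x[1]**2)},
               {'type': 'ineq', 'fun': lambda x: x[1]}]

res = minimize(objective, [0.0, 0.1], method='COBYLA',
               constraints=constraints,
               options={'rhobeg': 1.0, 'maxiter': 1000, 'catol': 2e-4})
# res.x should be close to (-sqrt(2)/2, sqrt(2)/2), matching the docstring
# example; res.maxcv reports the magnitude of any remaining constraint violation.
# ------------------------------------------------------------------------------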
10,662
33.508091
79
py
scipy
scipy-main/scipy/optimize/_differentiable_functions.py
import numpy as np import scipy.sparse as sps from ._numdiff import approx_derivative, group_columns from ._hessian_update_strategy import HessianUpdateStrategy from scipy.sparse.linalg import LinearOperator FD_METHODS = ('2-point', '3-point', 'cs') class ScalarFunction: """Scalar function and its derivatives. This class defines a scalar function F: R^n->R and methods for computing or approximating its first and second derivatives. Parameters ---------- fun : callable evaluates the scalar function. Must be of the form ``fun(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. Should return a scalar. x0 : array-like Provides an initial set of variables for evaluating fun. Array of real elements of size (n,), where 'n' is the number of independent variables. args : tuple, optional Any additional fixed parameters needed to completely specify the scalar function. grad : {callable, '2-point', '3-point', 'cs'} Method for computing the gradient vector. If it is a callable, it should be a function that returns the gradient vector: ``grad(x, *args) -> array_like, shape (n,)`` where ``x`` is an array with shape (n,) and ``args`` is a tuple with the fixed parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used to select a finite difference scheme for numerical estimation of the gradient with a relative step size. These finite difference schemes obey any specified `bounds`. hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy} Method for computing the Hessian matrix. If it is callable, it should return the Hessian matrix: ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)`` where x is a (n,) ndarray and `args` is a tuple with the fixed parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'} select a finite difference scheme for numerical estimation. Or, objects implementing `HessianUpdateStrategy` interface can be used to approximate the Hessian. Whenever the gradient is estimated via finite-differences, the Hessian cannot be estimated with options {'2-point', '3-point', 'cs'} and needs to be estimated using one of the quasi-Newton strategies. finite_diff_rel_step : None or array_like Relative step size to use. The absolute step size is computed as ``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None then finite_diff_rel_step is selected automatically, finite_diff_bounds : tuple of array_like Lower and upper bounds on independent variables. Defaults to no bounds, (-np.inf, np.inf). Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. epsilon : None or array_like, optional Absolute step size to use, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `epsilon` is ignored. By default relative steps are used, only if ``epsilon is not None`` are absolute steps used. Notes ----- This class implements a memoization logic. There are methods `fun`, `grad`, hess` and corresponding attributes `f`, `g` and `H`. The following things should be considered: 1. Use only public methods `fun`, `grad` and `hess`. 2. After one of the methods is called, the corresponding attribute will be set. 
However, a subsequent call with a different argument of *any* of the methods may overwrite the attribute. """ def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step, finite_diff_bounds, epsilon=None): if not callable(grad) and grad not in FD_METHODS: raise ValueError( f"`grad` must be either callable or one of {FD_METHODS}." ) if not (callable(hess) or hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)): raise ValueError( f"`hess` must be either callable, HessianUpdateStrategy" f" or one of {FD_METHODS}." ) if grad in FD_METHODS and hess in FD_METHODS: raise ValueError("Whenever the gradient is estimated via " "finite-differences, we require the Hessian " "to be estimated using one of the " "quasi-Newton strategies.") # the astype call ensures that self.x is a copy of x0 self.x = np.atleast_1d(x0).astype(float) self.n = self.x.size self.nfev = 0 self.ngev = 0 self.nhev = 0 self.f_updated = False self.g_updated = False self.H_updated = False self._lowest_x = None self._lowest_f = np.inf finite_diff_options = {} if grad in FD_METHODS: finite_diff_options["method"] = grad finite_diff_options["rel_step"] = finite_diff_rel_step finite_diff_options["abs_step"] = epsilon finite_diff_options["bounds"] = finite_diff_bounds if hess in FD_METHODS: finite_diff_options["method"] = hess finite_diff_options["rel_step"] = finite_diff_rel_step finite_diff_options["abs_step"] = epsilon finite_diff_options["as_linear_operator"] = True # Function evaluation def fun_wrapped(x): self.nfev += 1 # Send a copy because the user may overwrite it. # Overwriting results in undefined behaviour because # fun(self.x) will change self.x, with the two no longer linked. fx = fun(np.copy(x), *args) # Make sure the function returns a true scalar if not np.isscalar(fx): try: fx = np.asarray(fx).item() except (TypeError, ValueError) as e: raise ValueError( "The user-provided objective function " "must return a scalar value." 
) from e if fx < self._lowest_f: self._lowest_x = x self._lowest_f = fx return fx def update_fun(): self.f = fun_wrapped(self.x) self._update_fun_impl = update_fun self._update_fun() # Gradient evaluation if callable(grad): def grad_wrapped(x): self.ngev += 1 return np.atleast_1d(grad(np.copy(x), *args)) def update_grad(): self.g = grad_wrapped(self.x) elif grad in FD_METHODS: def update_grad(): self._update_fun() self.ngev += 1 self.g = approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options) self._update_grad_impl = update_grad self._update_grad() # Hessian Evaluation if callable(hess): self.H = hess(np.copy(x0), *args) self.H_updated = True self.nhev += 1 if sps.issparse(self.H): def hess_wrapped(x): self.nhev += 1 return sps.csr_matrix(hess(np.copy(x), *args)) self.H = sps.csr_matrix(self.H) elif isinstance(self.H, LinearOperator): def hess_wrapped(x): self.nhev += 1 return hess(np.copy(x), *args) else: def hess_wrapped(x): self.nhev += 1 return np.atleast_2d(np.asarray(hess(np.copy(x), *args))) self.H = np.atleast_2d(np.asarray(self.H)) def update_hess(): self.H = hess_wrapped(self.x) elif hess in FD_METHODS: def update_hess(): self._update_grad() self.H = approx_derivative(grad_wrapped, self.x, f0=self.g, **finite_diff_options) return self.H update_hess() self.H_updated = True elif isinstance(hess, HessianUpdateStrategy): self.H = hess self.H.initialize(self.n, 'hess') self.H_updated = True self.x_prev = None self.g_prev = None def update_hess(): self._update_grad() self.H.update(self.x - self.x_prev, self.g - self.g_prev) self._update_hess_impl = update_hess if isinstance(hess, HessianUpdateStrategy): def update_x(x): self._update_grad() self.x_prev = self.x self.g_prev = self.g # ensure that self.x is a copy of x. Don't store a reference # otherwise the memoization doesn't work properly. self.x = np.atleast_1d(x).astype(float) self.f_updated = False self.g_updated = False self.H_updated = False self._update_hess() else: def update_x(x): # ensure that self.x is a copy of x. Don't store a reference # otherwise the memoization doesn't work properly. self.x = np.atleast_1d(x).astype(float) self.f_updated = False self.g_updated = False self.H_updated = False self._update_x_impl = update_x def _update_fun(self): if not self.f_updated: self._update_fun_impl() self.f_updated = True def _update_grad(self): if not self.g_updated: self._update_grad_impl() self.g_updated = True def _update_hess(self): if not self.H_updated: self._update_hess_impl() self.H_updated = True def fun(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) self._update_fun() return self.f def grad(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) self._update_grad() return self.g def hess(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) self._update_hess() return self.H def fun_and_grad(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) self._update_fun() self._update_grad() return self.f, self.g class VectorFunction: """Vector function and its derivatives. This class defines a vector function F: R^n->R^m and methods for computing or approximating its first and second derivatives. Notes ----- This class implements a memoization logic. There are methods `fun`, `jac`, hess` and corresponding attributes `f`, `J` and `H`. The following things should be considered: 1. Use only public methods `fun`, `jac` and `hess`. 2. After one of the methods is called, the corresponding attribute will be set. 
However, a subsequent call with a different argument of *any* of the methods may overwrite the attribute. """ def __init__(self, fun, x0, jac, hess, finite_diff_rel_step, finite_diff_jac_sparsity, finite_diff_bounds, sparse_jacobian): if not callable(jac) and jac not in FD_METHODS: raise ValueError("`jac` must be either callable or one of {}." .format(FD_METHODS)) if not (callable(hess) or hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)): raise ValueError("`hess` must be either callable," "HessianUpdateStrategy or one of {}." .format(FD_METHODS)) if jac in FD_METHODS and hess in FD_METHODS: raise ValueError("Whenever the Jacobian is estimated via " "finite-differences, we require the Hessian to " "be estimated using one of the quasi-Newton " "strategies.") self.x = np.atleast_1d(x0).astype(float) self.n = self.x.size self.nfev = 0 self.njev = 0 self.nhev = 0 self.f_updated = False self.J_updated = False self.H_updated = False finite_diff_options = {} if jac in FD_METHODS: finite_diff_options["method"] = jac finite_diff_options["rel_step"] = finite_diff_rel_step if finite_diff_jac_sparsity is not None: sparsity_groups = group_columns(finite_diff_jac_sparsity) finite_diff_options["sparsity"] = (finite_diff_jac_sparsity, sparsity_groups) finite_diff_options["bounds"] = finite_diff_bounds self.x_diff = np.copy(self.x) if hess in FD_METHODS: finite_diff_options["method"] = hess finite_diff_options["rel_step"] = finite_diff_rel_step finite_diff_options["as_linear_operator"] = True self.x_diff = np.copy(self.x) if jac in FD_METHODS and hess in FD_METHODS: raise ValueError("Whenever the Jacobian is estimated via " "finite-differences, we require the Hessian to " "be estimated using one of the quasi-Newton " "strategies.") # Function evaluation def fun_wrapped(x): self.nfev += 1 return np.atleast_1d(fun(x)) def update_fun(): self.f = fun_wrapped(self.x) self._update_fun_impl = update_fun update_fun() self.v = np.zeros_like(self.f) self.m = self.v.size # Jacobian Evaluation if callable(jac): self.J = jac(self.x) self.J_updated = True self.njev += 1 if (sparse_jacobian or sparse_jacobian is None and sps.issparse(self.J)): def jac_wrapped(x): self.njev += 1 return sps.csr_matrix(jac(x)) self.J = sps.csr_matrix(self.J) self.sparse_jacobian = True elif sps.issparse(self.J): def jac_wrapped(x): self.njev += 1 return jac(x).toarray() self.J = self.J.toarray() self.sparse_jacobian = False else: def jac_wrapped(x): self.njev += 1 return np.atleast_2d(jac(x)) self.J = np.atleast_2d(self.J) self.sparse_jacobian = False def update_jac(): self.J = jac_wrapped(self.x) elif jac in FD_METHODS: self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options) self.J_updated = True if (sparse_jacobian or sparse_jacobian is None and sps.issparse(self.J)): def update_jac(): self._update_fun() self.J = sps.csr_matrix( approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options)) self.J = sps.csr_matrix(self.J) self.sparse_jacobian = True elif sps.issparse(self.J): def update_jac(): self._update_fun() self.J = approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options).toarray() self.J = self.J.toarray() self.sparse_jacobian = False else: def update_jac(): self._update_fun() self.J = np.atleast_2d( approx_derivative(fun_wrapped, self.x, f0=self.f, **finite_diff_options)) self.J = np.atleast_2d(self.J) self.sparse_jacobian = False self._update_jac_impl = update_jac # Define Hessian if callable(hess): self.H = hess(self.x, self.v) self.H_updated = True self.nhev += 1 
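# Added explanatory comment: here `hess` is the user callable hess(x, v),
# i.e. the Hessian of v @ F(x) (compare the finite-difference branch below,
# which differentiates jac(x).T @ v). The wrappers that follow only count
# evaluations and normalize the result to sparse, LinearOperator or dense form.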
if sps.issparse(self.H): def hess_wrapped(x, v): self.nhev += 1 return sps.csr_matrix(hess(x, v)) self.H = sps.csr_matrix(self.H) elif isinstance(self.H, LinearOperator): def hess_wrapped(x, v): self.nhev += 1 return hess(x, v) else: def hess_wrapped(x, v): self.nhev += 1 return np.atleast_2d(np.asarray(hess(x, v))) self.H = np.atleast_2d(np.asarray(self.H)) def update_hess(): self.H = hess_wrapped(self.x, self.v) elif hess in FD_METHODS: def jac_dot_v(x, v): return jac_wrapped(x).T.dot(v) def update_hess(): self._update_jac() self.H = approx_derivative(jac_dot_v, self.x, f0=self.J.T.dot(self.v), args=(self.v,), **finite_diff_options) update_hess() self.H_updated = True elif isinstance(hess, HessianUpdateStrategy): self.H = hess self.H.initialize(self.n, 'hess') self.H_updated = True self.x_prev = None self.J_prev = None def update_hess(): self._update_jac() # When v is updated before x was updated, then x_prev and # J_prev are None and we need this check. if self.x_prev is not None and self.J_prev is not None: delta_x = self.x - self.x_prev delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v) self.H.update(delta_x, delta_g) self._update_hess_impl = update_hess if isinstance(hess, HessianUpdateStrategy): def update_x(x): self._update_jac() self.x_prev = self.x self.J_prev = self.J self.x = np.atleast_1d(x).astype(float) self.f_updated = False self.J_updated = False self.H_updated = False self._update_hess() else: def update_x(x): self.x = np.atleast_1d(x).astype(float) self.f_updated = False self.J_updated = False self.H_updated = False self._update_x_impl = update_x def _update_v(self, v): if not np.array_equal(v, self.v): self.v = v self.H_updated = False def _update_x(self, x): if not np.array_equal(x, self.x): self._update_x_impl(x) def _update_fun(self): if not self.f_updated: self._update_fun_impl() self.f_updated = True def _update_jac(self): if not self.J_updated: self._update_jac_impl() self.J_updated = True def _update_hess(self): if not self.H_updated: self._update_hess_impl() self.H_updated = True def fun(self, x): self._update_x(x) self._update_fun() return self.f def jac(self, x): self._update_x(x) self._update_jac() return self.J def hess(self, x, v): # v should be updated before x. self._update_v(v) self._update_x(x) self._update_hess() return self.H class LinearVectorFunction: """Linear vector function and its derivatives. Defines a linear function F = A x, where x is N-D vector and A is m-by-n matrix. The Jacobian is constant and equals to A. The Hessian is identically zero and it is returned as a csr matrix. 
""" def __init__(self, A, x0, sparse_jacobian): if sparse_jacobian or sparse_jacobian is None and sps.issparse(A): self.J = sps.csr_matrix(A) self.sparse_jacobian = True elif sps.issparse(A): self.J = A.toarray() self.sparse_jacobian = False else: # np.asarray makes sure A is ndarray and not matrix self.J = np.atleast_2d(np.asarray(A)) self.sparse_jacobian = False self.m, self.n = self.J.shape self.x = np.atleast_1d(x0).astype(float) self.f = self.J.dot(self.x) self.f_updated = True self.v = np.zeros(self.m, dtype=float) self.H = sps.csr_matrix((self.n, self.n)) def _update_x(self, x): if not np.array_equal(x, self.x): self.x = np.atleast_1d(x).astype(float) self.f_updated = False def fun(self, x): self._update_x(x) if not self.f_updated: self.f = self.J.dot(x) self.f_updated = True return self.f def jac(self, x): self._update_x(x) return self.J def hess(self, x, v): self._update_x(x) self.v = v return self.H class IdentityVectorFunction(LinearVectorFunction): """Identity vector function and its derivatives. The Jacobian is the identity matrix, returned as a dense array when `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is identically zero and it is returned as a csr matrix. """ def __init__(self, x0, sparse_jacobian): n = len(x0) if sparse_jacobian or sparse_jacobian is None: A = sps.eye(n, format='csr') sparse_jacobian = True else: A = np.eye(n) sparse_jacobian = False super().__init__(A, x0, sparse_jacobian)
22,719
35.823339
79
py
scipy
scipy-main/scipy/optimize/_linprog_rs.py
"""Revised simplex method for linear programming The *revised simplex* method uses the method described in [1]_, except that a factorization [2]_ of the basis matrix, rather than its inverse, is efficiently maintained and used to solve the linear systems at each iteration of the algorithm. .. versionadded:: 1.3.0 References ---------- .. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear programming." Athena Scientific 1 (1997): 997. .. [2] Bartels, Richard H. "A stabilization of the simplex method." Journal in Numerische Mathematik 16.5 (1971): 414-434. """ # Author: Matt Haberland import numpy as np from numpy.linalg import LinAlgError from scipy.linalg import solve from ._optimize import _check_unknown_options from ._bglu_dense import LU from ._bglu_dense import BGLU as BGLU from ._linprog_util import _postsolve from ._optimize import OptimizeResult def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp, maxupdate, mast, pivot): """ The purpose of phase one is to find an initial basic feasible solution (BFS) to the original problem. Generates an auxiliary problem with a trivial BFS and an objective that minimizes infeasibility of the original problem. Solves the auxiliary problem using the main simplex routine (phase two). This either yields a BFS to the original problem or determines that the original problem is infeasible. If feasible, phase one detects redundant rows in the original constraint matrix and removes them, then chooses additional indices as necessary to complete a basis/BFS for the original problem. """ m, n = A.shape status = 0 # generate auxiliary problem to get initial BFS A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol) if status == 6: residual = c.dot(x) iter_k = 0 return x, basis, A, b, residual, status, iter_k # solve auxiliary problem phase_one_n = n iter_k = 0 x, basis, status, iter_k = _phase_two(c, A, x, basis, callback, postsolve_args, maxiter, tol, disp, maxupdate, mast, pivot, iter_k, phase_one_n) # check for infeasibility residual = c.dot(x) if status == 0 and residual > tol: status = 2 # drive artificial variables out of basis # TODO: test redundant row removal better # TODO: make solve more efficient with BGLU? This could take a while. keep_rows = np.ones(m, dtype=bool) for basis_column in basis[basis >= n]: B = A[:, basis] try: basis_finder = np.abs(solve(B, A)) # inefficient pertinent_row = np.argmax(basis_finder[:, basis_column]) eligible_columns = np.ones(n, dtype=bool) eligible_columns[basis[basis < n]] = 0 eligible_column_indices = np.where(eligible_columns)[0] index = np.argmax(basis_finder[:, :n] [pertinent_row, eligible_columns]) new_basis_column = eligible_column_indices[index] if basis_finder[pertinent_row, new_basis_column] < tol: keep_rows[pertinent_row] = False else: basis[basis == basis_column] = new_basis_column except LinAlgError: status = 4 # form solution to original problem A = A[keep_rows, :n] basis = basis[keep_rows] x = x[:n] m = A.shape[0] return x, basis, A, b, residual, status, iter_k def _get_more_basis_columns(A, basis): """ Called when the auxiliary problem terminates with artificial columns in the basis, which must be removed and replaced with non-artificial columns. Finds additional columns that do not make the matrix singular. 
""" m, n = A.shape # options for inclusion are those that aren't already in the basis a = np.arange(m+n) bl = np.zeros(len(a), dtype=bool) bl[basis] = 1 options = a[~bl] options = options[options < n] # and they have to be non-artificial # form basis matrix B = np.zeros((m, m)) B[:, 0:len(basis)] = A[:, basis] if (basis.size > 0 and np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)): raise Exception("Basis has dependent columns") rank = 0 # just enter the loop for i in range(n): # somewhat arbitrary, but we need another way out # permute the options, and take as many as needed new_basis = np.random.permutation(options)[:m-len(basis)] B[:, len(basis):] = A[:, new_basis] # update the basis matrix rank = np.linalg.matrix_rank(B) # check the rank if rank == m: break return np.concatenate((basis, new_basis)) def _generate_auxiliary_problem(A, b, x0, tol): """ Modifies original problem to create an auxiliary problem with a trivial initial basic feasible solution and an objective that minimizes infeasibility in the original problem. Conceptually, this is done by stacking an identity matrix on the right of the original constraint matrix, adding artificial variables to correspond with each of these new columns, and generating a cost vector that is all zeros except for ones corresponding with each of the new variables. A initial basic feasible solution is trivial: all variables are zero except for the artificial variables, which are set equal to the corresponding element of the right hand side `b`. Runnning the simplex method on this auxiliary problem drives all of the artificial variables - and thus the cost - to zero if the original problem is feasible. The original problem is declared infeasible otherwise. Much of the complexity below is to improve efficiency by using singleton columns in the original problem where possible, thus generating artificial variables only as necessary, and using an initial 'guess' basic feasible solution. """ status = 0 m, n = A.shape if x0 is not None: x = x0 else: x = np.zeros(n) r = b - A@x # residual; this must be all zeros for feasibility A[r < 0] = -A[r < 0] # express problem with RHS positive for trivial BFS b[r < 0] = -b[r < 0] # to the auxiliary problem r[r < 0] *= -1 # Rows which we will need to find a trivial way to zero. # This should just be the rows where there is a nonzero residual. # But then we would not necessarily have a column singleton in every row. # This makes it difficult to find an initial basis. 
if x0 is None: nonzero_constraints = np.arange(m) else: nonzero_constraints = np.where(r > tol)[0] # these are (at least some of) the initial basis columns basis = np.where(np.abs(x) > tol)[0] if len(nonzero_constraints) == 0 and len(basis) <= m: # already a BFS c = np.zeros(n) basis = _get_more_basis_columns(A, basis) return A, b, c, basis, x, status elif (len(nonzero_constraints) > m - len(basis) or np.any(x < 0)): # can't get trivial BFS c = np.zeros(n) status = 6 return A, b, c, basis, x, status # chooses existing columns appropriate for inclusion in initial basis cols, rows = _select_singleton_columns(A, r) # find the rows we need to zero that we _can_ zero with column singletons i_tofix = np.isin(rows, nonzero_constraints) # these columns can't already be in the basis, though # we are going to add them to the basis and change the corresponding x val i_notinbasis = np.logical_not(np.isin(cols, basis)) i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis) rows = rows[i_fix_without_aux] cols = cols[i_fix_without_aux] # indices of the rows we can only zero with auxiliary variable # these rows will get a one in each auxiliary column arows = nonzero_constraints[np.logical_not( np.isin(nonzero_constraints, rows))] n_aux = len(arows) acols = n + np.arange(n_aux) # indices of auxiliary columns basis_ng = np.concatenate((cols, acols)) # basis columns not from guess basis_ng_rows = np.concatenate((rows, arows)) # rows we need to zero # add auxiliary singleton columns A = np.hstack((A, np.zeros((m, n_aux)))) A[arows, acols] = 1 # generate initial BFS x = np.concatenate((x, np.zeros(n_aux))) x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng] # generate costs to minimize infeasibility c = np.zeros(n_aux + n) c[acols] = 1 # basis columns correspond with nonzeros in guess, those with column # singletons we used to zero remaining constraints, and any additional # columns to get a full set (m columns) basis = np.concatenate((basis, basis_ng)) basis = _get_more_basis_columns(A, basis) # add columns as needed return A, b, c, basis, x, status def _select_singleton_columns(A, b): """ Finds singleton columns for which the singleton entry is of the same sign as the right-hand side; these columns are eligible for inclusion in an initial basis. Determines the rows in which the singleton entries are located. For each of these rows, returns the indices of the one singleton column and its corresponding row. """ # find indices of all singleton columns and corresponding row indicies column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0] columns = A[:, column_indices] # array of singleton columns row_indices = np.zeros(len(column_indices), dtype=int) nonzero_rows, nonzero_columns = np.nonzero(columns) row_indices[nonzero_columns] = nonzero_rows # corresponding row indicies # keep only singletons with entries that have same sign as RHS # this is necessary because all elements of BFS must be non-negative same_sign = A[row_indices, column_indices]*b[row_indices] >= 0 column_indices = column_indices[same_sign][::-1] row_indices = row_indices[same_sign][::-1] # Reversing the order so that steps below select rightmost columns # for initial basis, which will tend to be slack variables. (If the # guess corresponds with a basic feasible solution but a constraint # is not satisfied with the corresponding slack variable zero, the slack # variable must be basic.) 
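# Added explanatory comment: np.unique(..., return_index=True) below keeps the
# first occurrence of each row index, so after the reversal above that first
# occurrence is the rightmost eligible singleton column for that row.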
# for each row, keep rightmost singleton column with an entry in that row unique_row_indices, first_columns = np.unique(row_indices, return_index=True) return column_indices[first_columns], unique_row_indices def _find_nonzero_rows(A, tol): """ Returns logical array indicating the locations of rows with at least one nonzero element. """ return np.any(np.abs(A) > tol, axis=1) def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12): """ Selects a pivot to enter the basis. Currently Bland's rule - the smallest index that has a negative reduced cost - is the default. """ if rule.lower() == "mrc": # index with minimum reduced cost return a[~bl][np.argmin(c_hat)] else: # smallest index w/ negative reduced cost return a[~bl][c_hat < -tol][0] def _display_iter(phase, iteration, slack, con, fun): """ Print indicators of optimization status to the console. """ header = True if not iteration % 20 else False if header: print("Phase", "Iteration", "Minimum Slack ", "Constraint Residual", "Objective ") # :<X.Y left aligns Y digits in X digit spaces fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}' try: slack = np.min(slack) except ValueError: slack = "NA" print(fmt.format(phase, iteration, slack, np.linalg.norm(con), fun)) def _display_and_callback(phase_one_n, x, postsolve_args, status, iteration, disp, callback): if phase_one_n is not None: phase = 1 x_postsolve = x[:phase_one_n] else: phase = 2 x_postsolve = x x_o, fun, slack, con = _postsolve(x_postsolve, postsolve_args) if callback is not None: res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack, 'con': con, 'nit': iteration, 'phase': phase, 'complete': False, 'status': status, 'message': "", 'success': False}) callback(res) if disp: _display_iter(phase, iteration, slack, con, fun) def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp, maxupdate, mast, pivot, iteration=0, phase_one_n=None): """ The heart of the simplex method. Beginning with a basic feasible solution, moves to adjacent basic feasible solutions successively lower reduced cost. Terminates when there are no basic feasible solutions with lower reduced cost or if the problem is determined to be unbounded. This implementation follows the revised simplex method based on LU decomposition. Rather than maintaining a tableau or an inverse of the basis matrix, we keep a factorization of the basis matrix that allows efficient solution of linear systems while avoiding stability issues associated with inverted matrices. """ m, n = A.shape status = 0 a = np.arange(n) # indices of columns of A ab = np.arange(m) # indices of columns of B if maxupdate: # basis matrix factorization object; similar to B = A[:, b] B = BGLU(A, b, maxupdate, mast) else: B = LU(A, b) for iteration in range(iteration, maxiter): if disp or callback is not None: _display_and_callback(phase_one_n, x, postsolve_args, status, iteration, disp, callback) bl = np.zeros(len(a), dtype=bool) bl[b] = 1 xb = x[b] # basic variables cb = c[b] # basic costs try: v = B.solve(cb, transposed=True) # similar to v = solve(B.T, cb) except LinAlgError: status = 4 break # TODO: cythonize? c_hat = c - v.dot(A) # reduced cost c_hat = c_hat[~bl] # Above is much faster than: # N = A[:, ~bl] # slow! # c_hat = c[~bl] - v.T.dot(N) # Can we perform the multiplication only on the nonbasic columns? 
if np.all(c_hat >= -tol): # all reduced costs positive -> terminate break j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol) u = B.solve(A[:, j]) # similar to u = solve(B, A[:, j]) i = u > tol # if none of the u are positive, unbounded if not np.any(i): status = 3 break th = xb[i]/u[i] l = np.argmin(th) # implicitly selects smallest subscript th_star = th[l] # step size x[b] = x[b] - th_star*u # take step x[j] = th_star B.update(ab[i][l], j) # modify basis b = B.b # similar to b[ab[i][l]] = else: # If the end of the for loop is reached (without a break statement), # then another step has been taken, so the iteration counter should # increment, info should be displayed, and callback should be called. iteration += 1 status = 1 if disp or callback is not None: _display_and_callback(phase_one_n, x, postsolve_args, status, iteration, disp, callback) return x, b, status, iteration def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args, maxiter=5000, tol=1e-12, disp=False, maxupdate=10, mast=False, pivot="mrc", **unknown_options): """ Solve the following linear programming problem via a two-phase revised simplex algorithm.:: minimize: c @ x subject to: A @ x == b 0 <= x < oo User-facing documentation is in _linprog_doc.py. Parameters ---------- c : 1-D array Coefficients of the linear objective function to be minimized. c0 : float Constant term in objective function due to fixed (and eliminated) variables. (Currently unused.) A : 2-D array 2-D array which, when matrix-multiplied by ``x``, gives the values of the equality constraints at ``x``. b : 1-D array 1-D array of values representing the RHS of each equality constraint (row) in ``A_eq``. x0 : 1-D array, optional Starting values of the independent variables, which will be refined by the optimization algorithm. For the revised simplex method, these must correspond with a basic feasible solution. callback : callable, optional If a callback function is provided, it will be called within each iteration of the algorithm. The callback function must accept a single `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1-D array Current solution vector. fun : float Current value of the objective function ``c @ x``. success : bool True only when an algorithm has completed successfully, so this is always False as the callback function is called only while the algorithm is still iterating. slack : 1-D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, the corresponding constraint is active. con : 1-D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x``. phase : int The phase of the algorithm being executed. status : int For revised simplex, this is always 0 because if a different status is detected, the algorithm terminates. nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. postsolve_args : tuple Data needed by _postsolve to convert the solution to the standard-form problem into the solution to the original problem. Options ------- maxiter : int The maximum number of iterations to perform in either phase. tol : float The tolerance which determines when a solution is "close enough" to zero in Phase 1 to be considered a basic feasible solution or close enough to positive to serve as an optimal solution. disp : bool Set to ``True`` if indicators of optimization status are to be printed to the console each iteration. 
maxupdate : int The maximum number of updates performed on the LU factorization. After this many updates is reached, the basis matrix is factorized from scratch. mast : bool Minimize Amortized Solve Time. If enabled, the average time to solve a linear system using the basis factorization is measured. Typically, the average solve time will decrease with each successive solve after initial factorization, as factorization takes much more time than the solve operation (and updates). Eventually, however, the updated factorization becomes sufficiently complex that the average solve time begins to increase. When this is detected, the basis is refactorized from scratch. Enable this option to maximize speed at the risk of nondeterministic behavior. Ignored if ``maxupdate`` is 0. pivot : "mrc" or "bland" Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose Bland's rule if iteration limit is reached and cycling is suspected. unknown_options : dict Optional arguments not used by this particular solver. If `unknown_options` is non-empty a warning is issued listing all unused options. Returns ------- x : 1-D array Solution vector. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Numerical difficulties encountered 5 : No constraints; turn presolve on 6 : Guess x0 cannot be converted to a basic feasible solution message : str A string descriptor of the exit status of the optimization. iteration : int The number of iterations taken to solve the problem. """ _check_unknown_options(unknown_options) messages = ["Optimization terminated successfully.", "Iteration limit reached.", "The problem appears infeasible, as the phase one auxiliary " "problem terminated successfully with a residual of {0:.1e}, " "greater than the tolerance {1} required for the solution to " "be considered feasible. Consider increasing the tolerance to " "be greater than {0:.1e}. If this tolerance is unnaceptably " "large, the problem is likely infeasible.", "The problem is unbounded, as the simplex algorithm found " "a basic feasible solution from which there is a direction " "with negative reduced cost in which all decision variables " "increase.", "Numerical difficulties encountered; consider trying " "method='interior-point'.", "Problems with no constraints are trivially solved; please " "turn presolve on.", "The guess x0 cannot be converted to a basic feasible " "solution. " ] if A.size == 0: # address test_unbounded_below_no_presolve_corrected return np.zeros(c.shape), 5, messages[5], 0 x, basis, A, b, residual, status, iteration = ( _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp, maxupdate, mast, pivot)) if status == 0: x, basis, status, iteration = _phase_two(c, A, x, basis, callback, postsolve_args, maxiter, tol, disp, maxupdate, mast, pivot, iteration) return x, status, messages[status].format(residual, tol), iteration
23,149
39.401396
79
py
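The revised simplex backend implemented by ``_linprog_rs`` above is not called directly; it is selected through the public ``scipy.optimize.linprog`` interface. A minimal usage sketch follows; the tiny equality-constrained LP and its numbers are illustrative only, and ``method='revised simplex'`` assumes a SciPy version that still ships this solver.

# Illustrative only: a tiny equality-constrained LP routed to the revised
# simplex backend above via the public linprog interface (assumes a SciPy
# version in which method='revised simplex' is still available).
import numpy as np
from scipy.optimize import linprog

c = np.array([1.0, 2.0, 0.0])        # objective coefficients
A_eq = np.array([[1.0, 1.0, 1.0]])   # one equality constraint: x0 + x1 + x2 == 1
b_eq = np.array([1.0])

res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=[(0, None)] * 3,
              method='revised simplex')
print(res.status, res.x, res.fun)    # expected: status 0, x near [0, 0, 1], fun near 0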
scipy
scipy-main/scipy/optimize/linesearch.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.optimize` namespace for importing the functions
# included below.

import warnings
from . import _linesearch

__all__ = [  # noqa: F822
    'LineSearchWarning',
    'line_search',
    'line_search_BFGS',
    'line_search_armijo',
    'line_search_wolfe1',
    'line_search_wolfe2',
    'minpack2',
    'scalar_search_armijo',
    'scalar_search_wolfe1',
    'scalar_search_wolfe2',
    'warn',
]


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        raise AttributeError(
            "scipy.optimize.linesearch is deprecated and has no attribute "
            f"{name}. Try looking in scipy.optimize instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
                  "the `scipy.optimize.linesearch` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_linesearch, name)
1,007
24.846154
78
py
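The ``linesearch.py`` shim above only forwards attribute access to ``scipy.optimize._linesearch`` while emitting a deprecation warning. A short sketch of that behaviour, assuming the deprecated module is still importable in the installed SciPy version:

# Sketch only: touching a name through the deprecated namespace triggers the
# module-level __getattr__ above and emits a DeprecationWarning.
import warnings
import scipy.optimize.linesearch as deprecated_ls

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ls = deprecated_ls.line_search          # forwarded to scipy.optimize._linesearch
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

from scipy.optimize import line_search      # preferred, non-deprecated import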
scipy
scipy-main/scipy/optimize/_dual_annealing.py
# Dual Annealing implementation. # Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>, # Yang Xiang <yang.xiang@pmi.com> # Author: Sylvain Gubian, Yang Xiang, PMP S.A. """ A Dual Annealing global optimization algorithm """ import numpy as np from scipy.optimize import OptimizeResult from scipy.optimize import minimize, Bounds from scipy.special import gammaln from scipy._lib._util import check_random_state from scipy.optimize._constraints import new_bounds_to_old __all__ = ['dual_annealing'] class VisitingDistribution: """ Class used to generate new coordinates based on the distorted Cauchy-Lorentz distribution. Depending on the steps within the strategy chain, the class implements the strategy for generating new location changes. Parameters ---------- lb : array_like A 1-D NumPy ndarray containing lower bounds of the generated components. Neither NaN or inf are allowed. ub : array_like A 1-D NumPy ndarray containing upper bounds for the generated components. Neither NaN or inf are allowed. visiting_param : float Parameter for visiting distribution. Default value is 2.62. Higher values give the visiting distribution a heavier tail, this makes the algorithm jump to a more distant region. The value range is (1, 3]. Its value is fixed for the life of the object. rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`} A `~numpy.random.RandomState`, `~numpy.random.Generator` object for using the current state of the created random generator container. """ TAIL_LIMIT = 1.e8 MIN_VISIT_BOUND = 1.e-10 def __init__(self, lb, ub, visiting_param, rand_gen): # if you wish to make _visiting_param adjustable during the life of # the object then _factor2, _factor3, _factor5, _d1, _factor6 will # have to be dynamically calculated in `visit_fn`. They're factored # out here so they don't need to be recalculated all the time. 
self._visiting_param = visiting_param self.rand_gen = rand_gen self.lower = lb self.upper = ub self.bound_range = ub - lb # these are invariant numbers unless visiting_param changes self._factor2 = np.exp((4.0 - self._visiting_param) * np.log( self._visiting_param - 1.0)) self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0) / (self._visiting_param - 1.0)) self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * ( 3.0 - self._visiting_param)) self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5 self._d1 = 2.0 - self._factor5 self._factor6 = np.pi * (1.0 - self._factor5) / np.sin( np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1)) def visiting(self, x, step, temperature): """ Based on the step in the strategy chain, new coordinates are generated by changing all components is the same time or only one of them, the new values are computed with visit_fn method """ dim = x.size if step < dim: # Changing all coordinates with a new visiting value visits = self.visit_fn(temperature, dim) upper_sample, lower_sample = self.rand_gen.uniform(size=2) visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample x_visit = visits + x a = x_visit - self.lower b = np.fmod(a, self.bound_range) + self.bound_range x_visit = np.fmod(b, self.bound_range) + self.lower x_visit[np.fabs( x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10 else: # Changing only one coordinate at a time based on strategy # chain step x_visit = np.copy(x) visit = self.visit_fn(temperature, 1)[0] if visit > self.TAIL_LIMIT: visit = self.TAIL_LIMIT * self.rand_gen.uniform() elif visit < -self.TAIL_LIMIT: visit = -self.TAIL_LIMIT * self.rand_gen.uniform() index = step - dim x_visit[index] = visit + x[index] a = x_visit[index] - self.lower[index] b = np.fmod(a, self.bound_range[index]) + self.bound_range[index] x_visit[index] = np.fmod(b, self.bound_range[ index]) + self.lower[index] if np.fabs(x_visit[index] - self.lower[ index]) < self.MIN_VISIT_BOUND: x_visit[index] += self.MIN_VISIT_BOUND return x_visit def visit_fn(self, temperature, dim): """ Formula Visita from p. 405 of reference [2] """ x, y = self.rand_gen.normal(size=(dim, 2)).T factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0)) factor4 = self._factor4_p * factor1 # sigmax x *= np.exp(-(self._visiting_param - 1.0) * np.log( self._factor6 / factor4) / (3.0 - self._visiting_param)) den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) / (3.0 - self._visiting_param)) return x / den class EnergyState: """ Class used to record the energy state. At any time, it knows what is the currently used coordinates and the most recent best location. Parameters ---------- lower : array_like A 1-D NumPy ndarray containing lower bounds for generating an initial random components in the `reset` method. upper : array_like A 1-D NumPy ndarray containing upper bounds for generating an initial random components in the `reset` method components. Neither NaN or inf are allowed. callback : callable, ``callback(x, f, context)``, optional A callback function which will be called for all minima found. 
``x`` and ``f`` are the coordinates and function value of the latest minimum found, and `context` has value in [0, 1, 2] """ # Maximum number of trials for generating a valid starting point MAX_REINIT_COUNT = 1000 def __init__(self, lower, upper, callback=None): self.ebest = None self.current_energy = None self.current_location = None self.xbest = None self.lower = lower self.upper = upper self.callback = callback def reset(self, func_wrapper, rand_gen, x0=None): """ Initialize current location is the search domain. If `x0` is not provided, a random location within the bounds is generated. """ if x0 is None: self.current_location = rand_gen.uniform(self.lower, self.upper, size=len(self.lower)) else: self.current_location = np.copy(x0) init_error = True reinit_counter = 0 while init_error: self.current_energy = func_wrapper.fun(self.current_location) if self.current_energy is None: raise ValueError('Objective function is returning None') if (not np.isfinite(self.current_energy) or np.isnan( self.current_energy)): if reinit_counter >= EnergyState.MAX_REINIT_COUNT: init_error = False message = ( 'Stopping algorithm because function ' 'create NaN or (+/-) infinity values even with ' 'trying new random parameters' ) raise ValueError(message) self.current_location = rand_gen.uniform(self.lower, self.upper, size=self.lower.size) reinit_counter += 1 else: init_error = False # If first time reset, initialize ebest and xbest if self.ebest is None and self.xbest is None: self.ebest = self.current_energy self.xbest = np.copy(self.current_location) # Otherwise, we keep them in case of reannealing reset def update_best(self, e, x, context): self.ebest = e self.xbest = np.copy(x) if self.callback is not None: val = self.callback(x, e, context) if val is not None: if val: return ('Callback function requested to stop early by ' 'returning True') def update_current(self, e, x): self.current_energy = e self.current_location = np.copy(x) class StrategyChain: """ Class that implements within a Markov chain the strategy for location acceptance and local search decision making. Parameters ---------- acceptance_param : float Parameter for acceptance distribution. It is used to control the probability of acceptance. The lower the acceptance parameter, the smaller the probability of acceptance. Default value is -5.0 with a range (-1e4, -5]. visit_dist : VisitingDistribution Instance of `VisitingDistribution` class. func_wrapper : ObjectiveFunWrapper Instance of `ObjectiveFunWrapper` class. minimizer_wrapper: LocalSearchWrapper Instance of `LocalSearchWrapper` class. rand_gen : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. energy_state: EnergyState Instance of `EnergyState` class. 
""" def __init__(self, acceptance_param, visit_dist, func_wrapper, minimizer_wrapper, rand_gen, energy_state): # Local strategy chain minimum energy and location self.emin = energy_state.current_energy self.xmin = np.array(energy_state.current_location) # Global optimizer state self.energy_state = energy_state # Acceptance parameter self.acceptance_param = acceptance_param # Visiting distribution instance self.visit_dist = visit_dist # Wrapper to objective function self.func_wrapper = func_wrapper # Wrapper to the local minimizer self.minimizer_wrapper = minimizer_wrapper self.not_improved_idx = 0 self.not_improved_max_idx = 1000 self._rand_gen = rand_gen self.temperature_step = 0 self.K = 100 * len(energy_state.current_location) def accept_reject(self, j, e, x_visit): r = self._rand_gen.uniform() pqv_temp = 1.0 - ((1.0 - self.acceptance_param) * (e - self.energy_state.current_energy) / self.temperature_step) if pqv_temp <= 0.: pqv = 0. else: pqv = np.exp(np.log(pqv_temp) / ( 1. - self.acceptance_param)) if r <= pqv: # We accept the new location and update state self.energy_state.update_current(e, x_visit) self.xmin = np.copy(self.energy_state.current_location) # No improvement for a long time if self.not_improved_idx >= self.not_improved_max_idx: if j == 0 or self.energy_state.current_energy < self.emin: self.emin = self.energy_state.current_energy self.xmin = np.copy(self.energy_state.current_location) def run(self, step, temperature): self.temperature_step = temperature / float(step + 1) self.not_improved_idx += 1 for j in range(self.energy_state.current_location.size * 2): if j == 0: if step == 0: self.energy_state_improved = True else: self.energy_state_improved = False x_visit = self.visit_dist.visiting( self.energy_state.current_location, j, temperature) # Calling the objective function e = self.func_wrapper.fun(x_visit) if e < self.energy_state.current_energy: # We have got a better energy value self.energy_state.update_current(e, x_visit) if e < self.energy_state.ebest: val = self.energy_state.update_best(e, x_visit, 0) if val is not None: if val: return val self.energy_state_improved = True self.not_improved_idx = 0 else: # We have not improved but do we accept the new location? 
self.accept_reject(j, e, x_visit) if self.func_wrapper.nfev >= self.func_wrapper.maxfun: return ('Maximum number of function call reached ' 'during annealing') # End of StrategyChain loop def local_search(self): # Decision making for performing a local search # based on strategy chain results # If energy has been improved or no improvement since too long, # performing a local search with the best strategy chain location if self.energy_state_improved: # Global energy has improved, let's see if LS improves further e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest, self.energy_state.ebest) if e < self.energy_state.ebest: self.not_improved_idx = 0 val = self.energy_state.update_best(e, x, 1) if val is not None: if val: return val self.energy_state.update_current(e, x) if self.func_wrapper.nfev >= self.func_wrapper.maxfun: return ('Maximum number of function call reached ' 'during local search') # Check probability of a need to perform a LS even if no improvement do_ls = False if self.K < 90 * len(self.energy_state.current_location): pls = np.exp(self.K * ( self.energy_state.ebest - self.energy_state.current_energy) / self.temperature_step) if pls >= self._rand_gen.uniform(): do_ls = True # Global energy not improved, let's see what LS gives # on the best strategy chain location if self.not_improved_idx >= self.not_improved_max_idx: do_ls = True if do_ls: e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin) self.xmin = np.copy(x) self.emin = e self.not_improved_idx = 0 self.not_improved_max_idx = self.energy_state.current_location.size if e < self.energy_state.ebest: val = self.energy_state.update_best( self.emin, self.xmin, 2) if val is not None: if val: return val self.energy_state.update_current(e, x) if self.func_wrapper.nfev >= self.func_wrapper.maxfun: return ('Maximum number of function call reached ' 'during dual annealing') class ObjectiveFunWrapper: def __init__(self, func, maxfun=1e7, *args): self.func = func self.args = args # Number of objective function evaluations self.nfev = 0 # Number of gradient function evaluation if used self.ngev = 0 # Number of hessian of the objective function if used self.nhev = 0 self.maxfun = maxfun def fun(self, x): self.nfev += 1 return self.func(x, *self.args) class LocalSearchWrapper: """ Class used to wrap around the minimizer used for local search Default local minimizer is SciPy minimizer L-BFGS-B """ LS_MAXITER_RATIO = 6 LS_MAXITER_MIN = 100 LS_MAXITER_MAX = 1000 def __init__(self, search_bounds, func_wrapper, *args, **kwargs): self.func_wrapper = func_wrapper self.kwargs = kwargs self.jac = self.kwargs.get('jac', None) self.minimizer = minimize bounds_list = list(zip(*search_bounds)) self.lower = np.array(bounds_list[0]) self.upper = np.array(bounds_list[1]) # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method if not self.kwargs: n = len(self.lower) ls_max_iter = min(max(n * self.LS_MAXITER_RATIO, self.LS_MAXITER_MIN), self.LS_MAXITER_MAX) self.kwargs['method'] = 'L-BFGS-B' self.kwargs['options'] = { 'maxiter': ls_max_iter, } self.kwargs['bounds'] = list(zip(self.lower, self.upper)) elif callable(self.jac): def wrapped_jac(x): return self.jac(x, *args) self.kwargs['jac'] = wrapped_jac def local_search(self, x, e): # Run local search from the given x location where energy value is e x_tmp = np.copy(x) mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs) if 'njev' in mres: self.func_wrapper.ngev += mres.njev if 'nhev' in mres: self.func_wrapper.nhev += mres.nhev # Check if is valid value 
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun) in_bounds = np.all(mres.x >= self.lower) and np.all( mres.x <= self.upper) is_valid = is_finite and in_bounds # Use the new point only if it is valid and return a better results if is_valid and mres.fun < e: return mres.fun, mres.x else: return e, x_tmp def dual_annealing(func, bounds, args=(), maxiter=1000, minimizer_kwargs=None, initial_temp=5230., restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0, maxfun=1e7, seed=None, no_local_search=False, callback=None, x0=None): """ Find the global minimum of a function using Dual Annealing. Parameters ---------- func : callable The objective function to be minimized. Must be in the form ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array and ``args`` is a tuple of any additional fixed parameters needed to completely specify the function. bounds : sequence or `Bounds` Bounds for variables. There are two ways to specify the bounds: 1. Instance of `Bounds` class. 2. Sequence of ``(min, max)`` pairs for each element in `x`. args : tuple, optional Any additional fixed parameters needed to completely specify the objective function. maxiter : int, optional The maximum number of global search iterations. Default value is 1000. minimizer_kwargs : dict, optional Extra keyword arguments to be passed to the local minimizer (`minimize`). Some important options could be: ``method`` for the minimizer method to use and ``args`` for objective function additional arguments. initial_temp : float, optional The initial temperature, use higher values to facilitates a wider search of the energy landscape, allowing dual_annealing to escape local minima that it is trapped in. Default value is 5230. Range is (0.01, 5.e4]. restart_temp_ratio : float, optional During the annealing process, temperature is decreasing, when it reaches ``initial_temp * restart_temp_ratio``, the reannealing process is triggered. Default value of the ratio is 2e-5. Range is (0, 1). visit : float, optional Parameter for visiting distribution. Default value is 2.62. Higher values give the visiting distribution a heavier tail, this makes the algorithm jump to a more distant region. The value range is (1, 3]. accept : float, optional Parameter for acceptance distribution. It is used to control the probability of acceptance. The lower the acceptance parameter, the smaller the probability of acceptance. Default value is -5.0 with a range (-1e4, -5]. maxfun : int, optional Soft limit for the number of objective function calls. If the algorithm is in the middle of a local search, this number will be exceeded, the algorithm will stop just after the local search is done. Default value is 1e7. seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Specify `seed` for repeatable minimizations. The random numbers generated with this seed only affect the visiting distribution function and new coordinates generation. no_local_search : bool, optional If `no_local_search` is set to True, a traditional Generalized Simulated Annealing will be performed with no local search strategy applied. callback : callable, optional A callback function with signature ``callback(x, f, context)``, which will be called for all minima found. 
``x`` and ``f`` are the coordinates and function value of the latest minimum found, and ``context`` has value in [0, 1, 2], with the following meaning: - 0: minimum detected in the annealing process. - 1: detection occurred in the local search process. - 2: detection done in the dual annealing process. If the callback implementation returns True, the algorithm will stop. x0 : ndarray, shape(n,), optional Coordinates of a single N-D starting point. Returns ------- res : OptimizeResult The optimization result represented as a `OptimizeResult` object. Important attributes are: ``x`` the solution array, ``fun`` the value of the function at the solution, and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. Notes ----- This function implements the Dual Annealing optimization. This stochastic approach derived from [3]_ combines the generalization of CSA (Classical Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled to a strategy for applying a local search on accepted locations [4]_. An alternative implementation of this same algorithm is described in [5]_ and benchmarks are presented in [6]_. This approach introduces an advanced method to refine the solution found by the generalized annealing process. This algorithm uses a distorted Cauchy-Lorentz visiting distribution, with its shape controlled by the parameter :math:`q_{v}` .. math:: g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\ \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\ \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\ \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\ \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}} Where :math:`t` is the artificial time. This visiting distribution is used to generate a trial jump distance :math:`\\Delta x(t)` of variable :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`. From the starting point, after calling the visiting distribution function, the acceptance probability is computed as follows: .. math:: p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\ \\frac{1}{1-q_{a}}}\\}} Where :math:`q_{a}` is a acceptance parameter. For :math:`q_{a}<1`, zero acceptance probability is assigned to the cases where .. math:: [1-(1-q_{a}) \\beta \\Delta E] < 0 The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to .. math:: T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\ 1 + t\\right)^{q_{v}-1}-1} Where :math:`q_{v}` is the visiting parameter. .. versionadded:: 1.2.0 References ---------- .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs statistics. Journal of Statistical Physics, 52, 479-487 (1998). .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing. Physica A, 233, 395-406 (1996). .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated Annealing Algorithm and Its Application to the Thomson Model. Physics Letters A, 233, 216-220 (1997). .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated Annealing. Physical Review E, 62, 4473 (2000). .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized Simulated Annealing for Efficient Global Optimization: the GenSA Package for R. The R Journal, Volume 5/1 (2013). .. [6] Mullen, K. Continuous Global Optimization in R. Journal of Statistical Software, 60(6), 1 - 45, (2014). :doi:`10.18637/jss.v060.i06` Examples -------- The following example is a 10-D problem, with many local minima. 
The function involved is called Rastrigin (https://en.wikipedia.org/wiki/Rastrigin_function) >>> import numpy as np >>> from scipy.optimize import dual_annealing >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x) >>> lw = [-5.12] * 10 >>> up = [5.12] * 10 >>> ret = dual_annealing(func, bounds=list(zip(lw, up))) >>> ret.x array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09, -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09, -6.05775280e-09, -5.00668935e-09]) # random >>> ret.fun 0.000000 """ if isinstance(bounds, Bounds): bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb)) # noqa: E501 if x0 is not None and not len(x0) == len(bounds): raise ValueError('Bounds size does not match x0') lu = list(zip(*bounds)) lower = np.array(lu[0]) upper = np.array(lu[1]) # Check that restart temperature ratio is correct if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.: raise ValueError('Restart temperature ratio has to be in range (0, 1)') # Checking bounds are valid if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any( np.isnan(lower)) or np.any(np.isnan(upper))): raise ValueError('Some bounds values are inf values or nan values') # Checking that bounds are consistent if not np.all(lower < upper): raise ValueError('Bounds are not consistent min < max') # Checking that bounds are the same length if not len(lower) == len(upper): raise ValueError('Bounds do not have the same dimensions') # Wrapper for the objective function func_wrapper = ObjectiveFunWrapper(func, maxfun, *args) # minimizer_kwargs has to be a dict, not None minimizer_kwargs = minimizer_kwargs or {} minimizer_wrapper = LocalSearchWrapper( bounds, func_wrapper, *args, **minimizer_kwargs) # Initialization of random Generator for reproducible runs if seed provided rand_state = check_random_state(seed) # Initialization of the energy state energy_state = EnergyState(lower, upper, callback) energy_state.reset(func_wrapper, rand_state, x0) # Minimum value of annealing temperature reached to perform # re-annealing temperature_restart = initial_temp * restart_temp_ratio # VisitingDistribution instance visit_dist = VisitingDistribution(lower, upper, visit, rand_state) # Strategy chain instance strategy_chain = StrategyChain(accept, visit_dist, func_wrapper, minimizer_wrapper, rand_state, energy_state) need_to_stop = False iteration = 0 message = [] # OptimizeResult object to be returned optimize_res = OptimizeResult() optimize_res.success = True optimize_res.status = 0 t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0 # Run the search loop while not need_to_stop: for i in range(maxiter): # Compute temperature for this step s = float(i) + 2.0 t2 = np.exp((visit - 1) * np.log(s)) - 1.0 temperature = initial_temp * t1 / t2 if iteration >= maxiter: message.append("Maximum number of iteration reached") need_to_stop = True break # Need a re-annealing process? 
if temperature < temperature_restart: energy_state.reset(func_wrapper, rand_state) break # starting strategy chain val = strategy_chain.run(i, temperature) if val is not None: message.append(val) need_to_stop = True optimize_res.success = False break # Possible local search at the end of the strategy chain if not no_local_search: val = strategy_chain.local_search() if val is not None: message.append(val) need_to_stop = True optimize_res.success = False break iteration += 1 # Setting the OptimizeResult values optimize_res.x = energy_state.xbest optimize_res.fun = energy_state.ebest optimize_res.nit = iteration optimize_res.nfev = func_wrapper.nfev optimize_res.njev = func_wrapper.ngev optimize_res.nhev = func_wrapper.nhev optimize_res.message = message return optimize_res
30,363
41.348675
86
py
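The callback protocol documented in ``_dual_annealing.py`` above (signature ``callback(x, f, context)``, returning True to stop) can be used for early termination. A minimal sketch; the stopping threshold is arbitrary and chosen purely for illustration.

# Sketch: stop dual_annealing early via the callback contract described above.
# The 1e-8 threshold is arbitrary and only for illustration.
import numpy as np
from scipy.optimize import dual_annealing

def rastrigin(x):
    return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * x.size

def stop_early(x, f, context):
    # context: 0 = annealing step, 1 = local search, 2 = dual annealing detection
    return f < 1e-8          # returning True asks the solver to stop

bounds = [(-5.12, 5.12)] * 4
res = dual_annealing(rastrigin, bounds, callback=stop_early, seed=42)
print(res.message, res.fun)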
scipy
scipy-main/scipy/optimize/_basinhopping.py
""" basinhopping: The basinhopping global optimization algorithm """ import numpy as np import math import inspect import scipy.optimize from scipy._lib._util import check_random_state __all__ = ['basinhopping'] _params = (inspect.Parameter('res_new', kind=inspect.Parameter.KEYWORD_ONLY), inspect.Parameter('res_old', kind=inspect.Parameter.KEYWORD_ONLY)) _new_accept_test_signature = inspect.Signature(parameters=_params) class Storage: """ Class used to store the lowest energy structure """ def __init__(self, minres): self._add(minres) def _add(self, minres): self.minres = minres self.minres.x = np.copy(minres.x) def update(self, minres): if minres.success and (minres.fun < self.minres.fun or not self.minres.success): self._add(minres) return True else: return False def get_lowest(self): return self.minres class BasinHoppingRunner: """This class implements the core of the basinhopping algorithm. x0 : ndarray The starting coordinates. minimizer : callable The local minimizer, with signature ``result = minimizer(x)``. The return value is an `optimize.OptimizeResult` object. step_taking : callable This function displaces the coordinates randomly. Signature should be ``x_new = step_taking(x)``. Note that `x` may be modified in-place. accept_tests : list of callables Each test is passed the kwargs `f_new`, `x_new`, `f_old` and `x_old`. These tests will be used to judge whether or not to accept the step. The acceptable return values are True, False, or ``"force accept"``. If any of the tests return False then the step is rejected. If ``"force accept"``, then this will override any other tests in order to accept the step. This can be used, for example, to forcefully escape from a local minimum that ``basinhopping`` is trapped in. disp : bool, optional Display status messages. """ def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False): self.x = np.copy(x0) self.minimizer = minimizer self.step_taking = step_taking self.accept_tests = accept_tests self.disp = disp self.nstep = 0 # initialize return object self.res = scipy.optimize.OptimizeResult() self.res.minimization_failures = 0 # do initial minimization minres = minimizer(self.x) if not minres.success: self.res.minimization_failures += 1 if self.disp: print("warning: basinhopping: local minimization failure") self.x = np.copy(minres.x) self.energy = minres.fun self.incumbent_minres = minres # best minimize result found so far if self.disp: print("basinhopping step %d: f %g" % (self.nstep, self.energy)) # initialize storage class self.storage = Storage(minres) if hasattr(minres, "nfev"): self.res.nfev = minres.nfev if hasattr(minres, "njev"): self.res.njev = minres.njev if hasattr(minres, "nhev"): self.res.nhev = minres.nhev def _monte_carlo_step(self): """Do one Monte Carlo iteration Randomly displace the coordinates, minimize, and decide whether or not to accept the new coordinates. """ # Take a random step. Make a copy of x because the step_taking # algorithm might change x in place x_after_step = np.copy(self.x) x_after_step = self.step_taking(x_after_step) # do a local minimization minres = self.minimizer(x_after_step) x_after_quench = minres.x energy_after_quench = minres.fun if not minres.success: self.res.minimization_failures += 1 if self.disp: print("warning: basinhopping: local minimization failure") if hasattr(minres, "nfev"): self.res.nfev += minres.nfev if hasattr(minres, "njev"): self.res.njev += minres.njev if hasattr(minres, "nhev"): self.res.nhev += minres.nhev # accept the move based on self.accept_tests. 
If any test is False, # then reject the step. If any test returns the special string # 'force accept', then accept the step regardless. This can be used # to forcefully escape from a local minimum if normal basin hopping # steps are not sufficient. accept = True for test in self.accept_tests: if inspect.signature(test) == _new_accept_test_signature: testres = test(res_new=minres, res_old=self.incumbent_minres) else: testres = test(f_new=energy_after_quench, x_new=x_after_quench, f_old=self.energy, x_old=self.x) if testres == 'force accept': accept = True break elif testres is None: raise ValueError("accept_tests must return True, False, or " "'force accept'") elif not testres: accept = False # Report the result of the acceptance test to the take step class. # This is for adaptive step taking if hasattr(self.step_taking, "report"): self.step_taking.report(accept, f_new=energy_after_quench, x_new=x_after_quench, f_old=self.energy, x_old=self.x) return accept, minres def one_cycle(self): """Do one cycle of the basinhopping algorithm """ self.nstep += 1 new_global_min = False accept, minres = self._monte_carlo_step() if accept: self.energy = minres.fun self.x = np.copy(minres.x) self.incumbent_minres = minres # best minimize result found so far new_global_min = self.storage.update(minres) # print some information if self.disp: self.print_report(minres.fun, accept) if new_global_min: print("found new global minimum on step %d with function" " value %g" % (self.nstep, self.energy)) # save some variables as BasinHoppingRunner attributes self.xtrial = minres.x self.energy_trial = minres.fun self.accept = accept return new_global_min def print_report(self, energy_trial, accept): """print a status update""" minres = self.storage.get_lowest() print("basinhopping step %d: f %g trial_f %g accepted %d " " lowest_f %g" % (self.nstep, self.energy, energy_trial, accept, minres.fun)) class AdaptiveStepsize: """ Class to implement adaptive stepsize. This class wraps the step taking class and modifies the stepsize to ensure the true acceptance rate is as close as possible to the target. Parameters ---------- takestep : callable The step taking routine. Must contain modifiable attribute takestep.stepsize accept_rate : float, optional The target step acceptance rate interval : int, optional Interval for how often to update the stepsize factor : float, optional The step size is multiplied or divided by this factor upon each update. verbose : bool, optional Print information about each update """ def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9, verbose=True): self.takestep = takestep self.target_accept_rate = accept_rate self.interval = interval self.factor = factor self.verbose = verbose self.nstep = 0 self.nstep_tot = 0 self.naccept = 0 def __call__(self, x): return self.take_step(x) def _adjust_step_size(self): old_stepsize = self.takestep.stepsize accept_rate = float(self.naccept) / self.nstep if accept_rate > self.target_accept_rate: # We're accepting too many steps. This generally means we're # trapped in a basin. Take bigger steps. self.takestep.stepsize /= self.factor else: # We're not accepting enough steps. Take smaller steps. 
self.takestep.stepsize *= self.factor if self.verbose: print("adaptive stepsize: acceptance rate {:f} target {:f} new " "stepsize {:g} old stepsize {:g}".format(accept_rate, self.target_accept_rate, self.takestep.stepsize, old_stepsize)) def take_step(self, x): self.nstep += 1 self.nstep_tot += 1 if self.nstep % self.interval == 0: self._adjust_step_size() return self.takestep(x) def report(self, accept, **kwargs): "called by basinhopping to report the result of the step" if accept: self.naccept += 1 class RandomDisplacement: """Add a random displacement of maximum size `stepsize` to each coordinate. Calling this updates `x` in-place. Parameters ---------- stepsize : float, optional Maximum stepsize in any dimension random_gen : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. """ def __init__(self, stepsize=0.5, random_gen=None): self.stepsize = stepsize self.random_gen = check_random_state(random_gen) def __call__(self, x): x += self.random_gen.uniform(-self.stepsize, self.stepsize, np.shape(x)) return x class MinimizerWrapper: """ wrap a minimizer function as a minimizer class """ def __init__(self, minimizer, func=None, **kwargs): self.minimizer = minimizer self.func = func self.kwargs = kwargs def __call__(self, x0): if self.func is None: return self.minimizer(x0, **self.kwargs) else: return self.minimizer(self.func, x0, **self.kwargs) class Metropolis: """Metropolis acceptance criterion. Parameters ---------- T : float The "temperature" parameter for the accept or reject criterion. random_gen : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Random number generator used for acceptance test. """ def __init__(self, T, random_gen=None): # Avoid ZeroDivisionError since "MBH can be regarded as a special case # of the BH framework with the Metropolis criterion, where temperature # T = 0." (Reject all steps that increase energy.) self.beta = 1.0 / T if T != 0 else float('inf') self.random_gen = check_random_state(random_gen) def accept_reject(self, res_new, res_old): """ Assuming the local search underlying res_new was successful: If new energy is lower than old, it will always be accepted. If new is higher than old, there is a chance it will be accepted, less likely for larger differences. """ with np.errstate(invalid='ignore'): # The energy values being fed to Metropolis are 1-length arrays, and if # they are equal, their difference is 0, which gets multiplied by beta, # which is inf, and array([0]) * float('inf') causes # # RuntimeWarning: invalid value encountered in multiply # # Ignore this warning so when the algorithm is on a flat plane, it always # accepts the step, to try to move off the plane. 
prod = -(res_new.fun - res_old.fun) * self.beta w = math.exp(min(0, prod)) rand = self.random_gen.uniform() return w >= rand and (res_new.success or not res_old.success) def __call__(self, *, res_new, res_old): """ f_new and f_old are mandatory in kwargs """ return bool(self.accept_reject(res_new, res_old)) def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, minimizer_kwargs=None, take_step=None, accept_test=None, callback=None, interval=50, disp=False, niter_success=None, seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9): """Find the global minimum of a function using the basin-hopping algorithm. Basin-hopping is a two-phase method that combines a global stepping algorithm with local minimization at each step. Designed to mimic the natural process of energy minimization of clusters of atoms, it works well for similar problems with "funnel-like, but rugged" energy landscapes [5]_. As the step-taking, step acceptance, and minimization methods are all customizable, this function can also be used to implement other two-phase methods. Parameters ---------- func : callable ``f(x, *args)`` Function to be optimized. ``args`` can be passed as an optional item in the dict `minimizer_kwargs` x0 : array_like Initial guess. niter : integer, optional The number of basin-hopping iterations. There will be a total of ``niter + 1`` runs of the local minimizer. T : float, optional The "temperature" parameter for the acceptance or rejection criterion. Higher "temperatures" mean that larger jumps in function value will be accepted. For best results `T` should be comparable to the separation (in function value) between local minima. stepsize : float, optional Maximum step size for use in the random displacement. minimizer_kwargs : dict, optional Extra keyword arguments to be passed to the local minimizer `scipy.optimize.minimize` Some important options could be: method : str The minimization method (e.g. ``"L-BFGS-B"``) args : tuple Extra arguments passed to the objective function (`func`) and its derivatives (Jacobian, Hessian). take_step : callable ``take_step(x)``, optional Replace the default step-taking routine with this routine. The default step-taking routine is a random displacement of the coordinates, but other step-taking algorithms may be better for some systems. `take_step` can optionally have the attribute ``take_step.stepsize``. If this attribute exists, then `basinhopping` will adjust ``take_step.stepsize`` in order to try to optimize the global minimum search. accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional Define a test which will be used to judge whether to accept the step. This will be used in addition to the Metropolis test based on "temperature" `T`. The acceptable return values are True, False, or ``"force accept"``. If any of the tests return False then the step is rejected. If the latter, then this will override any other tests in order to accept the step. This can be used, for example, to forcefully escape from a local minimum that `basinhopping` is trapped in. callback : callable, ``callback(x, f, accept)``, optional A callback function which will be called for all minima found. ``x`` and ``f`` are the coordinates and function value of the trial minimum, and ``accept`` is whether that minimum was accepted. This can be used, for example, to save the lowest N minima found. Also, `callback` can be used to specify a user defined stop criterion by optionally returning True to stop the `basinhopping` routine. 
interval : integer, optional interval for how often to update the `stepsize` disp : bool, optional Set to True to print status messages niter_success : integer, optional Stop the run if the global minimum candidate remains the same for this number of iterations. seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Specify `seed` for repeatable minimizations. The random numbers generated with this seed only affect the default Metropolis `accept_test` and the default `take_step`. If you supply your own `take_step` and `accept_test`, and these functions use random number generation, then those functions are responsible for the state of their random number generator. target_accept_rate : float, optional The target acceptance rate that is used to adjust the `stepsize`. If the current acceptance rate is greater than the target, then the `stepsize` is increased. Otherwise, it is decreased. Range is (0, 1). Default is 0.5. .. versionadded:: 1.8.0 stepwise_factor : float, optional The `stepsize` is multiplied or divided by this stepwise factor upon each update. Range is (0, 1). Default is 0.9. .. versionadded:: 1.8.0 Returns ------- res : OptimizeResult The optimization result represented as a `OptimizeResult` object. Important attributes are: ``x`` the solution array, ``fun`` the value of the function at the solution, and ``message`` which describes the cause of the termination. The ``OptimizeResult`` object returned by the selected minimizer at the lowest minimum is also contained within this object and can be accessed through the ``lowest_optimization_result`` attribute. See `OptimizeResult` for a description of other attributes. See Also -------- minimize : The local minimization function called once for each basinhopping step. `minimizer_kwargs` is passed to this routine. Notes ----- Basin-hopping is a stochastic algorithm which attempts to find the global minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_ [4]_. The algorithm in its current form was described by David Wales and Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/. The algorithm is iterative with each cycle composed of the following features 1) random perturbation of the coordinates 2) local minimization 3) accept or reject the new coordinates based on the minimized function value The acceptance test used here is the Metropolis criterion of standard Monte Carlo algorithms, although there are many other possibilities [3]_. This global minimization method has been shown to be extremely efficient for a wide variety of problems in physics and chemistry. It is particularly useful when the function has many minima separated by large barriers. See the `Cambridge Cluster Database <https://www-wales.ch.cam.ac.uk/CCD.html>`_ for databases of molecular systems that have been optimized primarily using basin-hopping. This database includes minimization problems exceeding 300 degrees of freedom. See the free software program `GMIN <https://www-wales.ch.cam.ac.uk/GMIN>`_ for a Fortran implementation of basin-hopping. This implementation has many variations of the procedure described above, including more advanced step taking algorithms and alternate acceptance criterion. 
For stochastic global optimization there is no way to determine if the true global minimum has actually been found. Instead, as a consistency check, the algorithm can be run from a number of different random starting points to ensure the lowest minimum found in each example has converged to the global minimum. For this reason, `basinhopping` will by default simply run for the number of iterations `niter` and return the lowest minimum found. It is left to the user to ensure that this is in fact the global minimum. Choosing `stepsize`: This is a crucial parameter in `basinhopping` and depends on the problem being solved. The step is chosen uniformly in the region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it should be comparable to the typical separation (in argument values) between local minima of the function being optimized. `basinhopping` will, by default, adjust `stepsize` to find an optimal value, but this may take many iterations. You will get quicker results if you set a sensible initial value for ``stepsize``. Choosing `T`: The parameter `T` is the "temperature" used in the Metropolis criterion. Basinhopping steps are always accepted if ``func(xnew) < func(xold)``. Otherwise, they are accepted with probability:: exp( -(func(xnew) - func(xold)) / T ) So, for best results, `T` should to be comparable to the typical difference (in function values) between local minima. (The height of "walls" between local minima is irrelevant.) If `T` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all steps that increase energy are rejected. .. versionadded:: 0.12.0 References ---------- .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press, Cambridge, UK. .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and the Lowest Energy Structures of Lennard-Jones Clusters Containing up to 110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111. .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA, 1987, 84, 6611. .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters, crystals, and biomolecules, Science, 1999, 285, 1368. .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu1, A., Basin Hopping as a General and Versatile Optimization Framework for the Characterization of Biological Macromolecules, Advances in Artificial Intelligence, Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832` Examples -------- The following example is a 1-D minimization problem, with many local minima superimposed on a parabola. >>> import numpy as np >>> from scipy.optimize import basinhopping >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x >>> x0 = [1.] Basinhopping, internally, uses a local minimization algorithm. We will use the parameter `minimizer_kwargs` to tell basinhopping which algorithm to use and how to set up that minimizer. This parameter will be passed to `scipy.optimize.minimize`. >>> minimizer_kwargs = {"method": "BFGS"} >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs, ... niter=200) >>> print("global minimum: x = %.4f, f(x) = %.4f" % (ret.x, ret.fun)) global minimum: x = -0.1951, f(x) = -1.0009 Next consider a 2-D minimization problem. Also, this time, we will use gradient information to significantly speed up the search. >>> def func2d(x): ... f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + ... 0.2) * x[0] ... df = np.zeros(2) ... 
df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 ... df[1] = 2. * x[1] + 0.2 ... return f, df We'll also use a different local minimization algorithm. Also, we must tell the minimizer that our function returns both energy and gradient (Jacobian). >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True} >>> x0 = [1.0, 1.0] >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, ... niter=200) >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0], ... ret.x[1], ... ret.fun)) global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109 Here is an example using a custom step-taking routine. Imagine you want the first coordinate to take larger steps than the rest of the coordinates. This can be implemented like so: >>> class MyTakeStep: ... def __init__(self, stepsize=0.5): ... self.stepsize = stepsize ... self.rng = np.random.default_rng() ... def __call__(self, x): ... s = self.stepsize ... x[0] += self.rng.uniform(-2.*s, 2.*s) ... x[1:] += self.rng.uniform(-s, s, x[1:].shape) ... return x Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude of `stepsize` to optimize the search. We'll use the same 2-D function as before >>> mytakestep = MyTakeStep() >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, ... niter=200, take_step=mytakestep) >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0], ... ret.x[1], ... ret.fun)) global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109 Now, let's do an example using a custom callback function which prints the value of every minimum found >>> def print_fun(x, f, accepted): ... print("at minimum %.4f accepted %d" % (f, int(accepted))) We'll run it for only 10 basinhopping steps this time. >>> rng = np.random.default_rng() >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, ... niter=10, callback=print_fun, seed=rng) at minimum 0.4159 accepted 1 at minimum -0.4317 accepted 1 at minimum -1.0109 accepted 1 at minimum -0.9073 accepted 1 at minimum -0.4317 accepted 0 at minimum -0.1021 accepted 1 at minimum -0.7425 accepted 1 at minimum -0.9073 accepted 1 at minimum -0.4317 accepted 0 at minimum -0.7425 accepted 1 at minimum -0.9073 accepted 1 The minimum at -1.0109 is actually the global minimum, found already on the 8th iteration. """ if target_accept_rate <= 0. or target_accept_rate >= 1.: raise ValueError('target_accept_rate has to be in range (0, 1)') if stepwise_factor <= 0. 
or stepwise_factor >= 1.: raise ValueError('stepwise_factor has to be in range (0, 1)') x0 = np.array(x0) # set up the np.random generator rng = check_random_state(seed) # set up minimizer if minimizer_kwargs is None: minimizer_kwargs = dict() wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func, **minimizer_kwargs) # set up step-taking algorithm if take_step is not None: if not callable(take_step): raise TypeError("take_step must be callable") # if take_step.stepsize exists then use AdaptiveStepsize to control # take_step.stepsize if hasattr(take_step, "stepsize"): take_step_wrapped = AdaptiveStepsize( take_step, interval=interval, accept_rate=target_accept_rate, factor=stepwise_factor, verbose=disp) else: take_step_wrapped = take_step else: # use default displace = RandomDisplacement(stepsize=stepsize, random_gen=rng) take_step_wrapped = AdaptiveStepsize(displace, interval=interval, accept_rate=target_accept_rate, factor=stepwise_factor, verbose=disp) # set up accept tests accept_tests = [] if accept_test is not None: if not callable(accept_test): raise TypeError("accept_test must be callable") accept_tests = [accept_test] # use default metropolis = Metropolis(T, random_gen=rng) accept_tests.append(metropolis) if niter_success is None: niter_success = niter + 2 bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped, accept_tests, disp=disp) # The wrapped minimizer is called once during construction of # BasinHoppingRunner, so run the callback if callable(callback): callback(bh.storage.minres.x, bh.storage.minres.fun, True) # start main iteration loop count, i = 0, 0 message = ["requested number of basinhopping iterations completed" " successfully"] for i in range(niter): new_global_min = bh.one_cycle() if callable(callback): # should we pass a copy of x? val = callback(bh.xtrial, bh.energy_trial, bh.accept) if val is not None: if val: message = ["callback function requested stop early by " "returning True"] break count += 1 if new_global_min: count = 0 elif count > niter_success: message = ["success condition satisfied"] break # prepare return object res = bh.res res.lowest_optimization_result = bh.storage.get_lowest() res.x = np.copy(res.lowest_optimization_result.x) res.fun = res.lowest_optimization_result.fun res.message = message res.nit = i + 1 res.success = res.lowest_optimization_result.success return res
30,657
39.660477
104
py
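The basinhopping entry above describes the Metropolis acceptance rule and shows that any user-supplied `accept_test` is combined with it. As a minimal hedged sketch (not part of the file above; the class name `MyBounds` and the bound values are illustrative), a custom accept test can reject trial minima that leave a box, relying on basinhopping passing the trial point as the keyword argument ``x_new``:

import numpy as np
from scipy.optimize import basinhopping

func = lambda x: np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]

class MyBounds:
    # Reject any trial minimum that leaves the box [xmin, xmax].
    def __init__(self, xmin=-2.0, xmax=2.0):
        self.xmin, self.xmax = xmin, xmax

    def __call__(self, **kwargs):
        x = kwargs["x_new"]
        return bool(np.all(x <= self.xmax) and np.all(x >= self.xmin))

ret = basinhopping(func, x0=[1.0], niter=50, T=1.0,
                   accept_test=MyBounds(),
                   minimizer_kwargs={"method": "BFGS"})
print(ret.x, ret.fun)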
scipy
scipy-main/scipy/optimize/_trustregion_krylov.py
from ._trustregion import (_minimize_trust_region) from ._trlib import (get_trlib_quadratic_subproblem) __all__ = ['_minimize_trust_krylov'] def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None, inexact=True, **trust_region_options): """ Minimization of a scalar function of one or more variables using a nearly exact trust-region algorithm that only requires matrix-vector products with the Hessian matrix. .. versionadded:: 1.0.0 Options ------- inexact : bool, optional Accuracy to solve subproblems. If True requires fewer nonlinear iterations, but more vector products. """ if jac is None: raise ValueError('Jacobian is required for Krylov trust-region ' 'minimization') if hess is None and hessp is None: raise ValueError('Either the Hessian or the Hessian-vector product ' 'is required for Krylov trust-region minimization') # tol_rel specifies the termination tolerance relative to the initial # gradient norm in the Krylov subspace iteration. # - tol_rel_i specifies the tolerance for interior convergence. # - tol_rel_b specifies the tolerance for boundary convergence. # in nonlinear programming applications it is not necessary to solve # the boundary case as exact as the interior case. # - setting tol_rel_i=-2 leads to a forcing sequence in the Krylov # subspace iteration leading to quadratic convergence if eventually # the trust region stays inactive. # - setting tol_rel_b=-3 leads to a forcing sequence in the Krylov # subspace iteration leading to superlinear convergence as long # as the iterates hit the trust region boundary. # For details consult the documentation of trlib_krylov_min # in _trlib/trlib_krylov.h # # Optimality of this choice of parameters among a range of possibilities # has been tested on the unconstrained subset of the CUTEst library. if inexact: return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=get_trlib_quadratic_subproblem( tol_rel_i=-2.0, tol_rel_b=-3.0, disp=trust_region_options.get('disp', False) ), **trust_region_options) else: return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=get_trlib_quadratic_subproblem( tol_rel_i=1e-8, tol_rel_b=1e-6, disp=trust_region_options.get('disp', False) ), **trust_region_options)
3,030
44.924242
86
py
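`_minimize_trust_krylov` is reached through `scipy.optimize.minimize(method='trust-krylov')` and needs a gradient plus either a Hessian or a Hessian-vector product. A minimal hedged usage sketch, using the Rosenbrock helpers shipped in `scipy.optimize` (the starting point is arbitrary):

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
# 'trust-krylov' requires jac and either hess or hessp; supplying only a
# Hessian-vector product is enough for the trlib subproblem solver.
res = minimize(rosen, x0, method='trust-krylov',
               jac=rosen_der, hessp=rosen_hess_prod,
               options={'inexact': True})
print(res.x)  # should approach np.ones(5), the Rosenbrock minimum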
scipy
scipy-main/scipy/optimize/slsqp.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.optimize` namespace for importing the functions # included below. import warnings from . import _slsqp_py __all__ = [ # noqa: F822 'OptimizeResult', 'append', 'approx_derivative', 'approx_jacobian', 'array', 'asfarray', 'atleast_1d', 'concatenate', 'exp', 'finfo', 'fmin_slsqp', 'inf', 'isfinite', 'linalg', 'old_bound_to_new', 'slsqp', 'sqrt', 'vstack', 'zeros', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.optimize.slsqp is deprecated and has no attribute " f"{name}. Try looking in scipy.optimize instead.") warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, " "the `scipy.optimize.slsqp` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_slsqp_py, name)
1,044
21.234043
78
py
scipy
scipy-main/scipy/optimize/_spectral.py
""" Spectral Algorithm for Nonlinear Equations """ import collections import numpy as np from scipy.optimize import OptimizeResult from scipy.optimize._optimize import _check_unknown_options from ._linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng class _NoConvergence(Exception): pass def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000, fnorm=None, callback=None, disp=False, M=10, eta_strategy=None, sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options): r""" Solve nonlinear equation with the DF-SANE method Options ------- ftol : float, optional Relative norm tolerance. fatol : float, optional Absolute norm tolerance. Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``. fnorm : callable, optional Norm to use in the convergence check. If None, 2-norm is used. maxfev : int, optional Maximum number of function evaluations. disp : bool, optional Whether to print convergence process to stdout. eta_strategy : callable, optional Choice of the ``eta_k`` parameter, which gives slack for growth of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with `k` the iteration number, `x` the current iterate and `F` the current residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``. Default: ``||F||**2 / (1 + k)**2``. sigma_eps : float, optional The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``. Default: 1e-10 sigma_0 : float, optional Initial spectral coefficient. Default: 1.0 M : int, optional Number of iterates to include in the nonmonotonic line search. Default: 10 line_search : {'cruz', 'cheng'} Type of line search to employ. 'cruz' is the original one defined in [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)]. Default: 'cruz' References ---------- .. [1] "Spectral residual method without gradient information for solving large-scale nonlinear systems of equations." W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014). .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009). """ _check_unknown_options(unknown_options) if line_search not in ('cheng', 'cruz'): raise ValueError(f"Invalid value {line_search!r} for 'line_search'") nexp = 2 if eta_strategy is None: # Different choice from [1], as their eta is not invariant # vs. scaling of F. def eta_strategy(k, x, F): # Obtain squared 2-norm of the initial residual from the outer scope return f_0 / (1 + k)**2 if fnorm is None: def fnorm(F): # Obtain squared 2-norm of the current residual from the outer scope return f_k**(1.0/nexp) def fmerit(F): return np.linalg.norm(F)**nexp nfev = [0] f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, nfev, maxfev, args) k = 0 f_0 = f_k sigma_k = sigma_0 F_0_norm = fnorm(F_k) # For the 'cruz' line search prev_fs = collections.deque([f_k], M) # For the 'cheng' line search Q = 1.0 C = f_0 converged = False message = "too many function evaluations required" while True: F_k_norm = fnorm(F_k) if disp: print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k)) if callback is not None: callback(x_k, F_k) if F_k_norm < ftol * F_0_norm + fatol: # Converged! 
message = "successful convergence" converged = True break # Control spectral parameter, from [2] if abs(sigma_k) > 1/sigma_eps: sigma_k = 1/sigma_eps * np.sign(sigma_k) elif abs(sigma_k) < sigma_eps: sigma_k = sigma_eps # Line search direction d = -sigma_k * F_k # Nonmonotone line search eta = eta_strategy(k, x_k, F_k) try: if line_search == 'cruz': alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta=eta) elif line_search == 'cheng': alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta=eta) except _NoConvergence: break # Update spectral parameter s_k = xp - x_k y_k = Fp - F_k sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k) # Take step x_k = xp F_k = Fp f_k = fp # Store function value if line_search == 'cruz': prev_fs.append(fp) k += 1 x = _wrap_result(x_k, is_complex, shape=x_shape) F = _wrap_result(F_k, is_complex) result = OptimizeResult(x=x, success=converged, message=message, fun=F, nfev=nfev[0], nit=k) return result def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()): """ Wrap a function and an initial value so that (i) complex values are wrapped to reals, and (ii) value for a merit function fmerit(x, f) is computed at the same time, (iii) iteration count is maintained and an exception is raised if it is exceeded. Parameters ---------- func : callable Function to wrap x0 : ndarray Initial value fmerit : callable Merit function fmerit(f) for computing merit value from residual. nfev_list : list List to store number of evaluations in. Should be [0] in the beginning. maxfev : int Maximum number of evaluations before _NoConvergence is raised. args : tuple Extra arguments to func Returns ------- wrap_func : callable Wrapped function, to be called as ``F, fp = wrap_func(x0)`` x0_wrap : ndarray of float Wrapped initial value; raveled to 1-D and complex values mapped to reals. x0_shape : tuple Shape of the initial value array f : float Merit function at F F : ndarray of float Residual at x0_wrap is_complex : bool Whether complex values were mapped to reals """ x0 = np.asarray(x0) x0_shape = x0.shape F = np.asarray(func(x0, *args)).ravel() is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F) x0 = x0.ravel() nfev_list[0] = 1 if is_complex: def wrap_func(x): if nfev_list[0] >= maxfev: raise _NoConvergence() nfev_list[0] += 1 z = _real2complex(x).reshape(x0_shape) v = np.asarray(func(z, *args)).ravel() F = _complex2real(v) f = fmerit(F) return f, F x0 = _complex2real(x0) F = _complex2real(F) else: def wrap_func(x): if nfev_list[0] >= maxfev: raise _NoConvergence() nfev_list[0] += 1 x = x.reshape(x0_shape) F = np.asarray(func(x, *args)).ravel() f = fmerit(F) return f, F return wrap_func, x0, x0_shape, fmerit(F), F, is_complex def _wrap_result(result, is_complex, shape=None): """ Convert from real to complex and reshape result arrays. """ if is_complex: z = _real2complex(result) else: z = result if shape is not None: z = z.reshape(shape) return z def _real2complex(x): return np.ascontiguousarray(x, dtype=float).view(np.complex128) def _complex2real(z): return np.ascontiguousarray(z, dtype=complex).view(np.float64)
7,920
29.70155
103
py
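`_root_df_sane` is the backend behind `scipy.optimize.root(method='df-sane')`. A minimal hedged sketch showing how the options documented above are passed through; the test system (an elementwise exponential with its root at the origin) is chosen only for illustration:

import numpy as np
from scipy.optimize import root

def fun(x):
    # F(x) = exp(x) - 1 componentwise; the only root is the zero vector.
    return np.exp(x) - 1.0

sol = root(fun, x0=[0.5, -0.5, 1.0], method='df-sane',
           options={'ftol': 1e-10, 'line_search': 'cruz'})
print(sol.x, sol.success)  # x should be close to [0, 0, 0]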
scipy
scipy-main/scipy/optimize/_remove_redundancy.py
""" Routines for removing redundant (linearly dependent) equations from linear programming equality constraints. """ # Author: Matt Haberland import numpy as np from scipy.linalg import svd from scipy.linalg.interpolative import interp_decomp import scipy from scipy.linalg.blas import dtrsm def _row_count(A): """ Counts the number of nonzeros in each row of input array A. Nonzeros are defined as any element with absolute value greater than tol = 1e-13. This value should probably be an input to the function. Parameters ---------- A : 2-D array An array representing a matrix Returns ------- rowcount : 1-D array Number of nonzeros in each row of A """ tol = 1e-13 return np.array((abs(A) > tol).sum(axis=1)).flatten() def _get_densest(A, eligibleRows): """ Returns the index of the densest row of A. Ignores rows that are not eligible for consideration. Parameters ---------- A : 2-D array An array representing a matrix eligibleRows : 1-D logical array Values indicate whether the corresponding row of A is eligible to be considered Returns ------- i_densest : int Index of the densest row in A eligible for consideration """ rowCounts = _row_count(A) return np.argmax(rowCounts * eligibleRows) def _remove_zero_rows(A, b): """ Eliminates trivial equations from system of equations defined by Ax = b and identifies trivial infeasibilities Parameters ---------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the removal operation 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. """ status = 0 message = "" i_zero = _row_count(A) == 0 A = A[np.logical_not(i_zero), :] if not np.allclose(b[i_zero], 0): status = 2 message = "There is a zero row in A_eq with a nonzero corresponding " \ "entry in b_eq. The problem is infeasible." b = b[np.logical_not(i_zero)] return A, b, status, message def bg_update_dense(plu, perm_r, v, j): LU, p = plu vperm = v[perm_r] u = dtrsm(1, LU, vperm, lower=1, diag=1) LU[:j+1, j] = u[:j+1] l = u[j+1:] piv = LU[j, j] LU[j+1:, j] += (l/piv) return LU, p def _remove_redundancy_pivot_dense(A, rhs, true_rank=None): """ Eliminates redundant equations from system of equations defined by Ax = b and identifies infeasibilities. Parameters ---------- A : 2-D sparse matrix An matrix representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D sparse matrix A matrix representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the system 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. References ---------- .. [2] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. """ tolapiv = 1e-8 tolprimal = 1e-8 status = 0 message = "" inconsistent = ("There is a linear combination of rows of A_eq that " "results in zero, suggesting a redundant constraint. 
" "However the same linear combination of b_eq is " "nonzero, suggesting that the constraints conflict " "and the problem is infeasible.") A, rhs, status, message = _remove_zero_rows(A, rhs) if status != 0: return A, rhs, status, message m, n = A.shape v = list(range(m)) # Artificial column indices. b = list(v) # Basis column indices. # This is better as a list than a set because column order of basis matrix # needs to be consistent. d = [] # Indices of dependent rows perm_r = None A_orig = A A = np.zeros((m, m + n), order='F') np.fill_diagonal(A, 1) A[:, m:] = A_orig e = np.zeros(m) js_candidates = np.arange(m, m+n, dtype=int) # candidate columns for basis # manual masking was faster than masked array js_mask = np.ones(js_candidates.shape, dtype=bool) # Implements basic algorithm from [2] # Uses some of the suggested improvements (removing zero rows and # Bartels-Golub update idea). # Removing column singletons would be easy, but it is not as important # because the procedure is performed only on the equality constraint # matrix from the original problem - not on the canonical form matrix, # which would have many more column singletons due to slack variables # from the inequality constraints. # The thoughts on "crashing" the initial basis are only really useful if # the matrix is sparse. lu = np.eye(m, order='F'), np.arange(m) # initial LU is trivial perm_r = lu[1] for i in v: e[i] = 1 if i > 0: e[i-1] = 0 try: # fails for i==0 and any time it gets ill-conditioned j = b[i-1] lu = bg_update_dense(lu, perm_r, A[:, j], i-1) except Exception: lu = scipy.linalg.lu_factor(A[:, b]) LU, p = lu perm_r = list(range(m)) for i1, i2 in enumerate(p): perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1] pi = scipy.linalg.lu_solve(lu, e, trans=1) js = js_candidates[js_mask] batch = 50 # This is a tiny bit faster than looping over columns indivually, # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv: for j_index in range(0, len(js), batch): j_indices = js[j_index: min(j_index+batch, len(js))] c = abs(A[:, j_indices].transpose().dot(pi)) if (c > tolapiv).any(): j = js[j_index + np.argmax(c)] # very independent column b[i] = j js_mask[j-m] = False break else: bibar = pi.T.dot(rhs.reshape(-1, 1)) bnorm = np.linalg.norm(rhs) if abs(bibar)/(1+bnorm) > tolprimal: # inconsistent status = 2 message = inconsistent return A_orig, rhs, status, message else: # dependent d.append(i) if true_rank is not None and len(d) == m - true_rank: break # found all redundancies keep = set(range(m)) keep = list(keep - set(d)) return A_orig[keep, :], rhs[keep], status, message def _remove_redundancy_pivot_sparse(A, rhs): """ Eliminates redundant equations from system of equations defined by Ax = b and identifies infeasibilities. Parameters ---------- A : 2-D sparse matrix An matrix representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D sparse matrix A matrix representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the system 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. References ---------- .. [2] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. 
""" tolapiv = 1e-8 tolprimal = 1e-8 status = 0 message = "" inconsistent = ("There is a linear combination of rows of A_eq that " "results in zero, suggesting a redundant constraint. " "However the same linear combination of b_eq is " "nonzero, suggesting that the constraints conflict " "and the problem is infeasible.") A, rhs, status, message = _remove_zero_rows(A, rhs) if status != 0: return A, rhs, status, message m, n = A.shape v = list(range(m)) # Artificial column indices. b = list(v) # Basis column indices. # This is better as a list than a set because column order of basis matrix # needs to be consistent. k = set(range(m, m+n)) # Structural column indices. d = [] # Indices of dependent rows A_orig = A A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc() e = np.zeros(m) # Implements basic algorithm from [2] # Uses only one of the suggested improvements (removing zero rows). # Removing column singletons would be easy, but it is not as important # because the procedure is performed only on the equality constraint # matrix from the original problem - not on the canonical form matrix, # which would have many more column singletons due to slack variables # from the inequality constraints. # The thoughts on "crashing" the initial basis sound useful, but the # description of the procedure seems to assume a lot of familiarity with # the subject; it is not very explicit. I already went through enough # trouble getting the basic algorithm working, so I was not interested in # trying to decipher this, too. (Overall, the paper is fraught with # mistakes and ambiguities - which is strange, because the rest of # Andersen's papers are quite good.) # I tried and tried and tried to improve performance using the # Bartels-Golub update. It works, but it's only practical if the LU # factorization can be specialized as described, and that is not possible # until the SciPy SuperLU interface permits control over column # permutation - see issue #7700. for i in v: B = A[:, b] e[i] = 1 if i > 0: e[i-1] = 0 pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1) js = list(k-set(b)) # not efficient, but this is not the time sink... # Due to overhead, it tends to be faster (for problems tested) to # compute the full matrix-vector product rather than individual # vector-vector products (with the chance of terminating as soon # as any are nonzero). For very large matrices, it might be worth # it to compute, say, 100 or 1000 at a time and stop when a nonzero # is found. c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0] if len(c) > 0: # independent j = js[c[0]] # in a previous commit, the previous line was changed to choose # index j corresponding with the maximum dot product. # While this avoided issues with almost # singular matrices, it slowed the routine in most NETLIB tests. # I think this is because these columns were denser than the # first column with nonzero dot product (c[0]). # It would be nice to have a heuristic that balances sparsity with # high dot product, but I don't think it's worth the time to # develop one right now. Bartels-Golub update is a much higher # priority. 
b[i] = j # replace artificial column else: bibar = pi.T.dot(rhs.reshape(-1, 1)) bnorm = np.linalg.norm(rhs) if abs(bibar)/(1 + bnorm) > tolprimal: status = 2 message = inconsistent return A_orig, rhs, status, message else: # dependent d.append(i) keep = set(range(m)) keep = list(keep - set(d)) return A_orig[keep, :], rhs[keep], status, message def _remove_redundancy_svd(A, b): """ Eliminates redundant equations from system of equations defined by Ax = b and identifies infeasibilities. Parameters ---------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations Returns ------- A : 2-D array An array representing the left-hand side of a system of equations b : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the system 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. References ---------- .. [2] Andersen, Erling D. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. """ A, b, status, message = _remove_zero_rows(A, b) if status != 0: return A, b, status, message U, s, Vh = svd(A) eps = np.finfo(float).eps tol = s.max() * max(A.shape) * eps m, n = A.shape s_min = s[-1] if m <= n else 0 # this algorithm is faster than that of [2] when the nullspace is small # but it could probably be improvement by randomized algorithms and with # a sparse implementation. # it relies on repeated singular value decomposition to find linearly # dependent rows (as identified by columns of U that correspond with zero # singular values). Unfortunately, only one row can be removed per # decomposition (I tried otherwise; doing so can cause problems.) # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds # but that function is unreliable at finding singular values near zero. # Finding max eigenvalue L of A A^T, then largest eigenvalue (and # associated eigenvector) of -A A^T + L I (I is identity) via power # iteration would also work in theory, but is only efficient if the # smallest nonzero eigenvalue of A A^T is close to the largest nonzero # eigenvalue. while abs(s_min) < tol: v = U[:, -1] # TODO: return these so user can eliminate from problem? # rows need to be represented in significant amount eligibleRows = np.abs(v) > tol * 10e6 if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol): status = 4 message = ("Due to numerical issues, redundant equality " "constraints could not be removed automatically. " "Try providing your constraint matrices as sparse " "matrices to activate sparse presolve, try turning " "off redundancy removal, or try turning off presolve " "altogether.") break if np.any(np.abs(v.dot(b)) > tol * 100): # factor of 100 to fix 10038 and 10349 status = 2 message = ("There is a linear combination of rows of A_eq that " "results in zero, suggesting a redundant constraint. " "However the same linear combination of b_eq is " "nonzero, suggesting that the constraints conflict " "and the problem is infeasible.") break i_remove = _get_densest(A, eligibleRows) A = np.delete(A, i_remove, axis=0) b = np.delete(b, i_remove) U, s, Vh = svd(A) m, n = A.shape s_min = s[-1] if m <= n else 0 return A, b, status, message def _remove_redundancy_id(A, rhs, rank=None, randomized=True): """Eliminates redundant equations from a system of equations. 
Eliminates redundant equations from system of equations defined by Ax = b and identifies infeasibilities. Parameters ---------- A : 2-D array An array representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations rank : int, optional The rank of A randomized: bool, optional True for randomized interpolative decomposition Returns ------- A : 2-D array An array representing the left-hand side of a system of equations rhs : 1-D array An array representing the right-hand side of a system of equations status: int An integer indicating the status of the system 0: No infeasibility identified 2: Trivially infeasible message : str A string descriptor of the exit status of the optimization. """ status = 0 message = "" inconsistent = ("There is a linear combination of rows of A_eq that " "results in zero, suggesting a redundant constraint. " "However the same linear combination of b_eq is " "nonzero, suggesting that the constraints conflict " "and the problem is infeasible.") A, rhs, status, message = _remove_zero_rows(A, rhs) if status != 0: return A, rhs, status, message m, n = A.shape k = rank if rank is None: k = np.linalg.matrix_rank(A) idx, proj = interp_decomp(A.T, k, rand=randomized) # first k entries in idx are indices of the independent rows # remaining entries are the indices of the m-k dependent rows # proj provides a linear combinations of rows of A2 that form the # remaining m-k (dependent) rows. The same linear combination of entries # in rhs2 must give the remaining m-k entries. If not, the system is # inconsistent, and the problem is infeasible. if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]): status = 2 message = inconsistent # sort indices because the other redundancy removal routines leave rows # in original order and tests were written with that in mind idx = sorted(idx[:k]) A2 = A[idx, :] rhs2 = rhs[idx] return A2, rhs2, status, message
18,767
34.885277
88
py
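These routines are private helpers used by linprog's presolve, so the sketch below is illustration only: it imports the internal module directly and uses a made-up 3x2 system in which the third row is the sum of the first two.

import numpy as np
from scipy.optimize._remove_redundancy import _remove_redundancy_svd

# Equality system A @ x = b whose third row equals row 0 + row 1, so one
# constraint carries no new information.
A = np.array([[1., 0.],
              [0., 1.],
              [1., 1.]])
b = np.array([1., 2., 3.])

A2, b2, status, message = _remove_redundancy_svd(A, b)
print(A2.shape, status)  # expected (2, 2) and status 0 (no infeasibility)

# If b[2] were changed to 4, the same linear combination of rows would be
# zero while the combination of b entries would not, and status would be 2.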
scipy
scipy-main/scipy/optimize/__init__.py
""" ===================================================== Optimization and root finding (:mod:`scipy.optimize`) ===================================================== .. currentmodule:: scipy.optimize .. toctree:: :hidden: optimize.cython_optimize SciPy ``optimize`` provides functions for minimizing (or maximizing) objective functions, possibly subject to constraints. It includes solvers for nonlinear problems (with support for both local and global optimization algorithms), linear programing, constrained and nonlinear least-squares, root finding, and curve fitting. Common functions and objects, shared across different solvers, are: .. autosummary:: :toctree: generated/ show_options - Show specific options optimization solvers. OptimizeResult - The optimization result returned by some optimizers. OptimizeWarning - The optimization encountered problems. Optimization ============ Scalar functions optimization ----------------------------- .. autosummary:: :toctree: generated/ minimize_scalar - Interface for minimizers of univariate functions The `minimize_scalar` function supports the following methods: .. toctree:: optimize.minimize_scalar-brent optimize.minimize_scalar-bounded optimize.minimize_scalar-golden Local (multivariate) optimization --------------------------------- .. autosummary:: :toctree: generated/ minimize - Interface for minimizers of multivariate functions. The `minimize` function supports the following methods: .. toctree:: optimize.minimize-neldermead optimize.minimize-powell optimize.minimize-cg optimize.minimize-bfgs optimize.minimize-newtoncg optimize.minimize-lbfgsb optimize.minimize-tnc optimize.minimize-cobyla optimize.minimize-slsqp optimize.minimize-trustconstr optimize.minimize-dogleg optimize.minimize-trustncg optimize.minimize-trustkrylov optimize.minimize-trustexact Constraints are passed to `minimize` function as a single object or as a list of objects from the following classes: .. autosummary:: :toctree: generated/ NonlinearConstraint - Class defining general nonlinear constraints. LinearConstraint - Class defining general linear constraints. Simple bound constraints are handled separately and there is a special class for them: .. autosummary:: :toctree: generated/ Bounds - Bound constraints. Quasi-Newton strategies implementing `HessianUpdateStrategy` interface can be used to approximate the Hessian in `minimize` function (available only for the 'trust-constr' method). Available quasi-Newton methods implementing this interface are: .. autosummary:: :toctree: generated/ BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. SR1 - Symmetric-rank-1 Hessian update strategy. Global optimization ------------------- .. autosummary:: :toctree: generated/ basinhopping - Basinhopping stochastic optimizer. brute - Brute force searching optimizer. differential_evolution - Stochastic optimizer using differential evolution. shgo - Simplicial homology global optimizer. dual_annealing - Dual annealing stochastic optimizer. direct - DIRECT (Dividing Rectangles) optimizer. Least-squares and curve fitting =============================== Nonlinear least-squares ----------------------- .. autosummary:: :toctree: generated/ least_squares - Solve a nonlinear least-squares problem with bounds on the variables. Linear least-squares -------------------- .. autosummary:: :toctree: generated/ nnls - Linear least-squares problem with non-negativity constraint. lsq_linear - Linear least-squares problem with bound constraints. Curve fitting ------------- .. 
autosummary:: :toctree: generated/ curve_fit -- Fit curve to a set of points. Root finding ============ Scalar functions ---------------- .. autosummary:: :toctree: generated/ root_scalar - Unified interface for nonlinear solvers of scalar functions. brentq - quadratic interpolation Brent method. brenth - Brent method, modified by Harris with hyperbolic extrapolation. ridder - Ridder's method. bisect - Bisection method. newton - Newton's method (also Secant and Halley's methods). toms748 - Alefeld, Potra & Shi Algorithm 748. RootResults - The root finding result returned by some root finders. The `root_scalar` function supports the following methods: .. toctree:: optimize.root_scalar-brentq optimize.root_scalar-brenth optimize.root_scalar-bisect optimize.root_scalar-ridder optimize.root_scalar-newton optimize.root_scalar-toms748 optimize.root_scalar-secant optimize.root_scalar-halley The table below lists situations and appropriate methods, along with *asymptotic* convergence rates per iteration (and per function evaluation) for successful convergence to a simple root(*). Bisection is the slowest of them all, adding one bit of accuracy for each function evaluation, but is guaranteed to converge. The other bracketing methods all (eventually) increase the number of accurate bits by about 50% for every function evaluation. The derivative-based methods, all built on `newton`, can converge quite quickly if the initial value is close to the root. They can also be applied to functions defined on (a subset of) the complex plane. +-------------+----------+----------+-----------+-------------+-------------+----------------+ | Domain of f | Bracket? | Derivatives? | Solvers | Convergence | + + +----------+-----------+ +-------------+----------------+ | | | `fprime` | `fprime2` | | Guaranteed? | Rate(s)(*) | +=============+==========+==========+===========+=============+=============+================+ | `R` | Yes | N/A | N/A | - bisection | - Yes | - 1 "Linear" | | | | | | - brentq | - Yes | - >=1, <= 1.62 | | | | | | - brenth | - Yes | - >=1, <= 1.62 | | | | | | - ridder | - Yes | - 2.0 (1.41) | | | | | | - toms748 | - Yes | - 2.7 (1.65) | +-------------+----------+----------+-----------+-------------+-------------+----------------+ | `R` or `C` | No | No | No | secant | No | 1.62 (1.62) | +-------------+----------+----------+-----------+-------------+-------------+----------------+ | `R` or `C` | No | Yes | No | newton | No | 2.00 (1.41) | +-------------+----------+----------+-----------+-------------+-------------+----------------+ | `R` or `C` | No | Yes | Yes | halley | No | 3.00 (1.44) | +-------------+----------+----------+-----------+-------------+-------------+----------------+ .. seealso:: `scipy.optimize.cython_optimize` -- Typed Cython versions of root finding functions Fixed point finding: .. autosummary:: :toctree: generated/ fixed_point - Single-variable fixed-point solver. Multidimensional ---------------- .. autosummary:: :toctree: generated/ root - Unified interface for nonlinear solvers of multivariate functions. The `root` function supports the following methods: .. toctree:: optimize.root-hybr optimize.root-lm optimize.root-broyden1 optimize.root-broyden2 optimize.root-anderson optimize.root-linearmixing optimize.root-diagbroyden optimize.root-excitingmixing optimize.root-krylov optimize.root-dfsane Linear programming / MILP ========================= .. autosummary:: :toctree: generated/ milp -- Mixed integer linear programming. 
linprog -- Unified interface for minimizers of linear programming problems. The `linprog` function supports the following methods: .. toctree:: optimize.linprog-simplex optimize.linprog-interior-point optimize.linprog-revised_simplex optimize.linprog-highs-ipm optimize.linprog-highs-ds optimize.linprog-highs The simplex, interior-point, and revised simplex methods support callback functions, such as: .. autosummary:: :toctree: generated/ linprog_verbose_callback -- Sample callback function for linprog (simplex). Assignment problems =================== .. autosummary:: :toctree: generated/ linear_sum_assignment -- Solves the linear-sum assignment problem. quadratic_assignment -- Solves the quadratic assignment problem. The `quadratic_assignment` function supports the following methods: .. toctree:: optimize.qap-faq optimize.qap-2opt Utilities ========= Finite-difference approximation ------------------------------- .. autosummary:: :toctree: generated/ approx_fprime - Approximate the gradient of a scalar function. check_grad - Check the supplied derivative using finite differences. Line search ----------- .. autosummary:: :toctree: generated/ bracket - Bracket a minimum, given two starting points. line_search - Return a step that satisfies the strong Wolfe conditions. Hessian approximation --------------------- .. autosummary:: :toctree: generated/ LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian. HessianUpdateStrategy - Interface for implementing Hessian update strategies Benchmark problems ------------------ .. autosummary:: :toctree: generated/ rosen - The Rosenbrock function. rosen_der - The derivative of the Rosenbrock function. rosen_hess - The Hessian matrix of the Rosenbrock function. rosen_hess_prod - Product of the Rosenbrock Hessian with a vector. Legacy functions ================ The functions below are not recommended for use in new scripts; all of these methods are accessible via a newer, more consistent interfaces, provided by the interfaces above. Optimization ------------ General-purpose multivariate methods: .. autosummary:: :toctree: generated/ fmin - Nelder-Mead Simplex algorithm. fmin_powell - Powell's (modified) conjugate direction method. fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm. fmin_bfgs - Quasi-Newton method (Broydon-Fletcher-Goldfarb-Shanno). fmin_ncg - Line-search Newton Conjugate Gradient. Constrained multivariate methods: .. autosummary:: :toctree: generated/ fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer. fmin_tnc - Truncated Newton code. fmin_cobyla - Constrained optimization by linear approximation. fmin_slsqp - Minimization using sequential least-squares programming. Univariate (scalar) minimization methods: .. autosummary:: :toctree: generated/ fminbound - Bounded minimization of a scalar function. brent - 1-D function minimization using Brent method. golden - 1-D function minimization using Golden Section method. Least-squares ------------- .. autosummary:: :toctree: generated/ leastsq - Minimize the sum of squares of M equations in N unknowns. Root finding ------------ General nonlinear solvers: .. autosummary:: :toctree: generated/ fsolve - Non-linear multivariable equation solver. broyden1 - Broyden's first method. broyden2 - Broyden's second method. Large-scale nonlinear solvers: .. autosummary:: :toctree: generated/ newton_krylov anderson BroydenFirst InverseJacobian KrylovJacobian Simple iteration solvers: .. 
autosummary:: :toctree: generated/ excitingmixing linearmixing diagbroyden """ # noqa: E501 from ._optimize import * from ._minimize import * from ._root import * from ._root_scalar import * from ._minpack_py import * from ._zeros_py import * from ._lbfgsb_py import fmin_l_bfgs_b, LbfgsInvHessProduct from ._tnc import fmin_tnc from ._cobyla_py import fmin_cobyla from ._nonlin import * from ._slsqp_py import fmin_slsqp from ._nnls import nnls from ._basinhopping import basinhopping from ._linprog import linprog, linprog_verbose_callback from ._lsap import linear_sum_assignment from ._differentialevolution import differential_evolution from ._lsq import least_squares, lsq_linear from ._constraints import (NonlinearConstraint, LinearConstraint, Bounds) from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1 from ._shgo import shgo from ._dual_annealing import dual_annealing from ._qap import quadratic_assignment from ._direct_py import direct from ._milp import milp # Deprecated namespaces, to be removed in v2.0.0 from . import ( cobyla, lbfgsb, linesearch, minpack, minpack2, moduleTNC, nonlin, optimize, slsqp, tnc, zeros ) __all__ = [s for s in dir() if not s.startswith('_')] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
12,880
27.816555
94
py
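The root-finding table in the module docstring above separates guaranteed bracketing methods from faster derivative-based ones. A minimal hedged sketch of both styles through `root_scalar` (the cubic test function is made up for the example):

from scipy.optimize import root_scalar

def f(x):
    return x**3 - 1.0  # single real root at x = 1

def fprime(x):
    return 3.0 * x**2

# Bracketing: convergence is guaranteed once a sign change is bracketed.
sol_bracket = root_scalar(f, bracket=[0.0, 3.0], method='brentq')

# Derivative-based: fast near the root, but not guaranteed globally.
sol_newton = root_scalar(f, x0=0.5, fprime=fprime, method='newton')

print(sol_bracket.root, sol_newton.root)  # both approximately 1.0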
scipy
scipy-main/scipy/optimize/minpack.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.optimize` namespace for importing the functions # included below. import warnings from . import _minpack_py __all__ = [ # noqa: F822 'LEASTSQ_FAILURE', 'LEASTSQ_SUCCESS', 'LinAlgError', 'OptimizeResult', 'OptimizeWarning', 'asarray', 'atleast_1d', 'check_gradient', 'cholesky', 'curve_fit', 'dot', 'dtype', 'error', 'eye', 'finfo', 'fixed_point', 'fsolve', 'greater', 'inexact', 'inf', 'inv', 'issubdtype', 'least_squares', 'leastsq', 'prepare_bounds', 'prod', 'shape', 'solve_triangular', 'svd', 'take', 'transpose', 'triu', 'zeros', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.optimize.minpack is deprecated and has no attribute " f"{name}. Try looking in scipy.optimize instead.") warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, " "the `scipy.optimize.minpack` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_minpack_py, name)
1,277
19.95082
78
py
scipy
scipy-main/scipy/optimize/_constraints.py
"""Constraints definition for minimize.""" import numpy as np from ._hessian_update_strategy import BFGS from ._differentiable_functions import ( VectorFunction, LinearVectorFunction, IdentityVectorFunction) from ._optimize import OptimizeWarning from warnings import warn, catch_warnings, simplefilter from numpy.testing import suppress_warnings from scipy.sparse import issparse def _arr_to_scalar(x): # If x is a numpy array, return x.item(). This will # fail if the array has more than one element. return x.item() if isinstance(x, np.ndarray) else x class NonlinearConstraint: """Nonlinear constraint on the variables. The constraint has the general inequality form:: lb <= fun(x) <= ub Here the vector of independent variables x is passed as ndarray of shape (n,) and ``fun`` returns a vector with m components. It is possible to use equal bounds to represent an equality constraint or infinite bounds to represent a one-sided constraint. Parameters ---------- fun : callable The function defining the constraint. The signature is ``fun(x) -> array_like, shape (m,)``. lb, ub : array_like Lower and upper bounds on the constraint. Each array must have the shape (m,) or be a scalar, in the latter case a bound will be the same for all components of the constraint. Use ``np.inf`` with an appropriate sign to specify a one-sided constraint. Set components of `lb` and `ub` equal to represent an equality constraint. Note that you can mix constraints of different types: interval, one-sided or equality, by setting different components of `lb` and `ub` as necessary. jac : {callable, '2-point', '3-point', 'cs'}, optional Method of computing the Jacobian matrix (an m-by-n matrix, where element (i, j) is the partial derivative of f[i] with respect to x[j]). The keywords {'2-point', '3-point', 'cs'} select a finite difference scheme for the numerical estimation. A callable must have the following signature: ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``. Default is '2-point'. hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional Method for computing the Hessian matrix. The keywords {'2-point', '3-point', 'cs'} select a finite difference scheme for numerical estimation. Alternatively, objects implementing `HessianUpdateStrategy` interface can be used to approximate the Hessian. Currently available implementations are: - `BFGS` (default option) - `SR1` A callable must return the Hessian matrix of ``dot(fun, v)`` and must have the following signature: ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``. Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers. keep_feasible : array_like of bool, optional Whether to keep the constraint components feasible throughout iterations. A single value set this property for all components. Default is False. Has no effect for equality constraints. finite_diff_rel_step: None or array_like, optional Relative step size for the finite difference approximation. Default is None, which will select a reasonable value automatically depending on a finite difference scheme. finite_diff_jac_sparsity: {None, array_like, sparse matrix}, optional Defines the sparsity structure of the Jacobian matrix for finite difference estimation, its shape must be (m, n). If the Jacobian has only few non-zero elements in *each* row, providing the sparsity structure will greatly speed up the computations. A zero entry means that a corresponding element in the Jacobian is identically zero. 
If provided, forces the use of 'lsmr' trust-region solver. If None (default) then dense differencing will be used. Notes ----- Finite difference schemes {'2-point', '3-point', 'cs'} may be used for approximating either the Jacobian or the Hessian. We, however, do not allow its use for approximating both simultaneously. Hence whenever the Jacobian is estimated via finite-differences, we require the Hessian to be estimated using one of the quasi-Newton strategies. The scheme 'cs' is potentially the most accurate, but requires the function to correctly handles complex inputs and be analytically continuable to the complex plane. The scheme '3-point' is more accurate than '2-point' but requires twice as many operations. Examples -------- Constrain ``x[0] < sin(x[1]) + 1.9`` >>> from scipy.optimize import NonlinearConstraint >>> import numpy as np >>> con = lambda x: x[0] - np.sin(x[1]) >>> nlc = NonlinearConstraint(con, -np.inf, 1.9) """ def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(), keep_feasible=False, finite_diff_rel_step=None, finite_diff_jac_sparsity=None): self.fun = fun self.lb = lb self.ub = ub self.finite_diff_rel_step = finite_diff_rel_step self.finite_diff_jac_sparsity = finite_diff_jac_sparsity self.jac = jac self.hess = hess self.keep_feasible = keep_feasible class LinearConstraint: """Linear constraint on the variables. The constraint has the general inequality form:: lb <= A.dot(x) <= ub Here the vector of independent variables x is passed as ndarray of shape (n,) and the matrix A has shape (m, n). It is possible to use equal bounds to represent an equality constraint or infinite bounds to represent a one-sided constraint. Parameters ---------- A : {array_like, sparse matrix}, shape (m, n) Matrix defining the constraint. lb, ub : dense array_like, optional Lower and upper limits on the constraint. Each array must have the shape (m,) or be a scalar, in the latter case a bound will be the same for all components of the constraint. Use ``np.inf`` with an appropriate sign to specify a one-sided constraint. Set components of `lb` and `ub` equal to represent an equality constraint. Note that you can mix constraints of different types: interval, one-sided or equality, by setting different components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf`` and ``ub = np.inf`` (no limits). keep_feasible : dense array_like of bool, optional Whether to keep the constraint components feasible throughout iterations. A single value set this property for all components. Default is False. Has no effect for equality constraints. """ def _input_validation(self): if self.A.ndim != 2: message = "`A` must have exactly two dimensions." raise ValueError(message) try: shape = self.A.shape[0:1] self.lb = np.broadcast_to(self.lb, shape) self.ub = np.broadcast_to(self.ub, shape) self.keep_feasible = np.broadcast_to(self.keep_feasible, shape) except ValueError: message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable " "to shape `A.shape[0:1]`") raise ValueError(message) def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False): if not issparse(A): # In some cases, if the constraint is not valid, this emits a # VisibleDeprecationWarning about ragged nested sequences # before eventually causing an error. `scipy.optimize.milp` would # prefer that this just error out immediately so it can handle it # rather than concerning the user. 
with catch_warnings(): simplefilter("error") self.A = np.atleast_2d(A).astype(np.float64) else: self.A = A if issparse(lb) or issparse(ub): raise ValueError("Constraint limits must be dense arrays.") self.lb = np.atleast_1d(lb).astype(np.float64) self.ub = np.atleast_1d(ub).astype(np.float64) if issparse(keep_feasible): raise ValueError("`keep_feasible` must be a dense array.") self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool) self._input_validation() def residual(self, x): """ Calculate the residual between the constraint function and the limits For a linear constraint of the form:: lb <= A@x <= ub the lower and upper residuals between ``A@x`` and the limits are values ``sl`` and ``sb`` such that:: lb + sl == A@x == ub - sb When all elements of ``sl`` and ``sb`` are positive, all elements of the constraint are satisfied; a negative element in ``sl`` or ``sb`` indicates that the corresponding element of the constraint is not satisfied. Parameters ---------- x: array_like Vector of independent variables Returns ------- sl, sb : array-like The lower and upper residuals """ return self.A@x - self.lb, self.ub - self.A@x class Bounds: """Bounds constraint on the variables. The constraint has the general inequality form:: lb <= x <= ub It is possible to use equal bounds to represent an equality constraint or infinite bounds to represent a one-sided constraint. Parameters ---------- lb, ub : dense array_like, optional Lower and upper bounds on independent variables. `lb`, `ub`, and `keep_feasible` must be the same shape or broadcastable. Set components of `lb` and `ub` equal to fix a variable. Use ``np.inf`` with an appropriate sign to disable bounds on all or some variables. Note that you can mix constraints of different types: interval, one-sided or equality, by setting different components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf`` and ``ub = np.inf`` (no bounds). keep_feasible : dense array_like of bool, optional Whether to keep the constraint components feasible throughout iterations. Must be broadcastable with `lb` and `ub`. Default is False. Has no effect for equality constraints. """ def _input_validation(self): try: res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible) self.lb, self.ub, self.keep_feasible = res except ValueError: message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." raise ValueError(message) def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False): if issparse(lb) or issparse(ub): raise ValueError("Lower and upper bounds must be dense arrays.") self.lb = np.atleast_1d(lb) self.ub = np.atleast_1d(ub) if issparse(keep_feasible): raise ValueError("`keep_feasible` must be a dense array.") self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool) self._input_validation() def __repr__(self): start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}" if np.any(self.keep_feasible): end = f", keep_feasible={self.keep_feasible!r})" else: end = ")" return start + end def residual(self, x): """Calculate the residual (slack) between the input and the bounds For a bound constraint of the form:: lb <= x <= ub the lower and upper residuals between `x` and the bounds are values ``sl`` and ``sb`` such that:: lb + sl == x == ub - sb When all elements of ``sl`` and ``sb`` are positive, all elements of ``x`` lie within the bounds; a negative element in ``sl`` or ``sb`` indicates that the corresponding element of ``x`` is out of bounds. 
Parameters ---------- x: array_like Vector of independent variables Returns ------- sl, sb : array-like The lower and upper residuals """ return x - self.lb, self.ub - x class PreparedConstraint: """Constraint prepared from a user defined constraint. On creation it will check whether a constraint definition is valid and the initial point is feasible. If created successfully, it will contain the attributes listed below. Parameters ---------- constraint : {NonlinearConstraint, LinearConstraint`, Bounds} Constraint to check and prepare. x0 : array_like Initial vector of independent variables. sparse_jacobian : bool or None, optional If bool, then the Jacobian of the constraint will be converted to the corresponded format if necessary. If None (default), such conversion is not made. finite_diff_bounds : 2-tuple, optional Lower and upper bounds on the independent variables for the finite difference approximation, if applicable. Defaults to no bounds. Attributes ---------- fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction} Function defining the constraint wrapped by one of the convenience classes. bounds : 2-tuple Contains lower and upper bounds for the constraints --- lb and ub. These are converted to ndarray and have a size equal to the number of the constraints. keep_feasible : ndarray Array indicating which components must be kept feasible with a size equal to the number of the constraints. """ def __init__(self, constraint, x0, sparse_jacobian=None, finite_diff_bounds=(-np.inf, np.inf)): if isinstance(constraint, NonlinearConstraint): fun = VectorFunction(constraint.fun, x0, constraint.jac, constraint.hess, constraint.finite_diff_rel_step, constraint.finite_diff_jac_sparsity, finite_diff_bounds, sparse_jacobian) elif isinstance(constraint, LinearConstraint): fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian) elif isinstance(constraint, Bounds): fun = IdentityVectorFunction(x0, sparse_jacobian) else: raise ValueError("`constraint` of an unknown type is passed.") m = fun.m lb = np.asarray(constraint.lb, dtype=float) ub = np.asarray(constraint.ub, dtype=float) keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool) lb = np.broadcast_to(lb, m) ub = np.broadcast_to(ub, m) keep_feasible = np.broadcast_to(keep_feasible, m) if keep_feasible.shape != (m,): raise ValueError("`keep_feasible` has a wrong shape.") mask = keep_feasible & (lb != ub) f0 = fun.f if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]): raise ValueError("`x0` is infeasible with respect to some " "inequality constraint with `keep_feasible` " "set to True.") self.fun = fun self.bounds = (lb, ub) self.keep_feasible = keep_feasible def violation(self, x): """How much the constraint is exceeded by. Parameters ---------- x : array-like Vector of independent variables Returns ------- excess : array-like How much the constraint is exceeded by, for each of the constraints specified by `PreparedConstraint.fun`. """ with suppress_warnings() as sup: sup.filter(UserWarning) ev = self.fun.fun(np.asarray(x)) excess_lb = np.maximum(self.bounds[0] - ev, 0) excess_ub = np.maximum(ev - self.bounds[1], 0) return excess_lb + excess_ub def new_bounds_to_old(lb, ub, n): """Convert the new bounds representation to the old one. The new representation is a tuple (lb, ub) and the old one is a list containing n tuples, ith containing lower and upper bound on a ith variable. If any of the entries in lb/ub are -np.inf/np.inf they are replaced by None. 
""" lb = np.broadcast_to(lb, n) ub = np.broadcast_to(ub, n) lb = [float(x) if x > -np.inf else None for x in lb] ub = [float(x) if x < np.inf else None for x in ub] return list(zip(lb, ub)) def old_bound_to_new(bounds): """Convert the old bounds representation to the new one. The new representation is a tuple (lb, ub) and the old one is a list containing n tuples, ith containing lower and upper bound on a ith variable. If any of the entries in lb/ub are None they are replaced by -np.inf/np.inf. """ lb, ub = zip(*bounds) # Convert occurrences of None to -inf or inf, and replace occurrences of # any numpy array x with x.item(). Then wrap the results in numpy arrays. lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf for x in lb]) ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf for x in ub]) return lb, ub def strict_bounds(lb, ub, keep_feasible, n_vars): """Remove bounds which are not asked to be kept feasible.""" strict_lb = np.resize(lb, n_vars).astype(float) strict_ub = np.resize(ub, n_vars).astype(float) keep_feasible = np.resize(keep_feasible, n_vars) strict_lb[~keep_feasible] = -np.inf strict_ub[~keep_feasible] = np.inf return strict_lb, strict_ub def new_constraint_to_old(con, x0): """ Converts new-style constraint objects to old-style constraint dictionaries. """ if isinstance(con, NonlinearConstraint): if (con.finite_diff_jac_sparsity is not None or con.finite_diff_rel_step is not None or not isinstance(con.hess, BFGS) or # misses user specified BFGS con.keep_feasible): warn("Constraint options `finite_diff_jac_sparsity`, " "`finite_diff_rel_step`, `keep_feasible`, and `hess`" "are ignored by this method.", OptimizeWarning) fun = con.fun if callable(con.jac): jac = con.jac else: jac = None else: # LinearConstraint if np.any(con.keep_feasible): warn("Constraint option `keep_feasible` is ignored by this " "method.", OptimizeWarning) A = con.A if issparse(A): A = A.toarray() def fun(x): return np.dot(A, x) def jac(x): return A # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out, # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above. pcon = PreparedConstraint(con, x0) lb, ub = pcon.bounds i_eq = lb == ub i_bound_below = np.logical_xor(lb != -np.inf, i_eq) i_bound_above = np.logical_xor(ub != np.inf, i_eq) i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf) if np.any(i_unbounded): warn("At least one constraint is unbounded above and below. 
Such " "constraints are ignored.", OptimizeWarning) ceq = [] if np.any(i_eq): def f_eq(x): y = np.array(fun(x)).flatten() return y[i_eq] - lb[i_eq] ceq = [{"type": "eq", "fun": f_eq}] if jac is not None: def j_eq(x): dy = jac(x) if issparse(dy): dy = dy.toarray() dy = np.atleast_2d(dy) return dy[i_eq, :] ceq[0]["jac"] = j_eq cineq = [] n_bound_below = np.sum(i_bound_below) n_bound_above = np.sum(i_bound_above) if n_bound_below + n_bound_above: def f_ineq(x): y = np.zeros(n_bound_below + n_bound_above) y_all = np.array(fun(x)).flatten() y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below] y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above]) return y cineq = [{"type": "ineq", "fun": f_ineq}] if jac is not None: def j_ineq(x): dy = np.zeros((n_bound_below + n_bound_above, len(x0))) dy_all = jac(x) if issparse(dy_all): dy_all = dy_all.toarray() dy_all = np.atleast_2d(dy_all) dy[:n_bound_below, :] = dy_all[i_bound_below] dy[n_bound_below:, :] = -dy_all[i_bound_above] return dy cineq[0]["jac"] = j_ineq old_constraints = ceq + cineq if len(old_constraints) > 1: warn("Equality and inequality constraints are specified in the same " "element of the constraint list. For efficient use with this " "method, equality and inequality constraints should be specified " "in separate elements of the constraint list. ", OptimizeWarning) return old_constraints def old_constraint_to_new(ic, con): """ Converts old-style constraint dictionaries to new-style constraint objects. """ # check type try: ctype = con['type'].lower() except KeyError as e: raise KeyError('Constraint %d has no type defined.' % ic) from e except TypeError as e: raise TypeError( 'Constraints must be a sequence of dictionaries.' ) from e except AttributeError as e: raise TypeError("Constraint's type must be a string.") from e else: if ctype not in ['eq', 'ineq']: raise ValueError("Unknown constraint type '%s'." % con['type']) if 'fun' not in con: raise ValueError('Constraint %d has no function defined.' % ic) lb = 0 if ctype == 'eq': ub = 0 else: ub = np.inf jac = '2-point' if 'args' in con: args = con['args'] def fun(x): return con["fun"](x, *args) if 'jac' in con: def jac(x): return con["jac"](x, *args) else: fun = con['fun'] if 'jac' in con: jac = con['jac'] return NonlinearConstraint(fun, lb, ub, jac)
22,552
37.552137
88
py
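The bound-conversion helpers and `PreparedConstraint` above can be exercised directly. The sketch below is illustrative only; it is not part of the scipy sources and assumes the private `scipy.optimize._constraints` import path stays as shown.

import numpy as np
from scipy.optimize import Bounds
from scipy.optimize._constraints import (   # private module, may move
    old_bound_to_new, new_bounds_to_old, PreparedConstraint)

# Old-style bounds: one (min, max) pair per variable, None meaning unbounded.
old = [(0, 1), (None, 5), (-2, None)]

# New-style representation: a pair of arrays (lb, ub).
lb, ub = old_bound_to_new(old)          # lb = [0., -inf, -2.], ub = [1., 5., inf]

# Round-trip back to the old representation.
print(new_bounds_to_old(lb, ub, 3))     # [(0.0, 1.0), (None, 5.0), (-2.0, None)]

# PreparedConstraint validates a constraint at an initial point and can
# report by how much another point violates it.
x0 = np.array([0.5, 2.0, 0.0])
pc = PreparedConstraint(Bounds(lb, ub), x0)
print(pc.violation([2.0, 10.0, -5.0]))  # [1. 5. 3.]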
scipy
scipy-main/scipy/optimize/_group_columns.py
""" Pythran implementation of columns grouping for finite difference Jacobian estimation. Used by ._numdiff.group_columns and based on the Cython version. """ import numpy as np #pythran export group_dense(int, int, intc[:,:]) #pythran export group_dense(int, int, int[:,:]) def group_dense(m, n, A): B = A.T # Transposed view for convenience. # FIXME: use np.full once pythran supports it groups = -np.ones(n, dtype=np.intp) current_group = 0 union = np.empty(m, dtype=np.intp) # Loop through all the columns. for i in range(n): if groups[i] >= 0: # A group was already assigned. continue groups[i] = current_group all_grouped = True union[:] = B[i] # Here we store the union of grouped columns. for j in range(groups.shape[0]): if groups[j] < 0: all_grouped = False else: continue # Determine if j-th column intersects with the union. intersect = False for k in range(m): if union[k] > 0 and B[j, k] > 0: intersect = True break # If not, add it to the union and assign the group to it. if not intersect: union += B[j] groups[j] = current_group if all_grouped: break current_group += 1 return groups #pythran export group_sparse(int, int, int32[], int32[]) #pythran export group_sparse(int, int, int64[], int64[]) #pythran export group_sparse(int, int, int32[::], int32[::]) #pythran export group_sparse(int, int, int64[::], int64[::]) def group_sparse(m, n, indices, indptr): groups = -np.ones(n, dtype=np.intp) current_group = 0 union = np.empty(m, dtype=np.intp) for i in range(n): if groups[i] >= 0: continue groups[i] = current_group all_grouped = True union.fill(0) for k in range(indptr[i], indptr[i + 1]): union[indices[k]] = 1 for j in range(groups.shape[0]): if groups[j] < 0: all_grouped = False else: continue intersect = False for k in range(indptr[j], indptr[j + 1]): if union[indices[k]] == 1: intersect = True break if not intersect: for k in range(indptr[j], indptr[j + 1]): union[indices[k]] = 1 groups[j] = current_group if all_grouped: break current_group += 1 return groups
2,659
26.142857
76
py
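To make the grouping above concrete, here is a small illustrative run (not part of the scipy sources). Calling `group_dense` straight from the private `scipy.optimize._group_columns` module is an assumption; within scipy it is normally reached via `scipy.optimize._numdiff.group_columns`.

import numpy as np
from scipy.optimize._group_columns import group_dense  # private module

# Sparsity pattern of a Jacobian: 1 marks a structurally non-zero entry.
A = np.array([[1, 0, 1, 0],
              [0, 1, 0, 1],
              [1, 1, 0, 0]], dtype=np.intc)
m, n = A.shape

groups = group_dense(m, n, A)
print(groups)   # [0 1 1 0]: columns 0 and 3 never share a row, nor do 1 and 2,
                # so each pair can be perturbed with a single extra evaluation.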
scipy
scipy-main/scipy/optimize/_tnc.py
# TNC Python interface # @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $ # Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org) # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ TNC: A Python interface to the TNC non-linear optimizer TNC is a non-linear optimizer. To use it, you must provide a function to minimize. The function must take one argument: the list of coordinates where to evaluate the function; and it must return either a tuple, whose first element is the value of the function, and whose second argument is the gradient of the function (as a list of values); or None, to abort the minimization. """ from scipy.optimize import _moduleTNC as moduleTNC from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options, _prepare_scalar_function) from ._constraints import old_bound_to_new from numpy import inf, array, zeros, asfarray __all__ = ['fmin_tnc'] MSG_NONE = 0 # No messages MSG_ITER = 1 # One line per iteration MSG_INFO = 2 # Informational messages MSG_VERS = 4 # Version info MSG_EXIT = 8 # Exit reasons MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT MSGS = { MSG_NONE: "No messages", MSG_ITER: "One line per iteration", MSG_INFO: "Informational messages", MSG_VERS: "Version info", MSG_EXIT: "Exit reasons", MSG_ALL: "All messages" } INFEASIBLE = -1 # Infeasible (lower bound > upper bound) LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0) FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0) XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0) MAXFUN = 3 # Max. number of function evaluations reached LSFAIL = 4 # Linear search failed CONSTANT = 5 # All lower bounds are equal to the upper bounds NOPROGRESS = 6 # Unable to progress USERABORT = 7 # User requested end of minimization RCSTRINGS = { INFEASIBLE: "Infeasible (lower bound > upper bound)", LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)", FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)", XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)", MAXFUN: "Max. number of function evaluations reached", LSFAIL: "Linear search failed", CONSTANT: "All lower bounds are equal to the upper bounds", NOPROGRESS: "Unable to progress", USERABORT: "User requested end of minimization" } # Changes to interface made by Travis Oliphant, Apr. 
2004 for inclusion in # SciPy def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, epsilon=1e-8, scale=None, offset=None, messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, rescale=-1, disp=None, callback=None): """ Minimize a function with variables subject to bounds, using gradient information in a truncated Newton algorithm. This method wraps a C implementation of the algorithm. Parameters ---------- func : callable ``func(x, *args)`` Function to minimize. Must do one of: 1. Return f and g, where f is the value of the function and g its gradient (a list of floats). 2. Return the function value but supply gradient function separately as `fprime`. 3. Return the function value and set ``approx_grad=True``. If the function returns None, the minimization is aborted. x0 : array_like Initial estimate of minimum. fprime : callable ``fprime(x, *args)``, optional Gradient of `func`. If None, then either `func` must return the function value and the gradient (``f,g = func(x, *args)``) or `approx_grad` must be True. args : tuple, optional Arguments to pass to function. approx_grad : bool, optional If true, approximate the gradient numerically. bounds : list, optional (min, max) pairs for each element in x0, defining the bounds on that parameter. Use None or +/-inf for one of min or max when there is no bound in that direction. epsilon : float, optional Used if approx_grad is True. The stepsize in a finite difference approximation for fprime. scale : array_like, optional Scaling factors to apply to each variable. If None, the factors are up-low for interval bounded variables and 1+|x| for the others. Defaults to None. offset : array_like, optional Value to subtract from each variable. If None, the offsets are (up+low)/2 for interval bounded variables and x for the others. messages : int, optional Bit mask used to select messages display during minimization values defined in the MSGS dict. Defaults to MGS_ALL. disp : int, optional Integer interface to messages. 0 = no message, 5 = all messages maxCGit : int, optional Maximum number of hessian*vector evaluations per main iteration. If maxCGit == 0, the direction chosen is -gradient if maxCGit < 0, maxCGit is set to max(1,min(50,n/2)). Defaults to -1. maxfun : int, optional Maximum number of function evaluation. If None, maxfun is set to max(100, 10*len(x0)). Defaults to None. Note that this function may violate the limit because of evaluating gradients by numerical differentiation. eta : float, optional Severity of the line search. If < 0 or > 1, set to 0.25. Defaults to -1. stepmx : float, optional Maximum step for the line search. May be increased during call. If too small, it will be set to 10.0. Defaults to 0. accuracy : float, optional Relative precision for finite difference calculations. If <= machine_precision, set to sqrt(machine_precision). Defaults to 0. fmin : float, optional Minimum function value estimate. Defaults to 0. ftol : float, optional Precision goal for the value of f in the stopping criterion. If ftol < 0.0, ftol is set to 0.0 defaults to -1. xtol : float, optional Precision goal for the value of x in the stopping criterion (after applying x scaling factors). If xtol < 0.0, xtol is set to sqrt(machine_precision). Defaults to -1. pgtol : float, optional Precision goal for the value of the projected gradient in the stopping criterion (after applying x scaling factors). If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy). 
Setting it to 0.0 is not recommended. Defaults to -1. rescale : float, optional Scaling factor (in log10) used to trigger f value rescaling. If 0, rescale at each iteration. If a large value, never rescale. If < 0, rescale is set to 1.3. callback : callable, optional Called after each iteration, as callback(xk), where xk is the current parameter vector. Returns ------- x : ndarray The solution. nfeval : int The number of function evaluations. rc : int Return code, see below See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'TNC' `method` in particular. Notes ----- The underlying algorithm is truncated Newton, also called Newton Conjugate-Gradient. This method differs from scipy.optimize.fmin_ncg in that 1. it wraps a C implementation of the algorithm 2. it allows each variable to be given an upper and lower bound. The algorithm incorporates the bound constraints by determining the descent direction as in an unconstrained truncated Newton, but never taking a step-size large enough to leave the space of feasible x's. The algorithm keeps track of a set of currently active constraints, and ignores them when computing the minimum allowable step size. (The x's associated with the active constraint are kept fixed.) If the maximum allowable step size is zero then a new constraint is added. At the end of each iteration one of the constraints may be deemed no longer active and removed. A constraint is considered no longer active is if it is currently active but the gradient for that variable points inward from the constraint. The specific constraint removed is the one associated with the variable of largest index whose constraint is no longer active. Return codes are defined as follows:: -1 : Infeasible (lower bound > upper bound) 0 : Local minimum reached (|pg| ~= 0) 1 : Converged (|f_n-f_(n-1)| ~= 0) 2 : Converged (|x_n-x_(n-1)| ~= 0) 3 : Max. number of function evaluations reached 4 : Linear search failed 5 : All lower bounds are equal to the upper bounds 6 : Unable to progress 7 : User requested end of minimization References ---------- Wright S., Nocedal J. (2006), 'Numerical Optimization' Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method", SIAM Journal of Numerical Analysis 21, pp. 770-778 """ # handle fprime/approx_grad if approx_grad: fun = func jac = None elif fprime is None: fun = MemoizeJac(func) jac = fun.derivative else: fun = func jac = fprime if disp is not None: # disp takes precedence over messages mesg_num = disp else: mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, 4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL) # build options opts = {'eps': epsilon, 'scale': scale, 'offset': offset, 'mesg_num': mesg_num, 'maxCGit': maxCGit, 'maxfun': maxfun, 'eta': eta, 'stepmx': stepmx, 'accuracy': accuracy, 'minfev': fmin, 'ftol': ftol, 'xtol': xtol, 'gtol': pgtol, 'rescale': rescale, 'disp': False} res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts) return res['x'], res['nfev'], res['status'] def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, eps=1e-8, scale=None, offset=None, mesg_num=None, maxCGit=-1, eta=-1, stepmx=0, accuracy=0, minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False, callback=None, finite_diff_rel_step=None, maxfun=None, **unknown_options): """ Minimize a scalar function of one or more variables using a truncated Newton (TNC) algorithm. 
Options ------- eps : float or ndarray If `jac is None` the absolute step size used for numerical approximation of the jacobian via forward differences. scale : list of floats Scaling factors to apply to each variable. If None, the factors are up-low for interval bounded variables and 1+|x] fo the others. Defaults to None. offset : float Value to subtract from each variable. If None, the offsets are (up+low)/2 for interval bounded variables and x for the others. disp : bool Set to True to print convergence messages. maxCGit : int Maximum number of hessian*vector evaluations per main iteration. If maxCGit == 0, the direction chosen is -gradient if maxCGit < 0, maxCGit is set to max(1,min(50,n/2)). Defaults to -1. eta : float Severity of the line search. If < 0 or > 1, set to 0.25. Defaults to -1. stepmx : float Maximum step for the line search. May be increased during call. If too small, it will be set to 10.0. Defaults to 0. accuracy : float Relative precision for finite difference calculations. If <= machine_precision, set to sqrt(machine_precision). Defaults to 0. minfev : float Minimum function value estimate. Defaults to 0. ftol : float Precision goal for the value of f in the stopping criterion. If ftol < 0.0, ftol is set to 0.0 defaults to -1. xtol : float Precision goal for the value of x in the stopping criterion (after applying x scaling factors). If xtol < 0.0, xtol is set to sqrt(machine_precision). Defaults to -1. gtol : float Precision goal for the value of the projected gradient in the stopping criterion (after applying x scaling factors). If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy). Setting it to 0.0 is not recommended. Defaults to -1. rescale : float Scaling factor (in log10) used to trigger f value rescaling. If 0, rescale at each iteration. If a large value, never rescale. If < 0, rescale is set to 1.3. finite_diff_rel_step : None or array_like, optional If `jac in ['2-point', '3-point', 'cs']` the relative step size to use for numerical approximation of the jacobian. The absolute step size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically. maxfun : int Maximum number of function evaluations. If None, `maxfun` is set to max(100, 10*len(x0)). Defaults to None. """ _check_unknown_options(unknown_options) fmin = minfev pgtol = gtol x0 = asfarray(x0).flatten() n = len(x0) if bounds is None: bounds = [(None,None)] * n if len(bounds) != n: raise ValueError('length of x0 != length of bounds') new_bounds = old_bound_to_new(bounds) if mesg_num is not None: messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS, 4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL) elif disp: messages = MSG_ALL else: messages = MSG_NONE sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, finite_diff_rel_step=finite_diff_rel_step, bounds=new_bounds) func_and_grad = sf.fun_and_grad """ low, up : the bounds (lists of floats) if low is None, the lower bounds are removed. if up is None, the upper bounds are removed. 
low and up defaults to None """ low = zeros(n) up = zeros(n) for i in range(n): if bounds[i] is None: l, u = -inf, inf else: l,u = bounds[i] if l is None: low[i] = -inf else: low[i] = l if u is None: up[i] = inf else: up[i] = u if scale is None: scale = array([]) if offset is None: offset = array([]) if maxfun is None: maxfun = max(100, 10*len(x0)) rc, nf, nit, x, funv, jacv = moduleTNC.tnc_minimize( func_and_grad, x0, low, up, scale, offset, messages, maxCGit, maxfun, eta, stepmx, accuracy, fmin, ftol, xtol, pgtol, rescale, callback ) # the TNC documentation states: "On output, x, f and g may be very # slightly out of sync because of scaling". Therefore re-evaluate # func_and_grad so they are synced. funv, jacv = func_and_grad(x) return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev, nit=nit, status=rc, message=RCSTRINGS[rc], success=(-1 < rc < 3))
16,678
38.337264
84
py
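A short usage sketch for the wrapper above (mine, not from the scipy sources): minimizing a bound-constrained quadratic, first through `fmin_tnc` and then through the recommended `minimize(..., method='TNC')` interface.

import numpy as np
from scipy.optimize import fmin_tnc, minimize

def f_and_grad(x):
    # Objective and its gradient returned together, as fmin_tnc expects
    # when neither `fprime` nor `approx_grad` is supplied.
    f = (x[0] - 3.0)**2 + (x[1] + 1.0)**2
    g = np.array([2.0 * (x[0] - 3.0), 2.0 * (x[1] + 1.0)])
    return f, g

x0 = [0.0, 0.0]
bounds = [(0, 2), (-0.5, None)]        # None means unbounded on that side

x, nfeval, rc = fmin_tnc(f_and_grad, x0, bounds=bounds, messages=0)
print(x, rc)                           # approximately [2.0, -0.5], rc in {1, 2}

res = minimize(f_and_grad, x0, method='TNC', jac=True, bounds=bounds)
print(res.x, res.status)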
scipy
scipy-main/scipy/optimize/cobyla.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.optimize` namespace for importing the functions
# included below.

import warnings
from . import _cobyla_py

__all__ = [  # noqa: F822
    'OptimizeResult',
    'RLock',
    'fmin_cobyla',
    'functools',
    'izip',
    'synchronized',
]


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        raise AttributeError(
            "scipy.optimize.cobyla is deprecated and has no attribute "
            f"{name}. Try looking in scipy.optimize instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
                  "the `scipy.optimize.cobyla` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_cobyla_py, name)
840
25.28125
78
py
scipy
scipy-main/scipy/optimize/nonlin.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.optimize` namespace for importing the functions
# included below.

import warnings
from . import _nonlin

__all__ = [  # noqa: F822
    'Anderson',
    'BroydenFirst',
    'BroydenSecond',
    'DiagBroyden',
    'ExcitingMixing',
    'GenericBroyden',
    'InverseJacobian',
    'Jacobian',
    'KrylovJacobian',
    'LinAlgError',
    'LinearMixing',
    'LowRankMatrix',
    'NoConvergence',
    'TerminationCondition',
    'anderson',
    'asarray',
    'asjacobian',
    'broyden1',
    'broyden2',
    'diagbroyden',
    'dot',
    'excitingmixing',
    'get_blas_funcs',
    'inspect',
    'inv',
    'linearmixing',
    'maxnorm',
    'newton_krylov',
    'nonlin_solve',
    'norm',
    'qr',
    'scalar_search_armijo',
    'scalar_search_wolfe1',
    'scipy',
    'solve',
    'svd',
    'sys',
    'vdot',
]


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        raise AttributeError(
            "scipy.optimize.nonlin is deprecated and has no attribute "
            f"{name}. Try looking in scipy.optimize instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
                  "the `scipy.optimize.nonlin` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_nonlin, name)
1,418
20.5
78
py
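Both deprecation shims above (`cobyla` and `nonlin`) behave the same way for callers. The sketch below is an assumption-laden illustration (it requires a SciPy 1.x release where the deprecated modules still exist): the old import path keeps working but emits a DeprecationWarning, and the object returned is the same one exposed by `scipy.optimize`.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from scipy.optimize.nonlin import broyden1        # deprecated path
    print(caught[0].category.__name__)                # DeprecationWarning

from scipy.optimize import broyden1 as broyden1_new   # preferred path
print(broyden1 is broyden1_new)                        # True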
scipy
scipy-main/scipy/optimize/_slsqp_py.py
""" This module implements the Sequential Least Squares Programming optimization algorithm (SLSQP), originally developed by Dieter Kraft. See http://www.netlib.org/toms/733 Functions --------- .. autosummary:: :toctree: generated/ approx_jacobian fmin_slsqp """ __all__ = ['approx_jacobian', 'fmin_slsqp'] import numpy as np from scipy.optimize._slsqp import slsqp from numpy import (zeros, array, linalg, append, asfarray, concatenate, finfo, sqrt, vstack, isfinite, atleast_1d) from ._optimize import (OptimizeResult, _check_unknown_options, _prepare_scalar_function, _clip_x_for_func, _check_clip_x) from ._numdiff import approx_derivative from ._constraints import old_bound_to_new, _arr_to_scalar __docformat__ = "restructuredtext en" _epsilon = sqrt(finfo(float).eps) def approx_jacobian(x, func, epsilon, *args): """ Approximate the Jacobian matrix of a callable function. Parameters ---------- x : array_like The state vector at which to compute the Jacobian matrix. func : callable f(x,*args) The vector-valued function. epsilon : float The perturbation used to determine the partial derivatives. args : sequence Additional arguments passed to func. Returns ------- An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length of the outputs of `func`, and ``lenx`` is the number of elements in `x`. Notes ----- The approximation is done using forward differences. """ # approx_derivative returns (m, n) == (lenf, lenx) jac = approx_derivative(func, x, method='2-point', abs_step=epsilon, args=args) # if func returns a scalar jac.shape will be (lenx,). Make sure # it's at least a 2D array. return np.atleast_2d(jac) def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None, bounds=(), fprime=None, fprime_eqcons=None, fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6, iprint=1, disp=None, full_output=0, epsilon=_epsilon, callback=None): """ Minimize a function using Sequential Least Squares Programming Python interface function for the SLSQP Optimization subroutine originally implemented by Dieter Kraft. Parameters ---------- func : callable f(x,*args) Objective function. Must return a scalar. x0 : 1-D ndarray of float Initial guess for the independent variable(s). eqcons : list, optional A list of functions of length n such that eqcons[j](x,*args) == 0.0 in a successfully optimized problem. f_eqcons : callable f(x,*args), optional Returns a 1-D array in which each element must equal 0.0 in a successfully optimized problem. If f_eqcons is specified, eqcons is ignored. ieqcons : list, optional A list of functions of length n such that ieqcons[j](x,*args) >= 0.0 in a successfully optimized problem. f_ieqcons : callable f(x,*args), optional Returns a 1-D ndarray in which each element must be greater or equal to 0.0 in a successfully optimized problem. If f_ieqcons is specified, ieqcons is ignored. bounds : list, optional A list of tuples specifying the lower and upper bound for each independent variable [(xl0, xu0),(xl1, xu1),...] Infinite values will be interpreted as large floating values. fprime : callable `f(x,*args)`, optional A function that evaluates the partial derivatives of func. fprime_eqcons : callable `f(x,*args)`, optional A function of the form `f(x, *args)` that returns the m by n array of equality constraint normals. If not provided, the normals will be approximated. The array returned by fprime_eqcons should be sized as ( len(eqcons), len(x0) ). 
fprime_ieqcons : callable `f(x,*args)`, optional A function of the form `f(x, *args)` that returns the m by n array of inequality constraint normals. If not provided, the normals will be approximated. The array returned by fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ). args : sequence, optional Additional arguments passed to func and fprime. iter : int, optional The maximum number of iterations. acc : float, optional Requested accuracy. iprint : int, optional The verbosity of fmin_slsqp : * iprint <= 0 : Silent operation * iprint == 1 : Print summary upon completion (default) * iprint >= 2 : Print status of each iterate and summary disp : int, optional Overrides the iprint interface (preferred). full_output : bool, optional If False, return only the minimizer of func (default). Otherwise, output final objective function and summary information. epsilon : float, optional The step size for finite-difference derivative estimates. callback : callable, optional Called after each iteration, as ``callback(x)``, where ``x`` is the current parameter vector. Returns ------- out : ndarray of float The final minimizer of func. fx : ndarray of float, if full_output is true The final value of the objective function. its : int, if full_output is true The number of iterations. imode : int, if full_output is true The exit mode from the optimizer (see below). smode : string, if full_output is true Message describing the exit mode from the optimizer. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'SLSQP' `method` in particular. Notes ----- Exit modes are defined as follows :: -1 : Gradient evaluation required (g & a) 0 : Optimization terminated successfully 1 : Function evaluation required (f & c) 2 : More equality constraints than independent variables 3 : More than 3*n iterations in LSQ subproblem 4 : Inequality constraints incompatible 5 : Singular matrix E in LSQ subproblem 6 : Singular matrix C in LSQ subproblem 7 : Rank-deficient equality constraint subproblem HFTI 8 : Positive directional derivative for linesearch 9 : Iteration limit reached Examples -------- Examples are given :ref:`in the tutorial <tutorial-sqlsp>`. """ if disp is not None: iprint = disp opts = {'maxiter': iter, 'ftol': acc, 'iprint': iprint, 'disp': iprint != 0, 'eps': epsilon, 'callback': callback} # Build the constraints as a tuple of dictionaries cons = () # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take # the same extra arguments as the objective function. cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons) cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons) # 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian # (fprime_eqcons, fprime_ieqcons); also take the same extra arguments # as the objective function. 
if f_eqcons: cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons, 'args': args}, ) if f_ieqcons: cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons, 'args': args}, ) res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds, constraints=cons, **opts) if full_output: return res['x'], res['fun'], res['nit'], res['status'], res['message'] else: return res['x'] def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None, constraints=(), maxiter=100, ftol=1.0E-6, iprint=1, disp=False, eps=_epsilon, callback=None, finite_diff_rel_step=None, **unknown_options): """ Minimize a scalar function of one or more variables using Sequential Least Squares Programming (SLSQP). Options ------- ftol : float Precision goal for the value of f in the stopping criterion. eps : float Step size used for numerical approximation of the Jacobian. disp : bool Set to True to print convergence messages. If False, `verbosity` is ignored and set to 0. maxiter : int Maximum number of iterations. finite_diff_rel_step : None or array_like, optional If `jac in ['2-point', '3-point', 'cs']` the relative step size to use for numerical approximation of `jac`. The absolute step size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically. """ _check_unknown_options(unknown_options) iter = maxiter - 1 acc = ftol epsilon = eps if not disp: iprint = 0 # Transform x0 into an array. x = asfarray(x0).flatten() # SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by # ScalarFunction if bounds is None or len(bounds) == 0: new_bounds = (-np.inf, np.inf) else: new_bounds = old_bound_to_new(bounds) # clip the initial guess to bounds, otherwise ScalarFunction doesn't work x = np.clip(x, new_bounds[0], new_bounds[1]) # Constraints are triaged per type into a dictionary of tuples if isinstance(constraints, dict): constraints = (constraints, ) cons = {'eq': (), 'ineq': ()} for ic, con in enumerate(constraints): # check type try: ctype = con['type'].lower() except KeyError as e: raise KeyError('Constraint %d has no type defined.' % ic) from e except TypeError as e: raise TypeError('Constraints must be defined using a ' 'dictionary.') from e except AttributeError as e: raise TypeError("Constraint's type must be a string.") from e else: if ctype not in ['eq', 'ineq']: raise ValueError("Unknown constraint type '%s'." % con['type']) # check function if 'fun' not in con: raise ValueError('Constraint %d has no function defined.' % ic) # check Jacobian cjac = con.get('jac') if cjac is None: # approximate Jacobian function. The factory function is needed # to keep a reference to `fun`, see gh-4240. 
def cjac_factory(fun): def cjac(x, *args): x = _check_clip_x(x, new_bounds) if jac in ['2-point', '3-point', 'cs']: return approx_derivative(fun, x, method=jac, args=args, rel_step=finite_diff_rel_step, bounds=new_bounds) else: return approx_derivative(fun, x, method='2-point', abs_step=epsilon, args=args, bounds=new_bounds) return cjac cjac = cjac_factory(con['fun']) # update constraints' dictionary cons[ctype] += ({'fun': con['fun'], 'jac': cjac, 'args': con.get('args', ())}, ) exit_modes = {-1: "Gradient evaluation required (g & a)", 0: "Optimization terminated successfully", 1: "Function evaluation required (f & c)", 2: "More equality constraints than independent variables", 3: "More than 3*n iterations in LSQ subproblem", 4: "Inequality constraints incompatible", 5: "Singular matrix E in LSQ subproblem", 6: "Singular matrix C in LSQ subproblem", 7: "Rank-deficient equality constraint subproblem HFTI", 8: "Positive directional derivative for linesearch", 9: "Iteration limit reached"} # Set the parameters that SLSQP will need # meq, mieq: number of equality and inequality constraints meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) for c in cons['eq']])) mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args'])) for c in cons['ineq']])) # m = The total number of constraints m = meq + mieq # la = The number of constraints, or 1 if there are no constraints la = array([1, m]).max() # n = The number of independent variables n = len(x) # Define the workspaces for SLSQP n1 = n + 1 mineq = m - meq + n1 + n1 len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \ + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1 len_jw = mineq w = zeros(len_w) jw = zeros(len_jw) # Decompose bounds into xl and xu if bounds is None or len(bounds) == 0: xl = np.empty(n, dtype=float) xu = np.empty(n, dtype=float) xl.fill(np.nan) xu.fill(np.nan) else: bnds = array([(_arr_to_scalar(l), _arr_to_scalar(u)) for (l, u) in bounds], float) if bnds.shape[0] != n: raise IndexError('SLSQP Error: the length of bounds is not ' 'compatible with that of x0.') with np.errstate(invalid='ignore'): bnderr = bnds[:, 0] > bnds[:, 1] if bnderr.any(): raise ValueError('SLSQP Error: lb > ub in bounds %s.' % ', '.join(str(b) for b in bnderr)) xl, xu = bnds[:, 0], bnds[:, 1] # Mark infinite bounds with nans; the Fortran code understands this infbnd = ~isfinite(bnds) xl[infbnd[:, 0]] = np.nan xu[infbnd[:, 1]] = np.nan # ScalarFunction provides function and gradient evaluation sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps, finite_diff_rel_step=finite_diff_rel_step, bounds=new_bounds) # gh11403 SLSQP sometimes exceeds bounds by 1 or 2 ULP, make sure this # doesn't get sent to the func/grad evaluator. 
wrapped_fun = _clip_x_for_func(sf.fun, new_bounds) wrapped_grad = _clip_x_for_func(sf.grad, new_bounds) # Initialize the iteration counter and the mode value mode = array(0, int) acc = array(acc, float) majiter = array(iter, int) majiter_prev = 0 # Initialize internal SLSQP state variables alpha = array(0, float) f0 = array(0, float) gs = array(0, float) h1 = array(0, float) h2 = array(0, float) h3 = array(0, float) h4 = array(0, float) t = array(0, float) t0 = array(0, float) tol = array(0, float) iexact = array(0, int) incons = array(0, int) ireset = array(0, int) itermx = array(0, int) line = array(0, int) n1 = array(0, int) n2 = array(0, int) n3 = array(0, int) # Print the header if iprint >= 2 if iprint >= 2: print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM")) # mode is zero on entry, so call objective, constraints and gradients # there should be no func evaluations here because it's cached from # ScalarFunction fx = wrapped_fun(x) g = append(wrapped_grad(x), 0.0) c = _eval_constraint(x, cons) a = _eval_con_normals(x, cons, la, n, m, meq, mieq) while 1: # Call SLSQP slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw, alpha, f0, gs, h1, h2, h3, h4, t, t0, tol, iexact, incons, ireset, itermx, line, n1, n2, n3) if mode == 1: # objective and constraint evaluation required fx = wrapped_fun(x) c = _eval_constraint(x, cons) if mode == -1: # gradient evaluation required g = append(wrapped_grad(x), 0.0) a = _eval_con_normals(x, cons, la, n, m, meq, mieq) if majiter > majiter_prev: # call callback if major iteration has incremented if callback is not None: callback(np.copy(x)) # Print the status of the current iterate if iprint > 2 if iprint >= 2: print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev, fx, linalg.norm(g))) # If exit mode is not -1 or 1, slsqp has completed if abs(mode) != 1: break majiter_prev = int(majiter) # Optimization loop complete. Print status if requested if iprint >= 1: print(exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')') print(" Current function value:", fx) print(" Iterations:", majiter) print(" Function evaluations:", sf.nfev) print(" Gradient evaluations:", sf.ngev) return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter), nfev=sf.nfev, njev=sf.ngev, status=int(mode), message=exit_modes[int(mode)], success=(mode == 0)) def _eval_constraint(x, cons): # Compute constraints if cons['eq']: c_eq = concatenate([atleast_1d(con['fun'](x, *con['args'])) for con in cons['eq']]) else: c_eq = zeros(0) if cons['ineq']: c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args'])) for con in cons['ineq']]) else: c_ieq = zeros(0) # Now combine c_eq and c_ieq into a single matrix c = concatenate((c_eq, c_ieq)) return c def _eval_con_normals(x, cons, la, n, m, meq, mieq): # Compute the normals of the constraints if cons['eq']: a_eq = vstack([con['jac'](x, *con['args']) for con in cons['eq']]) else: # no equality constraint a_eq = zeros((meq, n)) if cons['ineq']: a_ieq = vstack([con['jac'](x, *con['args']) for con in cons['ineq']]) else: # no inequality constraint a_ieq = zeros((mieq, n)) # Now combine a_eq and a_ieq into a single a matrix if m == 0: # no constraints a = zeros((la, n)) else: a = vstack((a_eq, a_ieq)) a = concatenate((a, zeros([la, 1])), 1) return a
18,767
36.164356
80
py
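A minimal SLSQP usage sketch (illustrative, not from the scipy sources): minimize x0**2 + x1**2 subject to x0 + x1 >= 1 through the `fmin_slsqp` interface documented above.

import numpy as np
from scipy.optimize import fmin_slsqp

def objective(x):
    return x[0]**2 + x[1]**2

def ieq(x):
    # Inequality constraints must be >= 0 at a feasible point.
    return np.array([x[0] + x[1] - 1.0])

x_opt = fmin_slsqp(objective, x0=[2.0, 2.0], f_ieqcons=ieq, iprint=0)
print(x_opt)   # approximately [0.5, 0.5]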
scipy
scipy-main/scipy/optimize/_linprog_highs.py
"""HiGHS Linear Optimization Methods Interface to HiGHS linear optimization software. https://highs.dev/ .. versionadded:: 1.5.0 References ---------- .. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex method." Mathematical Programming Computation, 10 (1), 119-142, 2018. DOI: 10.1007/s12532-017-0130-5 """ import inspect import numpy as np from ._optimize import OptimizeWarning, OptimizeResult from warnings import warn from ._highs._highs_wrapper import _highs_wrapper from ._highs._highs_constants import ( CONST_INF, MESSAGE_LEVEL_NONE, HIGHS_OBJECTIVE_SENSE_MINIMIZE, MODEL_STATUS_NOTSET, MODEL_STATUS_LOAD_ERROR, MODEL_STATUS_MODEL_ERROR, MODEL_STATUS_PRESOLVE_ERROR, MODEL_STATUS_SOLVE_ERROR, MODEL_STATUS_POSTSOLVE_ERROR, MODEL_STATUS_MODEL_EMPTY, MODEL_STATUS_OPTIMAL, MODEL_STATUS_INFEASIBLE, MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE, MODEL_STATUS_UNBOUNDED, MODEL_STATUS_REACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND as MODEL_STATUS_RDOVUB, MODEL_STATUS_REACHED_OBJECTIVE_TARGET, MODEL_STATUS_REACHED_TIME_LIMIT, MODEL_STATUS_REACHED_ITERATION_LIMIT, HIGHS_SIMPLEX_STRATEGY_DUAL, HIGHS_SIMPLEX_CRASH_STRATEGY_OFF, HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE, HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG, HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX, HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE, ) from scipy.sparse import csc_matrix, vstack, issparse def _highs_to_scipy_status_message(highs_status, highs_message): """Converts HiGHS status number/message to SciPy status number/message""" scipy_statuses_messages = { None: (4, "HiGHS did not provide a status code. "), MODEL_STATUS_NOTSET: (4, ""), MODEL_STATUS_LOAD_ERROR: (4, ""), MODEL_STATUS_MODEL_ERROR: (2, ""), MODEL_STATUS_PRESOLVE_ERROR: (4, ""), MODEL_STATUS_SOLVE_ERROR: (4, ""), MODEL_STATUS_POSTSOLVE_ERROR: (4, ""), MODEL_STATUS_MODEL_EMPTY: (4, ""), MODEL_STATUS_RDOVUB: (4, ""), MODEL_STATUS_REACHED_OBJECTIVE_TARGET: (4, ""), MODEL_STATUS_OPTIMAL: (0, "Optimization terminated successfully. "), MODEL_STATUS_REACHED_TIME_LIMIT: (1, "Time limit reached. "), MODEL_STATUS_REACHED_ITERATION_LIMIT: (1, "Iteration limit reached. "), MODEL_STATUS_INFEASIBLE: (2, "The problem is infeasible. "), MODEL_STATUS_UNBOUNDED: (3, "The problem is unbounded. "), MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE: (4, "The problem is unbounded " "or infeasible. ")} unrecognized = (4, "The HiGHS status code was not recognized. ") scipy_status, scipy_message = ( scipy_statuses_messages.get(highs_status, unrecognized)) scipy_message = (f"{scipy_message}" f"(HiGHS Status {highs_status}: {highs_message})") return scipy_status, scipy_message def _replace_inf(x): # Replace `np.inf` with CONST_INF infs = np.isinf(x) with np.errstate(invalid="ignore"): x[infs] = np.sign(x[infs])*CONST_INF return x def _convert_to_highs_enum(option, option_str, choices): # If option is in the choices we can look it up, if not use # the default value taken from function signature and warn: try: return choices[option.lower()] except AttributeError: return choices[option] except KeyError: sig = inspect.signature(_linprog_highs) default_str = sig.parameters[option_str].default warn(f"Option {option_str} is {option}, but only values in " f"{set(choices.keys())} are allowed. 
Using default: " f"{default_str}.", OptimizeWarning, stacklevel=3) return choices[default_str] def _linprog_highs(lp, solver, time_limit=None, presolve=True, disp=False, maxiter=None, dual_feasibility_tolerance=None, primal_feasibility_tolerance=None, ipm_optimality_tolerance=None, simplex_dual_edge_weight_strategy=None, mip_rel_gap=None, mip_max_nodes=None, **unknown_options): r""" Solve the following linear programming problem using one of the HiGHS solvers: User-facing documentation is in _linprog_doc.py. Parameters ---------- lp : _LPProblem A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``. solver : "ipm" or "simplex" or None Which HiGHS solver to use. If ``None``, "simplex" will be used. Options ------- maxiter : int The maximum number of iterations to perform in either phase. For ``solver='ipm'``, this does not include the number of crossover iterations. Default is the largest possible value for an ``int`` on the platform. disp : bool Set to ``True`` if indicators of optimization status are to be printed to the console each iteration; default ``False``. time_limit : float The maximum time in seconds allotted to solve the problem; default is the largest possible value for a ``double`` on the platform. presolve : bool Presolve attempts to identify trivial infeasibilities, identify trivial unboundedness, and simplify the problem before sending it to the main solver. It is generally recommended to keep the default setting ``True``; set to ``False`` if presolve is to be disabled. dual_feasibility_tolerance : double Dual feasibility tolerance. Default is 1e-07. The minimum of this and ``primal_feasibility_tolerance`` is used for the feasibility tolerance when ``solver='ipm'``. primal_feasibility_tolerance : double Primal feasibility tolerance. Default is 1e-07. The minimum of this and ``dual_feasibility_tolerance`` is used for the feasibility tolerance when ``solver='ipm'``. ipm_optimality_tolerance : double Optimality tolerance for ``solver='ipm'``. Default is 1e-08. Minimum possible value is 1e-12 and must be smaller than the largest possible value for a ``double`` on the platform. simplex_dual_edge_weight_strategy : str (default: None) Strategy for simplex dual edge weights. The default, ``None``, automatically selects one of the following. ``'dantzig'`` uses Dantzig's original strategy of choosing the most negative reduced cost. ``'devex'`` uses the strategy described in [15]_. ``steepest`` uses the exact steepest edge strategy as described in [16]_. ``'steepest-devex'`` begins with the exact steepest edge strategy until the computation is too costly or inexact and then switches to the devex method. Curently, using ``None`` always selects ``'steepest-devex'``, but this may change as new options become available. mip_max_nodes : int The maximum number of nodes allotted to solve the problem; default is the largest possible value for a ``HighsInt`` on the platform. Ignored if not using the MIP solver. unknown_options : dict Optional arguments not used by this particular solver. If ``unknown_options`` is non-empty, a warning is issued listing all unused options. Returns ------- sol : dict A dictionary consisting of the fields: x : 1D array The values of the decision variables that minimizes the objective function while satisfying the constraints. fun : float The optimal value of the objective function ``c @ x``. slack : 1D array The (nominally positive) values of the slack, ``b_ub - A_ub @ x``. 
con : 1D array The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. success : bool ``True`` when the algorithm succeeds in finding an optimal solution. status : int An integer representing the exit status of the algorithm. ``0`` : Optimization terminated successfully. ``1`` : Iteration or time limit reached. ``2`` : Problem appears to be infeasible. ``3`` : Problem appears to be unbounded. ``4`` : The HiGHS solver ran into a problem. message : str A string descriptor of the exit status of the algorithm. nit : int The total number of iterations performed. For ``solver='simplex'``, this includes iterations in all phases. For ``solver='ipm'``, this does not include crossover iterations. crossover_nit : int The number of primal/dual pushes performed during the crossover routine for ``solver='ipm'``. This is ``0`` for ``solver='simplex'``. ineqlin : OptimizeResult Solution and sensitivity information corresponding to the inequality constraints, `b_ub`. A dictionary consisting of the fields: residual : np.ndnarray The (nominally positive) values of the slack variables, ``b_ub - A_ub @ x``. This quantity is also commonly referred to as "slack". marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the right-hand side of the inequality constraints, `b_ub`. eqlin : OptimizeResult Solution and sensitivity information corresponding to the equality constraints, `b_eq`. A dictionary consisting of the fields: residual : np.ndarray The (nominally zero) residuals of the equality constraints, ``b_eq - A_eq @ x``. marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the right-hand side of the equality constraints, `b_eq`. lower, upper : OptimizeResult Solution and sensitivity information corresponding to the lower and upper bounds on decision variables, `bounds`. residual : np.ndarray The (nominally positive) values of the quantity ``x - lb`` (lower) or ``ub - x`` (upper). marginals : np.ndarray The sensitivity (partial derivative) of the objective function with respect to the lower and upper `bounds`. mip_node_count : int The number of subproblems or "nodes" solved by the MILP solver. Only present when `integrality` is not `None`. mip_dual_bound : float The MILP solver's final estimate of the lower bound on the optimal solution. Only present when `integrality` is not `None`. mip_gap : float The difference between the final objective function value and the final dual bound, scaled by the final objective function value. Only present when `integrality` is not `None`. Notes ----- The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain `marginals`, or partial derivatives of the objective function with respect to the right-hand side of each constraint. These partial derivatives are also referred to as "Lagrange multipliers", "dual values", and "shadow prices". The sign convention of `marginals` is opposite that of Lagrange multipliers produced by many nonlinear solvers. References ---------- .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code." Mathematical programming 5.1 (1973): 1-28. .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge simplex algorithm." Mathematical Programming 12.1 (1977): 361-371. """ if unknown_options: message = (f"Unrecognized options detected: {unknown_options}. 
" "These will be passed to HiGHS verbatim.") warn(message, OptimizeWarning, stacklevel=3) # Map options to HiGHS enum values simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum( simplex_dual_edge_weight_strategy, 'simplex_dual_edge_weight_strategy', choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG, 'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX, 'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE, 'steepest': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE, None: None}) c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp lb, ub = bounds.T.copy() # separate bounds, copy->C-cntgs # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints with np.errstate(invalid="ignore"): lhs_ub = -np.ones_like(b_ub)*np.inf # LHS of UB constraints is -inf rhs_ub = b_ub # RHS of UB constraints is b_ub lhs_eq = b_eq # Equality constaint is inequality rhs_eq = b_eq # constraint with LHS=RHS lhs = np.concatenate((lhs_ub, lhs_eq)) rhs = np.concatenate((rhs_ub, rhs_eq)) if issparse(A_ub) or issparse(A_eq): A = vstack((A_ub, A_eq)) else: A = np.vstack((A_ub, A_eq)) A = csc_matrix(A) options = { 'presolve': presolve, 'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE, 'solver': solver, 'time_limit': time_limit, 'highs_debug_level': MESSAGE_LEVEL_NONE, 'dual_feasibility_tolerance': dual_feasibility_tolerance, 'ipm_optimality_tolerance': ipm_optimality_tolerance, 'log_to_console': disp, 'mip_max_nodes': mip_max_nodes, 'output_flag': disp, 'primal_feasibility_tolerance': primal_feasibility_tolerance, 'simplex_dual_edge_weight_strategy': simplex_dual_edge_weight_strategy_enum, 'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL, 'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF, 'ipm_iteration_limit': maxiter, 'simplex_iteration_limit': maxiter, 'mip_rel_gap': mip_rel_gap, } options.update(unknown_options) # np.inf doesn't work; use very large constant rhs = _replace_inf(rhs) lhs = _replace_inf(lhs) lb = _replace_inf(lb) ub = _replace_inf(ub) if integrality is None or np.sum(integrality) == 0: integrality = np.empty(0) else: integrality = np.array(integrality) res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs, lb, ub, integrality.astype(np.uint8), options) # HiGHS represents constraints as lhs/rhs, so # Ax + s = b => Ax = b - s # and we need to split up s by A_ub and A_eq if 'slack' in res: slack = res['slack'] con = np.array(slack[len(b_ub):]) slack = np.array(slack[:len(b_ub)]) else: slack, con = None, None # lagrange multipliers for equalities/inequalities and upper/lower bounds if 'lambda' in res: lamda = res['lambda'] marg_ineqlin = np.array(lamda[:len(b_ub)]) marg_eqlin = np.array(lamda[len(b_ub):]) marg_upper = np.array(res['marg_bnds'][1, :]) marg_lower = np.array(res['marg_bnds'][0, :]) else: marg_ineqlin, marg_eqlin = None, None marg_upper, marg_lower = None, None # this needs to be updated if we start choosing the solver intelligently # Convert to scipy-style status and message highs_status = res.get('status', None) highs_message = res.get('message', None) status, message = _highs_to_scipy_status_message(highs_status, highs_message) x = np.array(res['x']) if 'x' in res else None sol = {'x': x, 'slack': slack, 'con': con, 'ineqlin': OptimizeResult({ 'residual': slack, 'marginals': marg_ineqlin, }), 'eqlin': OptimizeResult({ 'residual': con, 'marginals': marg_eqlin, }), 'lower': OptimizeResult({ 'residual': None if x is None else x - lb, 'marginals': marg_lower, }), 'upper': OptimizeResult({ 'residual': None if x is None else ub - x, 'marginals': 
marg_upper }), 'fun': res.get('fun'), 'status': status, 'success': res['status'] == MODEL_STATUS_OPTIMAL, 'message': message, 'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0), 'crossover_nit': res.get('crossover_nit'), } if np.any(x) and integrality is not None: sol.update({ 'mip_node_count': res.get('mip_node_count', 0), 'mip_dual_bound': res.get('mip_dual_bound', 0.0), 'mip_gap': res.get('mip_gap', 0.0), }) return sol
17,571
38.845805
79
py
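The HiGHS backend above is reached through the public `linprog` interface. The following is a usage sketch, not part of the sources; the `ineqlin.marginals` field assumes a recent SciPy where the HiGHS result exposes dual values as described in the docstring above.

import numpy as np
from scipy.optimize import linprog

c = np.array([-1.0, -2.0])             # maximize x0 + 2*x1 by minimizing -x0 - 2*x1
A_ub = np.array([[1.0, 1.0],
                 [1.0, 3.0]])
b_ub = np.array([4.0, 6.0])

res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(0, None), (0, None)],
              method='highs')
print(res.status, res.x, res.fun)      # 0, approximately [3. 1.], -5.0
print(res.ineqlin.marginals)           # dual values for the two inequality rows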
scipy
scipy-main/scipy/optimize/_numdiff.py
"""Routines for numerical differentiation.""" import functools import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired absolute finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted absolute step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided @functools.lru_cache def _eps_for_method(x0_dtype, f0_dtype, method): """ Calculates relative EPS step to use for a given data type and numdiff step method. Progressively smaller steps are used for larger floating point types. Parameters ---------- f0_dtype: np.dtype dtype of function evaluation x0_dtype: np.dtype dtype of parameter vector method: {'2-point', '3-point', 'cs'} Returns ------- EPS: float relative step size. May be np.float16, np.float32, np.float64 Notes ----- The default relative step will be np.float64. However, if x0 or f0 are smaller floating point types (np.float16, np.float32), then the smallest floating point type is chosen. 
""" # the default EPS value EPS = np.finfo(np.float64).eps x0_is_fp = False if np.issubdtype(x0_dtype, np.inexact): # if you're a floating point type then over-ride the default EPS EPS = np.finfo(x0_dtype).eps x0_itemsize = np.dtype(x0_dtype).itemsize x0_is_fp = True if np.issubdtype(f0_dtype, np.inexact): f0_itemsize = np.dtype(f0_dtype).itemsize # choose the smallest itemsize between x0 and f0 if x0_is_fp and f0_itemsize < x0_itemsize: EPS = np.finfo(f0_dtype).eps if method in ["2-point", "cs"]: return EPS**0.5 elif method in ["3-point"]: return EPS**(1/3) else: raise RuntimeError("Unknown step method, should be one of " "{'2-point', '3-point', 'cs'}") def _compute_absolute_step(rel_step, x0, f0, method): """ Computes an absolute step from a relative step for finite difference calculation. Parameters ---------- rel_step: None or array-like Relative step for the finite difference calculation x0 : np.ndarray Parameter vector f0 : np.ndarray or scalar method : {'2-point', '3-point', 'cs'} Returns ------- h : float The absolute step size Notes ----- `h` will always be np.float64. However, if `x0` or `f0` are smaller floating point dtypes (e.g. np.float32), then the absolute step size will be calculated from the smallest floating point size. """ # this is used instead of np.sign(x0) because we need # sign_x0 to be 1 when x0 == 0. sign_x0 = (x0 >= 0).astype(float) * 2 - 1 rstep = _eps_for_method(x0.dtype, f0.dtype, method) if rel_step is None: abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0)) else: # User has requested specific relative steps. # Don't multiply by max(1, abs(x0) because if x0 < 1 then their # requested step is not used. abs_step = rel_step * sign_x0 * np.abs(x0) # however we don't want an abs_step of 0, which can happen if # rel_step is 0, or x0 is 0. Instead, substitute a realistic step dx = ((x0 + abs_step) - x0) abs_step = np.where(dx == 0, rstep * sign_x0 * np.maximum(1.0, np.abs(x0)), abs_step) return abs_step def _prepare_bounds(bounds, x0): """ Prepares new-style bounds from a two-tuple specifying the lower and upper limits for values in x0. If a value is not bound then the lower/upper bound will be expected to be -np.inf/np.inf. Examples -------- >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5]) (array([0., 1., 2.]), array([ 1., 2., inf])) """ lb, ub = (np.asarray(b, dtype=float) for b in bounds) if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-D matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of columns enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is an index of a group to which ith column assigned. The procedure was helpful only if n_groups is significantly less than n. References ---------- .. [1] A. Curtis, M. J. D. Powell, and J. 
Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A = np.atleast_2d(A) A = (A != 0).astype(np.int32) if A.ndim != 2: raise ValueError("`A` must be 2-dimensional.") m, n = A.shape if order is None or np.isscalar(order): rng = np.random.RandomState(order) order = rng.permutation(n) else: order = np.asarray(order) if order.shape != (n,): raise ValueError("`order` has incorrect shape.") A = A[:, order] if issparse(A): groups = group_sparse(m, n, A.indices, A.indptr) else: groups = group_dense(m, n, A) groups[order] = groups.copy() return groups def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None, f0=None, bounds=(-np.inf, np.inf), sparsity=None, as_linear_operator=False, args=(), kwargs={}): """Compute finite difference approximation of the derivatives of a vector-valued function. If a function maps from R^n to R^m, its derivatives form m-by-n matrix called the Jacobian, where an element (i, j) is a partial derivative of f[i] with respect to x[j]. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is ndarray of shape (n,) (never a scalar even if n=1). It must return 1-D array_like of shape (m,) or a scalar. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to a 1-D array. method : {'3-point', '2-point', 'cs'}, optional Finite difference method to use: - '2-point' - use the first order accuracy forward or backward difference. - '3-point' - use central difference in interior points and the second order accuracy forward or backward difference near the boundary. - 'cs' - use a complex-step finite difference scheme. This assumes that the user function is real-valued and can be analytically continued to the complex plane. Otherwise, produces bogus results. rel_step : None or array_like, optional Relative step size to use. If None (default) the absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with `rel_step` being selected automatically, see Notes. Otherwise ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the sign of `h` is ignored. The calculated step size is possibly adjusted to fit into the bounds. abs_step : array_like, optional Absolute step size to use, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `abs_step` is ignored. By default relative steps are used, only if ``abs_step is not None`` are absolute steps used. f0 : None or array_like, optional If not None it is assumed to be equal to ``fun(x0)``, in this case the ``fun(x0)`` is not called. Default is None. bounds : tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. Bounds checking is not implemented when `as_linear_operator` is True. sparsity : {None, array_like, sparse matrix, 2-tuple}, optional Defines a sparsity structure of the Jacobian matrix. If the Jacobian matrix is known to have only few non-zero elements in each row, then it's possible to estimate its several columns by a single function evaluation [3]_. To perform such economic computations two ingredients are required: * structure : array_like or sparse matrix of shape (m, n). 
A zero element means that a corresponding element of the Jacobian is identically zero. * groups : array_like of shape (n,). A column grouping for a given sparsity structure, use `group_columns` to obtain it. A single array or a sparse matrix is interpreted as a sparsity structure, and groups are computed inside the function. A tuple is interpreted as (structure, groups). If None (default), a standard dense differencing will be used. Note that sparse differencing makes sense only for large Jacobian matrices where each row contains few non-zero elements. as_linear_operator : bool, optional When True the function returns a `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense array or a sparse matrix depending on `sparsity`. The linear operator provides an efficient way of computing ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow direct access to individual elements of the matrix. By default `as_linear_operator` is False. args, kwargs : tuple and dict, optional Additional arguments passed to `fun`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)``. Returns ------- J : {ndarray, sparse matrix, LinearOperator} Finite difference approximation of the Jacobian matrix. If `as_linear_operator` is True returns a LinearOperator with shape (m, n). Otherwise it returns a dense array or sparse matrix depending on how `sparsity` is defined. If `sparsity` is None then an ndarray with shape (m, n) is returned. If `sparsity` is not None returns a csr_matrix with shape (m, n). For sparse matrices and linear operators it is always returned as a 2-D structure, for ndarrays, if m=1 it is returned as a 1-D gradient array with shape (n,). See Also -------- check_derivative : Check correctness of a function computing derivatives. Notes ----- If `rel_step` is not provided, it is assigned as ``EPS**(1/s)``, where EPS is determined from the smallest floating point dtype of `x0` or `fun(x0)`, ``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and s=3 for '3-point' method. Such a relative step approximately minimizes a sum of truncation and round-off errors, see [1]_. Relative steps are used by default. However, absolute steps are used when ``abs_step is not None``. If any of the absolute or relative steps produces an indistinguishable difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then an automatic step size is substituted for that particular entry. A finite difference scheme for the '3-point' method is selected automatically. The well-known central difference scheme is used for points sufficiently far from the boundary, and a 3-point forward or backward scheme is used for points near the boundary. Both schemes have second-order accuracy in terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point forward and backward difference schemes. For dense differencing, when m=1 the Jacobian is returned with shape (n,); on the other hand, when n=1 the Jacobian is returned with shape (m, 1). Our motivation is the following: a) It handles the case of gradient computation (m=1) in a conventional way. b) It clearly separates these two different cases. c) In all cases np.atleast_2d can be called to get a 2-D Jacobian with correct dimensions. References ---------- .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific Computing. 3rd edition", sec. 5.7. .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. .. [3] B.
Fornberg, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. Examples -------- >>> import numpy as np >>> from scipy.optimize._numdiff import approx_derivative >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> approx_derivative(f, x0, args=(1, 2)) array([[ 1., 0.], [-1., 0.]]) Bounds can be used to limit the region of function evaluation. In the example below we compute left and right derivative at point 1.0. >>> def g(x): ... return x**2 if x >= 1 else x ... >>> x0 = 1.0 >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) array([ 1.]) >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) array([ 2.]) """ if method not in ['2-point', '3-point', 'cs']: raise ValueError("Unknown method '%s'. " % method) x0 = np.atleast_1d(x0) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = _prepare_bounds(bounds, x0) if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if as_linear_operator and not (np.all(np.isinf(lb)) and np.all(np.isinf(ub))): raise ValueError("Bounds not supported when " "`as_linear_operator` is True.") def fun_wrapped(x): f = np.atleast_1d(fun(x, *args, **kwargs)) if f.ndim > 1: raise RuntimeError("`fun` return value has " "more than 1 dimension.") return f if f0 is None: f0 = fun_wrapped(x0) else: f0 = np.atleast_1d(f0) if f0.ndim > 1: raise ValueError("`f0` passed has more than 1 dimension.") if np.any((x0 < lb) | (x0 > ub)): raise ValueError("`x0` violates bound constraints.") if as_linear_operator: if rel_step is None: rel_step = _eps_for_method(x0.dtype, f0.dtype, method) return _linear_operator_difference(fun_wrapped, x0, f0, rel_step, method) else: # by default we use rel_step if abs_step is None: h = _compute_absolute_step(rel_step, x0, f0, method) else: # user specifies an absolute step sign_x0 = (x0 >= 0).astype(float) * 2 - 1 h = abs_step # cannot have a zero step. This might happen if x0 is very large # or small. In which case fall back to relative step. 
dx = ((x0 + h) - x0) h = np.where(dx == 0, _eps_for_method(x0.dtype, f0.dtype, method) * sign_x0 * np.maximum(1.0, np.abs(x0)), h) if method == '2-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', lb, ub) elif method == '3-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) elif method == 'cs': use_one_sided = False if sparsity is None: return _dense_difference(fun_wrapped, x0, f0, h, use_one_sided, method) else: if not issparse(sparsity) and len(sparsity) == 2: structure, groups = sparsity else: structure = sparsity groups = group_columns(sparsity) if issparse(structure): structure = csc_matrix(structure) else: structure = np.atleast_2d(structure) groups = np.atleast_1d(groups) return _sparse_difference(fun_wrapped, x0, f0, h, use_one_sided, structure, groups, method) def _linear_operator_difference(fun, x0, f0, h, method): m = f0.size n = x0.size if method == '2-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p df = fun(x) - f0 return df / dx elif method == '3-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = 2*h / norm(p) x1 = x0 - (dx/2)*p x2 = x0 + (dx/2)*p f1 = fun(x1) f2 = fun(x2) df = f2 - f1 return df / dx elif method == 'cs': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p*1.j f1 = fun(x) df = f1.imag return df / dx else: raise RuntimeError("Never be here.") return LinearOperator((m, n), matvec) def _dense_difference(fun, x0, f0, h, use_one_sided, method): m = f0.size n = x0.size J_transposed = np.empty((n, m)) h_vecs = np.diag(h) for i in range(h.size): if method == '2-point': x = x0 + h_vecs[i] dx = x[i] - x0[i] # Recompute dx as exactly representable number. df = fun(x) - f0 elif method == '3-point' and use_one_sided[i]: x1 = x0 + h_vecs[i] x2 = x0 + 2 * h_vecs[i] dx = x2[i] - x0[i] f1 = fun(x1) f2 = fun(x2) df = -3.0 * f0 + 4 * f1 - f2 elif method == '3-point' and not use_one_sided[i]: x1 = x0 - h_vecs[i] x2 = x0 + h_vecs[i] dx = x2[i] - x1[i] f1 = fun(x1) f2 = fun(x2) df = f2 - f1 elif method == 'cs': f1 = fun(x0 + h_vecs[i]*1.j) df = f1.imag dx = h_vecs[i, i] else: raise RuntimeError("Never be here.") J_transposed[i] = df / dx if m == 1: J_transposed = np.ravel(J_transposed) return J_transposed.T def _sparse_difference(fun, x0, f0, h, use_one_sided, structure, groups, method): m = f0.size n = x0.size row_indices = [] col_indices = [] fractions = [] n_groups = np.max(groups) + 1 for group in range(n_groups): # Perturb variables which are in the same group simultaneously. e = np.equal(group, groups) h_vec = h * e if method == '2-point': x = x0 + h_vec dx = x - x0 df = fun(x) - f0 # The result is written to columns which correspond to perturbed # variables. cols, = np.nonzero(e) # Find all non-zero elements in selected columns of Jacobian. i, j, _ = find(structure[:, cols]) # Restore column indices in the full array. j = cols[j] elif method == '3-point': # Here we do conceptually the same but separate one-sided # and two-sided schemes. 
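            # One-sided entries are sampled at x0 + h and x0 + 2*h (difference
            # -3*f0 + 4*f1 - f2); two-sided entries at x0 - h and x0 + h
            # (difference f2 - f1), mirroring _dense_difference above.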
x1 = x0.copy() x2 = x0.copy() mask_1 = use_one_sided & e x1[mask_1] += h_vec[mask_1] x2[mask_1] += 2 * h_vec[mask_1] mask_2 = ~use_one_sided & e x1[mask_2] -= h_vec[mask_2] x2[mask_2] += h_vec[mask_2] dx = np.zeros(n) dx[mask_1] = x2[mask_1] - x0[mask_1] dx[mask_2] = x2[mask_2] - x1[mask_2] f1 = fun(x1) f2 = fun(x2) cols, = np.nonzero(e) i, j, _ = find(structure[:, cols]) j = cols[j] mask = use_one_sided[j] df = np.empty(m) rows = i[mask] df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows] rows = i[~mask] df[rows] = f2[rows] - f1[rows] elif method == 'cs': f1 = fun(x0 + h_vec*1.j) df = f1.imag dx = h_vec cols, = np.nonzero(e) i, j, _ = find(structure[:, cols]) j = cols[j] else: raise ValueError("Never be here.") # All that's left is to compute the fraction. We store i, j and # fractions as separate arrays and later construct coo_matrix. row_indices.append(i) col_indices.append(j) fractions.append(df[i] / dx[j]) row_indices = np.hstack(row_indices) col_indices = np.hstack(col_indices) fractions = np.hstack(fractions) J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n)) return csr_matrix(J) def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(), kwargs={}): """Check correctness of a function computing derivatives (Jacobian or gradient) by comparison with a finite difference approximation. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is ndarray of shape (n,) (never a scalar even if n=1). It must return 1-D array_like of shape (m,) or a scalar. jac : callable Function which computes Jacobian matrix of `fun`. It must work with argument x the same way as `fun`. The return value must be array_like or sparse matrix with an appropriate shape. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to 1-D array. bounds : 2-tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. args, kwargs : tuple and dict, optional Additional arguments passed to `fun` and `jac`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)`` and the same for `jac`. Returns ------- accuracy : float The maximum among all relative errors for elements with absolute values higher than 1 and absolute errors for elements with absolute values less or equal than 1. If `accuracy` is on the order of 1e-6 or lower, then it is likely that your `jac` implementation is correct. See Also -------- approx_derivative : Compute finite difference approximation of derivative. Examples -------- >>> import numpy as np >>> from scipy.optimize._numdiff import check_derivative >>> >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> def jac(x, c1, c2): ... return np.array([ ... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])], ... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])] ... ]) ... 
>>> >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> check_derivative(f, jac, x0, args=(1, 2)) 2.4492935982947064e-16 """ J_to_test = jac(x0, *args, **kwargs) if issparse(J_to_test): J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test, args=args, kwargs=kwargs) J_to_test = csr_matrix(J_to_test) abs_err = J_to_test - J_diff i, j, abs_err_data = find(abs_err) J_diff_data = np.asarray(J_diff[i, j]).ravel() return np.max(np.abs(abs_err_data) / np.maximum(1, np.abs(J_diff_data))) else: J_diff = approx_derivative(fun, x0, bounds=bounds, args=args, kwargs=kwargs) abs_err = np.abs(J_to_test - J_diff) return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
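

if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch (runs only when this file is executed directly):
    # approximate the Jacobian of a small vector-valued function and compare
    # an analytic Jacobian against the finite-difference one. The demo
    # function and evaluation point below are illustrative choices only.
    def _demo_fun(x):
        return np.array([x[0] * x[1], np.sin(x[0]) + x[1] ** 2])

    def _demo_jac(x):
        return np.array([[x[1], x[0]],
                         [np.cos(x[0]), 2 * x[1]]])

    x_demo = np.array([1.0, 2.0])
    print(approx_derivative(_demo_fun, x_demo, method='3-point'))
    # check_derivative should report a small error, roughly 1e-9 or below.
    print(check_derivative(_demo_fun, _demo_jac, x_demo))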
28,279
36.112861
79
py
scipy
scipy-main/scipy/optimize/_linesearch.py
""" Functions --------- .. autosummary:: :toctree: generated/ line_search_armijo line_search_wolfe1 line_search_wolfe2 scalar_search_wolfe1 scalar_search_wolfe2 """ from warnings import warn from scipy.optimize import _minpack2 as minpack2 import numpy as np __all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2', 'scalar_search_wolfe1', 'scalar_search_wolfe2', 'line_search_armijo'] class LineSearchWarning(RuntimeWarning): pass #------------------------------------------------------------------------------ # Minpack's Wolfe line and scalar searches #------------------------------------------------------------------------------ def line_search_wolfe1(f, fprime, xk, pk, gfk=None, old_fval=None, old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8, xtol=1e-14): """ As `scalar_search_wolfe1` but do a line search to direction `pk` Parameters ---------- f : callable Function `f(x)` fprime : callable Gradient of `f` xk : array_like Current point pk : array_like Search direction gfk : array_like, optional Gradient of `f` at point `xk` old_fval : float, optional Value of `f` at point `xk` old_old_fval : float, optional Value of `f` at point preceding `xk` The rest of the parameters are the same as for `scalar_search_wolfe1`. Returns ------- stp, f_count, g_count, fval, old_fval As in `line_search_wolfe1` gval : array Gradient of `f` at the final point """ if gfk is None: gfk = fprime(xk, *args) gval = [gfk] gc = [0] fc = [0] def phi(s): fc[0] += 1 return f(xk + s*pk, *args) def derphi(s): gval[0] = fprime(xk + s*pk, *args) gc[0] += 1 return np.dot(gval[0], pk) derphi0 = np.dot(gfk, pk) stp, fval, old_fval = scalar_search_wolfe1( phi, derphi, old_fval, old_old_fval, derphi0, c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol) return stp, fc[0], gc[0], fval, old_fval, gval[0] def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, c1=1e-4, c2=0.9, amax=50, amin=1e-8, xtol=1e-14): """ Scalar function search for alpha that satisfies strong Wolfe conditions alpha > 0 is assumed to be a descent direction. Parameters ---------- phi : callable phi(alpha) Function at point `alpha` derphi : callable phi'(alpha) Objective function derivative. Returns a scalar. phi0 : float, optional Value of phi at 0 old_phi0 : float, optional Value of phi at previous point derphi0 : float, optional Value derphi at 0 c1 : float, optional Parameter for Armijo condition rule. c2 : float, optional Parameter for curvature condition rule. amax, amin : float, optional Maximum and minimum step size xtol : float, optional Relative tolerance for an acceptable step. Returns ------- alpha : float Step size, or None if no suitable step was found phi : float Value of `phi` at the new point `alpha` phi0 : float Value of `phi` at `alpha=0` Notes ----- Uses routine DCSRCH from MINPACK. """ if phi0 is None: phi0 = phi(0.) if derphi0 is None: derphi0 = derphi(0.) 
if old_phi0 is not None and derphi0 != 0: alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) if alpha1 < 0: alpha1 = 1.0 else: alpha1 = 1.0 phi1 = phi0 derphi1 = derphi0 isave = np.zeros((2,), np.intc) dsave = np.zeros((13,), float) task = b'START' maxiter = 100 for i in range(maxiter): stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1, c1, c2, xtol, task, amin, amax, isave, dsave) if task[:2] == b'FG': alpha1 = stp phi1 = phi(stp) derphi1 = derphi(stp) else: break else: # maxiter reached, the line search did not converge stp = None if task[:5] == b'ERROR' or task[:4] == b'WARN': stp = None # failed return stp, phi1, phi0 line_search = line_search_wolfe1 #------------------------------------------------------------------------------ # Pure-Python Wolfe line and scalar searches #------------------------------------------------------------------------------ # Note: `line_search_wolfe2` is the public `scipy.optimize.line_search` def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None, old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None, extra_condition=None, maxiter=10): """Find alpha that satisfies strong Wolfe conditions. Parameters ---------- f : callable f(x,*args) Objective function. myfprime : callable f'(x,*args) Objective function gradient. xk : ndarray Starting point. pk : ndarray Search direction. The search direction must be a descent direction for the algorithm to converge. gfk : ndarray, optional Gradient value for x=xk (xk being the current parameter estimate). Will be recomputed if omitted. old_fval : float, optional Function value for x=xk. Will be recomputed if omitted. old_old_fval : float, optional Function value for the point preceding x=xk. args : tuple, optional Additional arguments passed to objective function. c1 : float, optional Parameter for Armijo condition rule. c2 : float, optional Parameter for curvature condition rule. amax : float, optional Maximum step size extra_condition : callable, optional A callable of the form ``extra_condition(alpha, x, f, g)`` returning a boolean. Arguments are the proposed step ``alpha`` and the corresponding ``x``, ``f`` and ``g`` values. The line search accepts the value of ``alpha`` only if this callable returns ``True``. If the callable returns ``False`` for the step length, the algorithm will continue with new iterates. The callable is only called for iterates satisfying the strong Wolfe conditions. maxiter : int, optional Maximum number of iterations to perform. Returns ------- alpha : float or None Alpha for which ``x_new = x0 + alpha * pk``, or None if the line search algorithm did not converge. fc : int Number of function evaluations made. gc : int Number of gradient evaluations made. new_fval : float or None New function value ``f(x_new)=f(x0+alpha*pk)``, or None if the line search algorithm did not converge. old_fval : float Old function value ``f(x0)``. new_slope : float or None The local slope along the search direction at the new value ``<myfprime(x_new), pk>``, or None if the line search algorithm did not converge. Notes ----- Uses the line search algorithm to enforce strong Wolfe conditions. See Wright and Nocedal, 'Numerical Optimization', 1999, pp. 59-61. The search direction `pk` must be a descent direction (e.g. ``-myfprime(xk)``) to find a step length that satisfies the strong Wolfe conditions. If the search direction is not a descent direction (e.g. ``myfprime(xk)``), then `alpha`, `new_fval`, and `new_slope` will be None. 
Examples -------- >>> import numpy as np >>> from scipy.optimize import line_search A objective function and its gradient are defined. >>> def obj_func(x): ... return (x[0])**2+(x[1])**2 >>> def obj_grad(x): ... return [2*x[0], 2*x[1]] We can find alpha that satisfies strong Wolfe conditions. >>> start_point = np.array([1.8, 1.7]) >>> search_gradient = np.array([-1.0, -1.0]) >>> line_search(obj_func, obj_grad, start_point, search_gradient) (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4]) """ fc = [0] gc = [0] gval = [None] gval_alpha = [None] def phi(alpha): fc[0] += 1 return f(xk + alpha * pk, *args) fprime = myfprime def derphi(alpha): gc[0] += 1 gval[0] = fprime(xk + alpha * pk, *args) # store for later use gval_alpha[0] = alpha return np.dot(gval[0], pk) if gfk is None: gfk = fprime(xk, *args) derphi0 = np.dot(gfk, pk) if extra_condition is not None: # Add the current gradient as argument, to avoid needless # re-evaluation def extra_condition2(alpha, phi): if gval_alpha[0] != alpha: derphi(alpha) x = xk + alpha * pk return extra_condition(alpha, x, phi, gval[0]) else: extra_condition2 = None alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2( phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax, extra_condition2, maxiter=maxiter) if derphi_star is None: warn('The line search algorithm did not converge', LineSearchWarning) else: # derphi_star is a number (derphi) -- so use the most recently # calculated gradient used in computing it derphi = gfk*pk # this is the gradient at the next step no need to compute it # again in the outer loop. derphi_star = gval[0] return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star def scalar_search_wolfe2(phi, derphi, phi0=None, old_phi0=None, derphi0=None, c1=1e-4, c2=0.9, amax=None, extra_condition=None, maxiter=10): """Find alpha that satisfies strong Wolfe conditions. alpha > 0 is assumed to be a descent direction. Parameters ---------- phi : callable phi(alpha) Objective scalar function. derphi : callable phi'(alpha) Objective function derivative. Returns a scalar. phi0 : float, optional Value of phi at 0. old_phi0 : float, optional Value of phi at previous point. derphi0 : float, optional Value of derphi at 0 c1 : float, optional Parameter for Armijo condition rule. c2 : float, optional Parameter for curvature condition rule. amax : float, optional Maximum step size. extra_condition : callable, optional A callable of the form ``extra_condition(alpha, phi_value)`` returning a boolean. The line search accepts the value of ``alpha`` only if this callable returns ``True``. If the callable returns ``False`` for the step length, the algorithm will continue with new iterates. The callable is only called for iterates satisfying the strong Wolfe conditions. maxiter : int, optional Maximum number of iterations to perform. Returns ------- alpha_star : float or None Best alpha, or None if the line search algorithm did not converge. phi_star : float phi at alpha_star. phi0 : float phi at 0. derphi_star : float or None derphi at alpha_star, or None if the line search algorithm did not converge. Notes ----- Uses the line search algorithm to enforce strong Wolfe conditions. See Wright and Nocedal, 'Numerical Optimization', 1999, pp. 59-61. """ if phi0 is None: phi0 = phi(0.) if derphi0 is None: derphi0 = derphi(0.) 
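    # Bracketing phase: grow the trial step until either a point satisfying the
    # strong Wolfe conditions is found, or an interval guaranteed to contain one
    # is identified and handed over to _zoom.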
alpha0 = 0 if old_phi0 is not None and derphi0 != 0: alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0) else: alpha1 = 1.0 if alpha1 < 0: alpha1 = 1.0 if amax is not None: alpha1 = min(alpha1, amax) phi_a1 = phi(alpha1) #derphi_a1 = derphi(alpha1) evaluated below phi_a0 = phi0 derphi_a0 = derphi0 if extra_condition is None: def extra_condition(alpha, phi): return True for i in range(maxiter): if alpha1 == 0 or (amax is not None and alpha0 == amax): # alpha1 == 0: This shouldn't happen. Perhaps the increment has # slipped below machine precision? alpha_star = None phi_star = phi0 phi0 = old_phi0 derphi_star = None if alpha1 == 0: msg = 'Rounding errors prevent the line search from converging' else: msg = "The line search algorithm could not find a solution " + \ "less than or equal to amax: %s" % amax warn(msg, LineSearchWarning) break not_first_iteration = i > 0 if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \ ((phi_a1 >= phi_a0) and not_first_iteration): alpha_star, phi_star, derphi_star = \ _zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi, derphi, phi0, derphi0, c1, c2, extra_condition) break derphi_a1 = derphi(alpha1) if (abs(derphi_a1) <= -c2*derphi0): if extra_condition(alpha1, phi_a1): alpha_star = alpha1 phi_star = phi_a1 derphi_star = derphi_a1 break if (derphi_a1 >= 0): alpha_star, phi_star, derphi_star = \ _zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi, derphi, phi0, derphi0, c1, c2, extra_condition) break alpha2 = 2 * alpha1 # increase by factor of two on each iteration if amax is not None: alpha2 = min(alpha2, amax) alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi(alpha1) derphi_a0 = derphi_a1 else: # stopping test maxiter reached alpha_star = alpha1 phi_star = phi_a1 derphi_star = None warn('The line search algorithm did not converge', LineSearchWarning) return alpha_star, phi_star, phi0, derphi_star def _cubicmin(a, fa, fpa, b, fb, c, fc): """ Finds the minimizer for a cubic polynomial that goes through the points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa. If no minimizer can be found, return None. """ # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D with np.errstate(divide='raise', over='raise', invalid='raise'): try: C = fpa db = b - a dc = c - a denom = (db * dc) ** 2 * (db - dc) d1 = np.empty((2, 2)) d1[0, 0] = dc ** 2 d1[0, 1] = -db ** 2 d1[1, 0] = -dc ** 3 d1[1, 1] = db ** 3 [A, B] = np.dot(d1, np.asarray([fb - fa - C * db, fc - fa - C * dc]).flatten()) A /= denom B /= denom radical = B * B - 3 * A * C xmin = a + (-B + np.sqrt(radical)) / (3 * A) except ArithmeticError: return None if not np.isfinite(xmin): return None return xmin def _quadmin(a, fa, fpa, b, fb): """ Finds the minimizer for a quadratic polynomial that goes through the points (a,fa), (b,fb) with derivative at a of fpa. """ # f(x) = B*(x-a)^2 + C*(x-a) + D with np.errstate(divide='raise', over='raise', invalid='raise'): try: D = fa C = fpa db = b - a * 1.0 B = (fb - D - C * db) / (db * db) xmin = a - C / (2.0 * B) except ArithmeticError: return None if not np.isfinite(xmin): return None return xmin def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo, phi, derphi, phi0, derphi0, c1, c2, extra_condition): """Zoom stage of approximate linesearch satisfying strong Wolfe conditions. Part of the optimization algorithm in `scalar_search_wolfe2`. Notes ----- Implements Algorithm 3.6 (zoom) in Wright and Nocedal, 'Numerical Optimization', 1999, pp. 61. 
""" maxiter = 10 i = 0 delta1 = 0.2 # cubic interpolant check delta2 = 0.1 # quadratic interpolant check phi_rec = phi0 a_rec = 0 while True: # interpolate to find a trial step length between a_lo and # a_hi Need to choose interpolation here. Use cubic # interpolation and then if the result is within delta * # dalpha or outside of the interval bounded by a_lo or a_hi # then use quadratic interpolation, if the result is still too # close, then use bisection dalpha = a_hi - a_lo if dalpha < 0: a, b = a_hi, a_lo else: a, b = a_lo, a_hi # minimizer of cubic interpolant # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi) # # if the result is too close to the end points (or out of the # interval), then use quadratic interpolation with phi_lo, # derphi_lo and phi_hi if the result is still too close to the # end points (or out of the interval) then use bisection if (i > 0): cchk = delta1 * dalpha a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec) if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk): qchk = delta2 * dalpha a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi) if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk): a_j = a_lo + 0.5*dalpha # Check new value of a_j phi_aj = phi(a_j) if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo): phi_rec = phi_hi a_rec = a_hi a_hi = a_j phi_hi = phi_aj else: derphi_aj = derphi(a_j) if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj): a_star = a_j val_star = phi_aj valprime_star = derphi_aj break if derphi_aj*(a_hi - a_lo) >= 0: phi_rec = phi_hi a_rec = a_hi a_hi = a_lo phi_hi = phi_lo else: phi_rec = phi_lo a_rec = a_lo a_lo = a_j phi_lo = phi_aj derphi_lo = derphi_aj i += 1 if (i > maxiter): # Failed to find a conforming step size a_star = None val_star = None valprime_star = None break return a_star, val_star, valprime_star #------------------------------------------------------------------------------ # Armijo line and scalar searches #------------------------------------------------------------------------------ def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): """Minimize over alpha, the function ``f(xk+alpha pk)``. Parameters ---------- f : callable Function to be minimized. xk : array_like Current point. pk : array_like Search direction. gfk : array_like Gradient of `f` at point `xk`. old_fval : float Value of `f` at point `xk`. args : tuple, optional Optional arguments. c1 : float, optional Value to control stopping criterion. alpha0 : scalar, optional Value of `alpha` at start of the optimization. Returns ------- alpha f_count f_val_at_alpha Notes ----- Uses the interpolation algorithm (Armijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57 """ xk = np.atleast_1d(xk) fc = [0] def phi(alpha1): fc[0] += 1 return f(xk + alpha1*pk, *args) if old_fval is None: phi0 = phi(0.) else: phi0 = old_fval # compute f(xk) -- done in past loop derphi0 = np.dot(gfk, pk) alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1, alpha0=alpha0) return alpha, fc[0], phi1 def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): """ Compatibility wrapper for `line_search_armijo` """ r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1, alpha0=alpha0) return r[0], r[1], 0, r[2] def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0): """Minimize over alpha, the function ``phi(alpha)``. 
Uses the interpolation algorithm (Armijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57 alpha > 0 is assumed to be a descent direction. Returns ------- alpha phi1 """ phi_a0 = phi(alpha0) if phi_a0 <= phi0 + c1*alpha0*derphi0: return alpha0, phi_a0 # Otherwise, compute the minimizer of a quadratic interpolant: alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) phi_a1 = phi(alpha1) if (phi_a1 <= phi0 + c1*alpha1*derphi0): return alpha1, phi_a1 # Otherwise, loop with cubic interpolation until we find an alpha which # satisfies the first Wolfe condition (since we are backtracking, we will # assume that the value of alpha is not too small and satisfies the second # condition. while alpha1 > amin: # we are assuming alpha>0 is a descent direction factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) a = a / factor b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) b = b / factor alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) phi_a2 = phi(alpha2) if (phi_a2 <= phi0 + c1*alpha2*derphi0): return alpha2, phi_a2 if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: alpha2 = alpha1 / 2.0 alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi_a2 # Failed to find a suitable step length return None, phi_a1 #------------------------------------------------------------------------------ # Non-monotone line search for DF-SANE #------------------------------------------------------------------------------ def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta, gamma=1e-4, tau_min=0.1, tau_max=0.5): """ Nonmonotone backtracking line search as described in [1]_ Parameters ---------- f : callable Function returning a tuple ``(f, F)`` where ``f`` is the value of a merit function and ``F`` the residual. x_k : ndarray Initial position. d : ndarray Search direction. prev_fs : float List of previous merit function values. Should have ``len(prev_fs) <= M`` where ``M`` is the nonmonotonicity window parameter. eta : float Allowed merit function increase, see [1]_ gamma, tau_min, tau_max : float, optional Search parameters, see [1]_ Returns ------- alpha : float Step length xp : ndarray Next position fp : float Merit function value at next position Fp : ndarray Residual at next position References ---------- [1] "Spectral residual method without gradient information for solving large-scale nonlinear systems of equations." W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006). """ f_k = prev_fs[-1] f_bar = max(prev_fs) alpha_p = 1 alpha_m = 1 alpha = 1 while True: xp = x_k + alpha_p * d fp, Fp = f(xp) if fp <= f_bar + eta - gamma * alpha_p**2 * f_k: alpha = alpha_p break alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) xp = x_k - alpha_m * d fp, Fp = f(xp) if fp <= f_bar + eta - gamma * alpha_m**2 * f_k: alpha = -alpha_m break alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) return alpha, xp, fp, Fp def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta, gamma=1e-4, tau_min=0.1, tau_max=0.5, nu=0.85): """ Nonmonotone line search from [1] Parameters ---------- f : callable Function returning a tuple ``(f, F)`` where ``f`` is the value of a merit function and ``F`` the residual. x_k : ndarray Initial position. 
d : ndarray Search direction. f_k : float Initial merit function value. C, Q : float Control parameters. On the first iteration, give values Q=1.0, C=f_k eta : float Allowed merit function increase, see [1]_ nu, gamma, tau_min, tau_max : float, optional Search parameters, see [1]_ Returns ------- alpha : float Step length xp : ndarray Next position fp : float Merit function value at next position Fp : ndarray Residual at next position C : float New value for the control parameter C Q : float New value for the control parameter Q References ---------- .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line search and its application to the spectral residual method'', IMA J. Numer. Anal. 29, 814 (2009). """ alpha_p = 1 alpha_m = 1 alpha = 1 while True: xp = x_k + alpha_p * d fp, Fp = f(xp) if fp <= C + eta - gamma * alpha_p**2 * f_k: alpha = alpha_p break alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k) xp = x_k - alpha_m * d fp, Fp = f(xp) if fp <= C + eta - gamma * alpha_m**2 * f_k: alpha = -alpha_m break alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k) alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p) alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m) # Update C and Q Q_next = nu * Q + 1 C = (nu * Q * (C + eta) + fp) / Q_next Q = Q_next return alpha, xp, fp, Fp, C, Q
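

if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch (runs only when this file is executed directly):
    # backtracking Armijo search on a simple quadratic along the
    # steepest-descent direction. The objective and starting point are
    # illustrative choices only.
    def _demo_f(x):
        return float(np.dot(x, x))

    def _demo_grad(x):
        return 2.0 * x

    x_k = np.array([3.0, -4.0])
    p_k = -_demo_grad(x_k)
    alpha, n_fev, f_new = line_search_armijo(_demo_f, x_k, p_k,
                                             _demo_grad(x_k), _demo_f(x_k))
    # For this quadratic, alpha comes out as 0.5, the exact 1-D minimizer.
    print(alpha, n_fev, f_new)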
27,044
29.353535
81
py
scipy
scipy-main/scipy/optimize/_nnls.py
import numpy as np from scipy.linalg import solve __all__ = ['nnls'] def nnls(A, b, maxiter=None, *, atol=None): """ Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This problem, often called NonNegative Least Squares, is a convex optimization problem with convex constraints. It typically arises when the ``x`` models quantities for which only nonnegative values are attainable: weights of ingredients, component costs, and so on. Parameters ---------- A : (m, n) ndarray Coefficient array b : (m,) ndarray, float Right-hand side vector. maxiter: int, optional Maximum number of iterations. Default value is ``3 * n``. atol: float, optional Tolerance value used in the algorithm to assess closeness to zero in the projected residual ``A.T @ (A x - b)`` entries. Increasing this value relaxes the solution constraints. A typical relaxation value can be selected as ``max(m, n) * np.linalg.norm(A, 1) * np.spacing(1.)``. This value is not set as default since the norm operation becomes expensive for large problems, hence it should be used only when necessary. Returns ------- x : ndarray Solution vector. rnorm : float The 2-norm of the residual, ``|| Ax-b ||_2``. See Also -------- lsq_linear : Linear least squares with bounds on the variables Notes ----- The code is based on [2]_ which is an improved version of the classical algorithm of [1]_. It utilizes an active set method and solves the KKT (Karush-Kuhn-Tucker) conditions for the non-negative least squares problem. References ---------- .. [1] Lawson C., Hanson R.J., "Solving Least Squares Problems", SIAM, 1995, :doi:`10.1137/1.9781611971217` .. [2] Bro, Rasmus and de Jong, Sijmen, "A Fast Non-Negativity- Constrained Least Squares Algorithm", Journal of Chemometrics, 1997, :doi:`10.1002/(SICI)1099-128X(199709/10)11:5<393::AID-CEM483>3.0.CO;2-L` Examples -------- >>> import numpy as np >>> from scipy.optimize import nnls ... >>> A = np.array([[1, 0], [1, 0], [0, 1]]) >>> b = np.array([2, 1, 1]) >>> nnls(A, b) (array([1.5, 1. ]), 0.7071067811865475) >>> b = np.array([-1, -1, -1]) >>> nnls(A, b) (array([0., 0.]), 1.7320508075688772) """ A = np.asarray_chkfinite(A) b = np.asarray_chkfinite(b) if len(A.shape) != 2: raise ValueError("Expected a two-dimensional array (matrix)" + f", but the shape of A is {A.shape}") if len(b.shape) != 1: raise ValueError("Expected a one-dimensional array (vector)" + f", but the shape of b is {b.shape}") m, n = A.shape if m != b.shape[0]: raise ValueError( "Incompatible dimensions. The first dimension of " + f"A is {m}, while the shape of b is {(b.shape[0], )}") x, rnorm, mode = _nnls(A, b, maxiter, tol=atol) if mode != 1: raise RuntimeError("Maximum number of iterations reached.") return x, rnorm def _nnls(A, b, maxiter=None, tol=None): """ This is a single RHS algorithm from ref [2] above. For multiple RHS support, the algorithm is given in :doi:`10.1002/cem.889` """ m, n = A.shape AtA = A.T @ A Atb = b @ A # Result is 1D - let NumPy figure it out if not maxiter: maxiter = 3*n if tol is None: tol = 10 * max(m, n) * np.spacing(1.) # Initialize vars x = np.zeros(n, dtype=np.float64) # Inactive constraint switches P = np.zeros(n, dtype=bool) # Projected residual resid = Atb.copy().astype(np.float64) # x=0.
Skip (-AtA @ x) term # Overall iteration counter # Outer loop is not counted, inner iter is counted across outer spins iter = 0 while (not P.all()) and (resid[~P] > tol).any(): # B # Get the "most" active coeff index and move to inactive set resid[P] = -np.inf k = np.argmax(resid) # B.2 P[k] = True # B.3 # Iteration solution s = np.zeros(n, dtype=np.float64) P_ind = P.nonzero()[0] s[P] = solve(AtA[P_ind[:, None], P_ind[None, :]], Atb[P], assume_a='sym', check_finite=False) # B.4 # Inner loop while (iter < maxiter) and (s[P].min() <= tol): # C.1 alpha_ind = ((s < tol) & P).nonzero() alpha = (x[alpha_ind] / (x[alpha_ind] - s[alpha_ind])).min() # C.2 x *= (1 - alpha) x += alpha*s P[x < tol] = False s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', check_finite=False) s[~P] = 0 # C.6 iter += 1 x[:] = s[:] resid = Atb - AtA @ x if iter == maxiter: # Typically following line should return # return x, np.linalg.norm(A@x - b), -1 # however at the top level, -1 raises an exception wasting norm # Instead return dummy number 0. return x, 0., -1 return x, np.linalg.norm(A@x - b), 1
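

if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch (runs only when this file is executed directly):
    # a tiny nonnegative least-squares fit, once with the default tolerance
    # and once with the relaxed ``atol`` suggested in the docstring above.
    A_demo = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    b_demo = np.array([2.0, 1.0, 1.0])
    print(nnls(A_demo, b_demo))  # roughly (array([1.5, 1.0]), 0.7071)

    atol_demo = max(A_demo.shape) * np.linalg.norm(A_demo, 1) * np.spacing(1.)
    print(nnls(A_demo, b_demo, atol=atol_demo))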
5,189
31.236025
79
py
scipy
scipy-main/scipy/optimize/_hessian_update_strategy.py
"""Hessian update strategies for quasi-Newton optimization methods.""" import numpy as np from numpy.linalg import norm from scipy.linalg import get_blas_funcs from warnings import warn __all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1'] class HessianUpdateStrategy: """Interface for implementing Hessian update strategies. Many optimization methods make use of Hessian (or inverse Hessian) approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS. Some of these approximations, however, do not actually need to store the entire matrix or can compute the internal matrix product with a given vector in a very efficiently manner. This class serves as an abstract interface between the optimization algorithm and the quasi-Newton update strategies, giving freedom of implementation to store and update the internal matrix as efficiently as possible. Different choices of initialization and update procedure will result in different quasi-Newton strategies. Four methods should be implemented in derived classes: ``initialize``, ``update``, ``dot`` and ``get_matrix``. Notes ----- Any instance of a class that implements this interface, can be accepted by the method ``minimize`` and used by the compatible solvers to approximate the Hessian (or inverse Hessian) used by the optimization algorithms. """ def initialize(self, n, approx_type): """Initialize internal matrix. Allocate internal memory for storing and updating the Hessian or its inverse. Parameters ---------- n : int Problem dimension. approx_type : {'hess', 'inv_hess'} Selects either the Hessian or the inverse Hessian. When set to 'hess' the Hessian will be stored and updated. When set to 'inv_hess' its inverse will be used instead. """ raise NotImplementedError("The method ``initialize(n, approx_type)``" " is not implemented.") def update(self, delta_x, delta_grad): """Update internal matrix. Update Hessian matrix or its inverse (depending on how 'approx_type' is defined) using information about the last evaluated points. Parameters ---------- delta_x : ndarray The difference between two points the gradient function have been evaluated at: ``delta_x = x2 - x1``. delta_grad : ndarray The difference between the gradients: ``delta_grad = grad(x2) - grad(x1)``. """ raise NotImplementedError("The method ``update(delta_x, delta_grad)``" " is not implemented.") def dot(self, p): """Compute the product of the internal matrix with the given vector. Parameters ---------- p : array_like 1-D array representing a vector. Returns ------- Hp : array 1-D represents the result of multiplying the approximation matrix by vector p. """ raise NotImplementedError("The method ``dot(p)``" " is not implemented.") def get_matrix(self): """Return current internal matrix. Returns ------- H : ndarray, shape (n, n) Dense matrix containing either the Hessian or its inverse (depending on how 'approx_type' is defined). """ raise NotImplementedError("The method ``get_matrix(p)``" " is not implemented.") class FullHessianUpdateStrategy(HessianUpdateStrategy): """Hessian update strategy with full dimensional internal representation. """ _syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update _syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update # Symmetric matrix-vector product _symv = get_blas_funcs('symv', dtype='d') def __init__(self, init_scale='auto'): self.init_scale = init_scale # Until initialize is called we can't really use the class, # so it makes sense to set everything to None. 
self.first_iteration = None self.approx_type = None self.B = None self.H = None def initialize(self, n, approx_type): """Initialize internal matrix. Allocate internal memory for storing and updating the Hessian or its inverse. Parameters ---------- n : int Problem dimension. approx_type : {'hess', 'inv_hess'} Selects either the Hessian or the inverse Hessian. When set to 'hess' the Hessian will be stored and updated. When set to 'inv_hess' its inverse will be used instead. """ self.first_iteration = True self.n = n self.approx_type = approx_type if approx_type not in ('hess', 'inv_hess'): raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.") # Create matrix if self.approx_type == 'hess': self.B = np.eye(n, dtype=float) else: self.H = np.eye(n, dtype=float) def _auto_scale(self, delta_x, delta_grad): # Heuristic to scale matrix at first iteration. # Described in Nocedal and Wright "Numerical Optimization" # p.143 formula (6.20). s_norm2 = np.dot(delta_x, delta_x) y_norm2 = np.dot(delta_grad, delta_grad) ys = np.abs(np.dot(delta_grad, delta_x)) if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0: return 1 if self.approx_type == 'hess': return y_norm2 / ys else: return ys / y_norm2 def _update_implementation(self, delta_x, delta_grad): raise NotImplementedError("The method ``_update_implementation``" " is not implemented.") def update(self, delta_x, delta_grad): """Update internal matrix. Update Hessian matrix or its inverse (depending on how 'approx_type' is defined) using information about the last evaluated points. Parameters ---------- delta_x : ndarray The difference between two points the gradient function have been evaluated at: ``delta_x = x2 - x1``. delta_grad : ndarray The difference between the gradients: ``delta_grad = grad(x2) - grad(x1)``. """ if np.all(delta_x == 0.0): return if np.all(delta_grad == 0.0): warn('delta_grad == 0.0. Check if the approximated ' 'function is linear. If the function is linear ' 'better results can be obtained by defining the ' 'Hessian as zero instead of using quasi-Newton ' 'approximations.', UserWarning) return if self.first_iteration: # Get user specific scale if self.init_scale == "auto": scale = self._auto_scale(delta_x, delta_grad) else: scale = float(self.init_scale) # Scale initial matrix with ``scale * np.eye(n)`` if self.approx_type == 'hess': self.B *= scale else: self.H *= scale self.first_iteration = False self._update_implementation(delta_x, delta_grad) def dot(self, p): """Compute the product of the internal matrix with the given vector. Parameters ---------- p : array_like 1-D array representing a vector. Returns ------- Hp : array 1-D represents the result of multiplying the approximation matrix by vector p. """ if self.approx_type == 'hess': return self._symv(1, self.B, p) else: return self._symv(1, self.H, p) def get_matrix(self): """Return the current internal matrix. Returns ------- M : ndarray, shape (n, n) Dense matrix containing either the Hessian or its inverse (depending on how `approx_type` was defined). """ if self.approx_type == 'hess': M = np.copy(self.B) else: M = np.copy(self.H) li = np.tril_indices_from(M, k=-1) M[li] = M.T[li] return M class BFGS(FullHessianUpdateStrategy): """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. Parameters ---------- exception_strategy : {'skip_update', 'damp_update'}, optional Define how to proceed when the curvature condition is violated. Set it to 'skip_update' to just skip the update. 
Or, alternatively, set it to 'damp_update' to interpolate between the actual BFGS result and the unmodified matrix. Both exception strategies are explained in [1]_, p.536-537. min_curvature : float This number, scaled by a normalization factor, defines the minimum curvature ``dot(delta_grad, delta_x)`` allowed to go unaffected by the exception strategy. By default it is equal to 1e-8 when ``exception_strategy = 'skip_update'`` and equal to 0.2 when ``exception_strategy = 'damp_update'``. init_scale : {float, 'auto'} Matrix scale at first iteration. At the first iteration the Hessian matrix or its inverse will be initialized with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. Set it to 'auto' in order to use an automatic heuristic for choosing the initial scale. The heuristic is described in [1]_, p.143. By default uses 'auto'. Notes ----- The update is based on the description in [1]_, p.140. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ def __init__(self, exception_strategy='skip_update', min_curvature=None, init_scale='auto'): if exception_strategy == 'skip_update': if min_curvature is not None: self.min_curvature = min_curvature else: self.min_curvature = 1e-8 elif exception_strategy == 'damp_update': if min_curvature is not None: self.min_curvature = min_curvature else: self.min_curvature = 0.2 else: raise ValueError("`exception_strategy` must be 'skip_update' " "or 'damp_update'.") super().__init__(init_scale) self.exception_strategy = exception_strategy def _update_inverse_hessian(self, ys, Hy, yHy, s): """Update the inverse Hessian matrix. BFGS update using the formula: ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T) - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)`` where ``s = delta_x`` and ``y = delta_grad``. This formula is equivalent to (6.17) in [1]_ written in a more efficient way for implementation. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H) self.H = self._syr((ys+yHy)/ys**2, s, a=self.H) def _update_hessian(self, ys, Bs, sBs, y): """Update the Hessian matrix. BFGS update using the formula: ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y`` where ``s`` is short for ``delta_x`` and ``y`` is short for ``delta_grad``. Formula (6.19) in [1]_. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ self.B = self._syr(1.0 / ys, y, a=self.B) self.B = self._syr(-1.0 / sBs, Bs, a=self.B) def _update_implementation(self, delta_x, delta_grad): # Auxiliary variables w and z if self.approx_type == 'hess': w = delta_x z = delta_grad else: w = delta_grad z = delta_x # Do some common operations wz = np.dot(w, z) Mw = self.dot(w) wMw = Mw.dot(w) # Guarantee that wMw > 0 by reinitializing matrix. # While this is always true in exact arithmetic, # an indefinite matrix may appear due to roundoff errors. if wMw <= 0.0: scale = self._auto_scale(delta_x, delta_grad) # Reinitialize matrix if self.approx_type == 'hess': self.B = scale * np.eye(self.n, dtype=float) else: self.H = scale * np.eye(self.n, dtype=float) # Do common operations for new matrix Mw = self.dot(w) wMw = Mw.dot(w) # Check if curvature condition is violated if wz <= self.min_curvature * wMw: # If the option 'skip_update' is set # we just skip the update when the condition # is violated.
if self.exception_strategy == 'skip_update': return # If the option 'damp_update' is set we # interpolate between the actual BFGS # result and the unmodified matrix. elif self.exception_strategy == 'damp_update': update_factor = (1-self.min_curvature) / (1 - wz/wMw) z = update_factor*z + (1-update_factor)*Mw wz = np.dot(w, z) # Update matrix if self.approx_type == 'hess': self._update_hessian(wz, Mw, wMw, z) else: self._update_inverse_hessian(wz, Mw, wMw, z) class SR1(FullHessianUpdateStrategy): """Symmetric-rank-1 Hessian update strategy. Parameters ---------- min_denominator : float This number, scaled by a normalization factor, defines the minimum denominator magnitude allowed in the update. When the condition is violated we skip the update. By default uses ``1e-8``. init_scale : {float, 'auto'}, optional Matrix scale at first iteration. At the first iteration the Hessian matrix or its inverse will be initialized with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension. Set it to 'auto' in order to use an automatic heuristic for choosing the initial scale. The heuristic is described in [1]_, p.143. By default uses 'auto'. Notes ----- The update is based on the description in [1]_, p.144-146. References ---------- .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ def __init__(self, min_denominator=1e-8, init_scale='auto'): self.min_denominator = min_denominator super().__init__(init_scale) def _update_implementation(self, delta_x, delta_grad): # Auxiliary variables w and z if self.approx_type == 'hess': w = delta_x z = delta_grad else: w = delta_grad z = delta_x # Do some common operations Mw = self.dot(w) z_minus_Mw = z - Mw denominator = np.dot(w, z_minus_Mw) # If the denominator is too small # we just skip the update. if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw): return # Update matrix if self.approx_type == 'hess': self.B = self._syr(1/denominator, z_minus_Mw, a=self.B) else: self.H = self._syr(1/denominator, z_minus_Mw, a=self.H)
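

if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch (runs only when this file is executed directly):
    # feed a BFGS approximation the step/gradient differences of the quadratic
    # f(x) = 0.5 * x.T @ D @ x, whose exact Hessian is D. The visited points
    # are arbitrary, illustrative choices.
    D = np.diag([1.0, 10.0])

    def _demo_grad(x):
        return D @ x

    approx = BFGS()
    approx.initialize(2, 'hess')
    x_prev = np.array([1.0, 1.0])
    for x_next in (np.array([0.5, 0.2]), np.array([0.1, -0.3])):
        approx.update(x_next - x_prev, _demo_grad(x_next) - _demo_grad(x_prev))
        x_prev = x_next
    # Each update enforces the secant condition B @ delta_x = delta_grad.
    print(approx.get_matrix())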
15,830
35.816279
80
py
scipy
scipy-main/scipy/optimize/zeros.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.optimize` namespace for importing the functions # included below. import warnings from . import _zeros_py __all__ = [ # noqa: F822 'CONVERGED', 'CONVERR', 'INPROGRESS', 'RootResults', 'SIGNERR', 'TOMS748Solver', 'VALUEERR', 'bisect', 'brenth', 'brentq', 'flag_map', 'namedtuple', 'newton', 'operator', 'results_c', 'ridder', 'toms748', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.optimize.zeros is deprecated and has no attribute " f"{name}. Try looking in scipy.optimize instead.") warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, " "the `scipy.optimize.zeros` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_zeros_py, name)
1,008
21.422222
78
py
scipy
scipy-main/scipy/optimize/_lbfgsb_py.py
""" Functions --------- .. autosummary:: :toctree: generated/ fmin_l_bfgs_b """ ## License for the Python wrapper ## ============================== ## Copyright (c) 2004 David M. Cooke <cookedm@physics.mcmaster.ca> ## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), ## to deal in the Software without restriction, including without limitation ## the rights to use, copy, modify, merge, publish, distribute, sublicense, ## and/or sell copies of the Software, and to permit persons to whom the ## Software is furnished to do so, subject to the following conditions: ## The above copyright notice and this permission notice shall be included in ## all copies or substantial portions of the Software. ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ## DEALINGS IN THE SOFTWARE. ## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy import numpy as np from numpy import array, asarray, float64, zeros from . import _lbfgsb from ._optimize import (MemoizeJac, OptimizeResult, _call_callback_maybe_halt, _wrap_callback, _check_unknown_options, _prepare_scalar_function) from ._constraints import old_bound_to_new from scipy.sparse.linalg import LinearOperator __all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct'] def fmin_l_bfgs_b(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, m=10, factr=1e7, pgtol=1e-5, epsilon=1e-8, iprint=-1, maxfun=15000, maxiter=15000, disp=None, callback=None, maxls=20): """ Minimize a function func using the L-BFGS-B algorithm. Parameters ---------- func : callable f(x,*args) Function to minimize. x0 : ndarray Initial guess. fprime : callable fprime(x,*args), optional The gradient of `func`. If None, then `func` returns the function value and the gradient (``f, g = func(x, *args)``), unless `approx_grad` is True in which case `func` returns only ``f``. args : sequence, optional Arguments to pass to `func` and `fprime`. approx_grad : bool, optional Whether to approximate the gradient numerically (in which case `func` returns only the function value). bounds : list, optional ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None or +-inf for one of ``min`` or ``max`` when there is no bound in that direction. m : int, optional The maximum number of variable metric corrections used to define the limited memory matrix. (The limited memory BFGS method does not store the full hessian but uses this many terms in an approximation to it.) factr : float, optional The iteration stops when ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps`` is the machine precision, which is automatically generated by the code. Typical values for `factr` are: 1e12 for low accuracy; 1e7 for moderate accuracy; 10.0 for extremely high accuracy. See Notes for relationship to `ftol`, which is exposed (instead of `factr`) by the `scipy.optimize.minimize` interface to L-BFGS-B. 
pgtol : float, optional The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= pgtol`` where ``proj g_i`` is the i-th component of the projected gradient. epsilon : float, optional Step size used when `approx_grad` is True, for numerically calculating the gradient iprint : int, optional Controls the frequency of output. ``iprint < 0`` means no output; ``iprint = 0`` print only one line at the last iteration; ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; ``iprint = 99`` print details of every iteration except n-vectors; ``iprint = 100`` print also the changes of active set and final x; ``iprint > 100`` print details of every iteration including x and g. disp : int, optional If zero, then no output. If a positive number, then this over-rides `iprint` (i.e., `iprint` gets the value of `disp`). maxfun : int, optional Maximum number of function evaluations. Note that this function may violate the limit because of evaluating gradients by numerical differentiation. maxiter : int, optional Maximum number of iterations. callback : callable, optional Called after each iteration, as ``callback(xk)``, where ``xk`` is the current parameter vector. maxls : int, optional Maximum number of line search steps (per iteration). Default is 20. Returns ------- x : array_like Estimated position of the minimum. f : float Value of `func` at the minimum. d : dict Information dictionary. * d['warnflag'] is - 0 if converged, - 1 if too many function evaluations or too many iterations, - 2 if stopped for another reason, given in d['task'] * d['grad'] is the gradient at the minimum (should be 0 ish) * d['funcalls'] is the number of function calls made. * d['nit'] is the number of iterations. See also -------- minimize: Interface to minimization algorithms for multivariate functions. See the 'L-BFGS-B' `method` in particular. Note that the `ftol` option is made available via that interface, while `factr` is provided via this interface, where `factr` is the factor multiplying the default machine floating-point precision to arrive at `ftol`: ``ftol = factr * numpy.finfo(float).eps``. Notes ----- License of L-BFGS-B (FORTRAN code): The version included here (in fortran code) is 3.0 (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal <nocedal@ece.nwu.edu>. It carries the following condition for use: This software is freely available, but we expect that all publications describing work using this software, or all commercial products using it, quote at least one of the references given below. This software is released under the BSD License. References ---------- * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound Constrained Optimization, (1995), SIAM Journal on Scientific and Statistical Computing, 16, 5, pp. 1190-1208. * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (2011), ACM Transactions on Mathematical Software, 38, 1. 
""" # handle fprime/approx_grad if approx_grad: fun = func jac = None elif fprime is None: fun = MemoizeJac(func) jac = fun.derivative else: fun = func jac = fprime # build options callback = _wrap_callback(callback) opts = {'disp': disp, 'iprint': iprint, 'maxcor': m, 'ftol': factr * np.finfo(float).eps, 'gtol': pgtol, 'eps': epsilon, 'maxfun': maxfun, 'maxiter': maxiter, 'callback': callback, 'maxls': maxls} res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds, **opts) d = {'grad': res['jac'], 'task': res['message'], 'funcalls': res['nfev'], 'nit': res['nit'], 'warnflag': res['status']} f = res['fun'] x = res['x'] return x, f, d def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None, disp=None, maxcor=10, ftol=2.2204460492503131e-09, gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000, iprint=-1, callback=None, maxls=20, finite_diff_rel_step=None, **unknown_options): """ Minimize a scalar function of one or more variables using the L-BFGS-B algorithm. Options ------- disp : None or int If `disp is None` (the default), then the supplied version of `iprint` is used. If `disp is not None`, then it overrides the supplied version of `iprint` with the behaviour you outlined. maxcor : int The maximum number of variable metric corrections used to define the limited memory matrix. (The limited memory BFGS method does not store the full hessian but uses this many terms in an approximation to it.) ftol : float The iteration stops when ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``. gtol : float The iteration will stop when ``max{|proj g_i | i = 1, ..., n} <= gtol`` where ``proj g_i`` is the i-th component of the projected gradient. eps : float or ndarray If `jac is None` the absolute step size used for numerical approximation of the jacobian via forward differences. maxfun : int Maximum number of function evaluations. Note that this function may violate the limit because of evaluating gradients by numerical differentiation. maxiter : int Maximum number of iterations. iprint : int, optional Controls the frequency of output. ``iprint < 0`` means no output; ``iprint = 0`` print only one line at the last iteration; ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations; ``iprint = 99`` print details of every iteration except n-vectors; ``iprint = 100`` print also the changes of active set and final x; ``iprint > 100`` print details of every iteration including x and g. maxls : int, optional Maximum number of line search steps (per iteration). Default is 20. finite_diff_rel_step : None or array_like, optional If `jac in ['2-point', '3-point', 'cs']` the relative step size to use for numerical approximation of the jacobian. The absolute step size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically. Notes ----- The option `ftol` is exposed via the `scipy.optimize.minimize` interface, but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The relationship between the two is ``ftol = factr * numpy.finfo(float).eps``. I.e., `factr` multiplies the default machine floating-point precision to arrive at `ftol`. 
""" _check_unknown_options(unknown_options) m = maxcor pgtol = gtol factr = ftol / np.finfo(float).eps x0 = asarray(x0).ravel() n, = x0.shape if bounds is None: bounds = [(None, None)] * n if len(bounds) != n: raise ValueError('length of x0 != length of bounds') # unbounded variables must use None, not +-inf, for optimizer to work properly bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds] # LBFGSB is sent 'old-style' bounds, 'new-style' bounds are required by # approx_derivative and ScalarFunction new_bounds = old_bound_to_new(bounds) # check bounds if (new_bounds[0] > new_bounds[1]).any(): raise ValueError("LBFGSB - one of the lower bounds is greater than an upper bound.") # initial vector must lie within the bounds. Otherwise ScalarFunction and # approx_derivative will cause problems x0 = np.clip(x0, new_bounds[0], new_bounds[1]) if disp is not None: if disp == 0: iprint = -1 else: iprint = disp sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps, bounds=new_bounds, finite_diff_rel_step=finite_diff_rel_step) func_and_grad = sf.fun_and_grad fortran_int = _lbfgsb.types.intvar.dtype nbd = zeros(n, fortran_int) low_bnd = zeros(n, float64) upper_bnd = zeros(n, float64) bounds_map = {(None, None): 0, (1, None): 1, (1, 1): 2, (None, 1): 3} for i in range(0, n): l, u = bounds[i] if l is not None: low_bnd[i] = l l = 1 if u is not None: upper_bnd[i] = u u = 1 nbd[i] = bounds_map[l, u] if not maxls > 0: raise ValueError('maxls must be positive.') x = array(x0, float64) f = array(0.0, float64) g = zeros((n,), float64) wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64) iwa = zeros(3*n, fortran_int) task = zeros(1, 'S60') csave = zeros(1, 'S60') lsave = zeros(4, fortran_int) isave = zeros(44, fortran_int) dsave = zeros(29, float64) task[:] = 'START' n_iterations = 0 while 1: # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \ _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, pgtol, wa, iwa, task, iprint, csave, lsave, isave, dsave, maxls) task_str = task.tobytes() if task_str.startswith(b'FG'): # The minimization routine wants f and g at the current x. # Note that interruptions due to maxfun are postponed # until the completion of the current minimization iteration. # Overwrite f and g: f, g = func_and_grad(x) elif task_str.startswith(b'NEW_X'): # new iteration n_iterations += 1 intermediate_result = OptimizeResult(x=x, fun=f) if _call_callback_maybe_halt(callback, intermediate_result): task[:] = 'STOP: CALLBACK REQUESTED HALT' if n_iterations >= maxiter: task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT' elif sf.nfev > maxfun: task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS ' 'EXCEEDS LIMIT') else: break task_str = task.tobytes().strip(b'\x00').strip() if task_str.startswith(b'CONV'): warnflag = 0 elif sf.nfev > maxfun or n_iterations >= maxiter: warnflag = 1 else: warnflag = 2 # These two portions of the workspace are described in the mainlb # subroutine in lbfgsb.f. See line 363. s = wa[0: m*n].reshape(m, n) y = wa[m*n: 2*m*n].reshape(m, n) # See lbfgsb.f line 160 for this portion of the workspace. 
# isave(31) = the total number of BFGS updates prior the current iteration; n_bfgs_updates = isave[30] n_corrs = min(n_bfgs_updates, maxcor) hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs]) task_str = task_str.decode() return OptimizeResult(fun=f, jac=g, nfev=sf.nfev, njev=sf.ngev, nit=n_iterations, status=warnflag, message=task_str, x=x, success=(warnflag == 0), hess_inv=hess_inv) class LbfgsInvHessProduct(LinearOperator): """Linear operator for the L-BFGS approximate inverse Hessian. This operator computes the product of a vector with the approximate inverse of the Hessian of the objective function, using the L-BFGS limited memory approximation to the inverse Hessian, accumulated during the optimization. Objects of this class implement the ``scipy.sparse.linalg.LinearOperator`` interface. Parameters ---------- sk : array_like, shape=(n_corr, n) Array of `n_corr` most recent updates to the solution vector. (See [1]). yk : array_like, shape=(n_corr, n) Array of `n_corr` most recent updates to the gradient. (See [1]). References ---------- .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited storage." Mathematics of computation 35.151 (1980): 773-782. """ def __init__(self, sk, yk): """Construct the operator.""" if sk.shape != yk.shape or sk.ndim != 2: raise ValueError('sk and yk must have matching shape, (n_corrs, n)') n_corrs, n = sk.shape super().__init__(dtype=np.float64, shape=(n, n)) self.sk = sk self.yk = yk self.n_corrs = n_corrs self.rho = 1 / np.einsum('ij,ij->i', sk, yk) def _matvec(self, x): """Efficient matrix-vector multiply with the BFGS matrices. This calculation is described in Section (4) of [1]. Parameters ---------- x : ndarray An array with shape (n,) or (n,1). Returns ------- y : ndarray The matrix-vector product """ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho q = np.array(x, dtype=self.dtype, copy=True) if q.ndim == 2 and q.shape[1] == 1: q = q.reshape(-1) alpha = np.empty(n_corrs) for i in range(n_corrs-1, -1, -1): alpha[i] = rho[i] * np.dot(s[i], q) q = q - alpha[i]*y[i] r = q for i in range(n_corrs): beta = rho[i] * np.dot(y[i], r) r = r + s[i] * (alpha[i] - beta) return r def todense(self): """Return a dense array representation of this operator. Returns ------- arr : ndarray, shape=(n, n) An array with the same shape and containing the same data represented by this `LinearOperator`. """ s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho I = np.eye(*self.shape, dtype=self.dtype) Hk = I for i in range(n_corrs): A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i] A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i] Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] * s[i][np.newaxis, :]) return Hk
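# --- Editor's hedged usage sketch (not part of the original file) ---
# Minimal demonstration of ``fmin_l_bfgs_b`` on a bound-constrained Rosenbrock
# problem, assuming only NumPy and this module. ``_demo_fmin_l_bfgs_b`` is a
# hypothetical helper name, not part of the SciPy API.
def _demo_fmin_l_bfgs_b():
    import numpy as np
    from scipy.optimize import fmin_l_bfgs_b

    def rosen(x):
        return (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2

    def rosen_grad(x):
        g0 = -2 * (1 - x[0]) - 400 * x[0] * (x[1] - x[0]**2)
        g1 = 200 * (x[1] - x[0]**2)
        return np.array([g0, g1])

    x0 = np.array([0.0, 0.0])
    x, f, info = fmin_l_bfgs_b(rosen, x0, fprime=rosen_grad,
                               bounds=[(-2, 2), (-2, 2)])
    # info['warnflag'] == 0 signals convergence; info['grad'] should be ~0
    # at the minimum, which for Rosenbrock is x = (1, 1).
    return x, f, info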
18877
36.907631
92
py
scipy
scipy-main/scipy/optimize/_root_scalar.py
""" Unified interfaces to root finding algorithms for real or complex scalar functions. Functions --------- - root : find a root of a scalar function. """ import numpy as np from . import _zeros_py as optzeros from ._numdiff import approx_derivative __all__ = ['root_scalar'] ROOT_SCALAR_METHODS = ['bisect', 'brentq', 'brenth', 'ridder', 'toms748', 'newton', 'secant', 'halley'] class MemoizeDer: """Decorator that caches the value and derivative(s) of function each time it is called. This is a simplistic memoizer that calls and caches a single value of `f(x, *args)`. It assumes that `args` does not change between invocations. It supports the use case of a root-finder where `args` is fixed, `x` changes, and only rarely, if at all, does x assume the same value more than once.""" def __init__(self, fun): self.fun = fun self.vals = None self.x = None self.n_calls = 0 def __call__(self, x, *args): r"""Calculate f or use cached value if available""" # Derivative may be requested before the function itself, always check if self.vals is None or x != self.x: fg = self.fun(x, *args) self.x = x self.n_calls += 1 self.vals = fg[:] return self.vals[0] def fprime(self, x, *args): r"""Calculate f' or use a cached value if available""" if self.vals is None or x != self.x: self(x, *args) return self.vals[1] def fprime2(self, x, *args): r"""Calculate f'' or use a cached value if available""" if self.vals is None or x != self.x: self(x, *args) return self.vals[2] def ncalls(self): return self.n_calls def root_scalar(f, args=(), method=None, bracket=None, fprime=None, fprime2=None, x0=None, x1=None, xtol=None, rtol=None, maxiter=None, options=None): """ Find a root of a scalar function. Parameters ---------- f : callable A function to find a root of. args : tuple, optional Extra arguments passed to the objective function and its derivative(s). method : str, optional Type of solver. Should be one of - 'bisect' :ref:`(see here) <optimize.root_scalar-bisect>` - 'brentq' :ref:`(see here) <optimize.root_scalar-brentq>` - 'brenth' :ref:`(see here) <optimize.root_scalar-brenth>` - 'ridder' :ref:`(see here) <optimize.root_scalar-ridder>` - 'toms748' :ref:`(see here) <optimize.root_scalar-toms748>` - 'newton' :ref:`(see here) <optimize.root_scalar-newton>` - 'secant' :ref:`(see here) <optimize.root_scalar-secant>` - 'halley' :ref:`(see here) <optimize.root_scalar-halley>` bracket: A sequence of 2 floats, optional An interval bracketing a root. `f(x, *args)` must have different signs at the two endpoints. x0 : float, optional Initial guess. x1 : float, optional A second guess. fprime : bool or callable, optional If `fprime` is a boolean and is True, `f` is assumed to return the value of the objective function and of the derivative. `fprime` can also be a callable returning the derivative of `f`. In this case, it must accept the same arguments as `f`. fprime2 : bool or callable, optional If `fprime2` is a boolean and is True, `f` is assumed to return the value of the objective function and of the first and second derivatives. `fprime2` can also be a callable returning the second derivative of `f`. In this case, it must accept the same arguments as `f`. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options : dict, optional A dictionary of solver options. E.g., ``k``, see :obj:`show_options()` for details. Returns ------- sol : RootResults The solution represented as a ``RootResults`` object. 
Important attributes are: ``root`` the solution , ``converged`` a boolean flag indicating if the algorithm exited successfully and ``flag`` which describes the cause of the termination. See `RootResults` for a description of other attributes. See also -------- show_options : Additional options accepted by the solvers root : Find a root of a vector function. Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default is to use the best method available for the situation presented. If a bracket is provided, it may use one of the bracketing methods. If a derivative and an initial value are specified, it may select one of the derivative-based methods. If no method is judged applicable, it will raise an Exception. Arguments for each method are as follows (x=required, o=optional). +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ | method | f | args | bracket | x0 | x1 | fprime | fprime2 | xtol | rtol | maxiter | options | +===============================================+===+======+=========+====+====+========+=========+======+======+=========+=========+ | :ref:`bisect <optimize.root_scalar-bisect>` | x | o | x | | | | | o | o | o | o | +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ | :ref:`brentq <optimize.root_scalar-brentq>` | x | o | x | | | | | o | o | o | o | +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ | :ref:`brenth <optimize.root_scalar-brenth>` | x | o | x | | | | | o | o | o | o | +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ | :ref:`ridder <optimize.root_scalar-ridder>` | x | o | x | | | | | o | o | o | o | +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ | :ref:`toms748 <optimize.root_scalar-toms748>` | x | o | x | | | | | o | o | o | o | +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ | :ref:`secant <optimize.root_scalar-secant>` | x | o | | x | o | | | o | o | o | o | +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ | :ref:`newton <optimize.root_scalar-newton>` | x | o | | x | | o | | o | o | o | o | +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ | :ref:`halley <optimize.root_scalar-halley>` | x | o | | x | | x | x | o | o | o | o | +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+ Examples -------- Find the root of a simple cubic >>> from scipy import optimize >>> def f(x): ... return (x**3 - 1) # only one real root at x = 1 >>> def fprime(x): ... return 3*x**2 The `brentq` method takes as input a bracket >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq') >>> sol.root, sol.iterations, sol.function_calls (1.0, 10, 11) The `newton` method takes as input a single point and uses the derivative(s). 
>>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton') >>> sol.root, sol.iterations, sol.function_calls (1.0, 11, 22) The function can provide the value and derivative(s) in a single call. >>> def f_p_pp(x): ... return (x**3 - 1), 3*x**2, 6*x >>> sol = optimize.root_scalar( ... f_p_pp, x0=0.2, fprime=True, method='newton' ... ) >>> sol.root, sol.iterations, sol.function_calls (1.0, 11, 11) >>> sol = optimize.root_scalar( ... f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley' ... ) >>> sol.root, sol.iterations, sol.function_calls (1.0, 7, 8) """ # noqa if not isinstance(args, tuple): args = (args,) if options is None: options = {} # fun also returns the derivative(s) is_memoized = False if fprime2 is not None and not callable(fprime2): if bool(fprime2): f = MemoizeDer(f) is_memoized = True fprime2 = f.fprime2 fprime = f.fprime else: fprime2 = None if fprime is not None and not callable(fprime): if bool(fprime): f = MemoizeDer(f) is_memoized = True fprime = f.fprime else: fprime = None # respect solver-specific default tolerances - only pass in if actually set kwargs = {} for k in ['xtol', 'rtol', 'maxiter']: v = locals().get(k) if v is not None: kwargs[k] = v # Set any solver-specific options if options: kwargs.update(options) # Always request full_output from the underlying method as _root_scalar # always returns a RootResults object kwargs.update(full_output=True, disp=False) # Pick a method if not specified. # Use the "best" method available for the situation. if not method: if bracket: method = 'brentq' elif x0 is not None: if fprime: if fprime2: method = 'halley' else: method = 'newton' elif x1 is not None: method = 'secant' else: method = 'newton' if not method: raise ValueError('Unable to select a solver as neither bracket ' 'nor starting point provided.') meth = method.lower() map2underlying = {'halley': 'newton', 'secant': 'newton'} try: methodc = getattr(optzeros, map2underlying.get(meth, meth)) except AttributeError as e: raise ValueError('Unknown solver %s' % meth) from e if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']: if not isinstance(bracket, (list, tuple, np.ndarray)): raise ValueError('Bracket needed for %s' % method) a, b = bracket[:2] try: r, sol = methodc(f, a, b, args=args, **kwargs) except ValueError as e: # gh-17622 fixed some bugs in low-level solvers by raising an error # (rather than returning incorrect results) when the callable # returns a NaN. It did so by wrapping the callable rather than # modifying compiled code, so the iteration count is not available. if hasattr(e, "_x"): sol = optzeros.RootResults(root=e._x, iterations=np.nan, function_calls=e._function_calls, flag=str(e)) else: raise elif meth in ['secant']: if x0 is None: raise ValueError('x0 must not be None for %s' % method) if 'xtol' in kwargs: kwargs['tol'] = kwargs.pop('xtol') r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None, x1=x1, **kwargs) elif meth in ['newton']: if x0 is None: raise ValueError('x0 must not be None for %s' % method) if not fprime: # approximate fprime with finite differences def fprime(x): # `root_scalar` doesn't actually seem to support vectorized # use of `newton`. In that case, `approx_derivative` will # always get scalar input. Nonetheless, it always returns an # array, so we extract the element to produce scalar output. 
return approx_derivative(f, x, method='2-point')[0] if 'xtol' in kwargs: kwargs['tol'] = kwargs.pop('xtol') r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None, **kwargs) elif meth in ['halley']: if x0 is None: raise ValueError('x0 must not be None for %s' % method) if not fprime: raise ValueError('fprime must be specified for %s' % method) if not fprime2: raise ValueError('fprime2 must be specified for %s' % method) if 'xtol' in kwargs: kwargs['tol'] = kwargs.pop('xtol') r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs) else: raise ValueError('Unknown solver %s' % method) if is_memoized: # Replace the function_calls count with the memoized count. # Avoids double and triple-counting. n_calls = f.n_calls sol.function_calls = n_calls return sol def _root_scalar_brentq_doc(): r""" Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `f(x, *args)` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above """ pass def _root_scalar_brenth_doc(): r""" Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `f(x, *args)` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above. """ pass def _root_scalar_toms748_doc(): r""" Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `f(x, *args)` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above. """ pass def _root_scalar_secant_doc(): r""" Options ------- args : tuple, optional Extra arguments passed to the objective function. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. x0 : float, required Initial guess. x1 : float, required A second guess. options: dict, optional Specifies any method-specific options not covered above. """ pass def _root_scalar_newton_doc(): r""" Options ------- args : tuple, optional Extra arguments passed to the objective function and its derivative. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. x0 : float, required Initial guess. fprime : bool or callable, optional If `fprime` is a boolean and is True, `f` is assumed to return the value of derivative along with the objective function. `fprime` can also be a callable returning the derivative of `f`. In this case, it must accept the same arguments as `f`. options: dict, optional Specifies any method-specific options not covered above. 
""" pass def _root_scalar_halley_doc(): r""" Options ------- args : tuple, optional Extra arguments passed to the objective function and its derivatives. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. x0 : float, required Initial guess. fprime : bool or callable, required If `fprime` is a boolean and is True, `f` is assumed to return the value of derivative along with the objective function. `fprime` can also be a callable returning the derivative of `f`. In this case, it must accept the same arguments as `f`. fprime2 : bool or callable, required If `fprime2` is a boolean and is True, `f` is assumed to return the value of 1st and 2nd derivatives along with the objective function. `fprime2` can also be a callable returning the 2nd derivative of `f`. In this case, it must accept the same arguments as `f`. options: dict, optional Specifies any method-specific options not covered above. """ pass def _root_scalar_ridder_doc(): r""" Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `f(x, *args)` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above. """ pass def _root_scalar_bisect_doc(): r""" Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `f(x, *args)` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above. """ pass
19556
36.180608
137
py
scipy
scipy-main/scipy/optimize/_trustregion_constr/canonical_constraint.py
import numpy as np import scipy.sparse as sps class CanonicalConstraint: """Canonical constraint to use with trust-constr algorithm. It represents the set of constraints of the form:: f_eq(x) = 0 f_ineq(x) <= 0 where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see below. The class is supposed to be instantiated by factory methods, which should prepare the parameters listed below. Parameters ---------- n_eq, n_ineq : int Number of equality and inequality constraints respectively. fun : callable Function defining the constraints. The signature is ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq` components and ``c_ineq`` is ndarray with `n_ineq` components. jac : callable Function to evaluate the Jacobian of the constraint. The signature is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are either ndarray of csr_matrix of shapes (n_eq, n) and (n_ineq, n), respectively. hess : callable Function to evaluate the Hessian of the constraints multiplied by Lagrange multipliers, that is ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied shape (n, n) and provide a matrix-vector product operation ``H.dot(p)``. keep_feasible : ndarray, shape (n_ineq,) Mask indicating which inequality constraints should be kept feasible. """ def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible): self.n_eq = n_eq self.n_ineq = n_ineq self.fun = fun self.jac = jac self.hess = hess self.keep_feasible = keep_feasible @classmethod def from_PreparedConstraint(cls, constraint): """Create an instance from `PreparedConstrained` object.""" lb, ub = constraint.bounds cfun = constraint.fun keep_feasible = constraint.keep_feasible if np.all(lb == -np.inf) and np.all(ub == np.inf): return cls.empty(cfun.n) if np.all(lb == -np.inf) and np.all(ub == np.inf): return cls.empty(cfun.n) elif np.all(lb == ub): return cls._equal_to_canonical(cfun, lb) elif np.all(lb == -np.inf): return cls._less_to_canonical(cfun, ub, keep_feasible) elif np.all(ub == np.inf): return cls._greater_to_canonical(cfun, lb, keep_feasible) else: return cls._interval_to_canonical(cfun, lb, ub, keep_feasible) @classmethod def empty(cls, n): """Create an "empty" instance. This "empty" instance is required to allow working with unconstrained problems as if they have some constraints. """ empty_fun = np.empty(0) empty_jac = np.empty((0, n)) empty_hess = sps.csr_matrix((n, n)) def fun(x): return empty_fun, empty_fun def jac(x): return empty_jac, empty_jac def hess(x, v_eq, v_ineq): return empty_hess return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_)) @classmethod def concatenate(cls, canonical_constraints, sparse_jacobian): """Concatenate multiple `CanonicalConstraint` into one. `sparse_jacobian` (bool) determines the Jacobian format of the concatenated constraint. Note that items in `canonical_constraints` must have their Jacobians in the same format. 
""" def fun(x): if canonical_constraints: eq_all, ineq_all = zip( *[c.fun(x) for c in canonical_constraints]) else: eq_all, ineq_all = [], [] return np.hstack(eq_all), np.hstack(ineq_all) if sparse_jacobian: vstack = sps.vstack else: vstack = np.vstack def jac(x): if canonical_constraints: eq_all, ineq_all = zip( *[c.jac(x) for c in canonical_constraints]) else: eq_all, ineq_all = [], [] return vstack(eq_all), vstack(ineq_all) def hess(x, v_eq, v_ineq): hess_all = [] index_eq = 0 index_ineq = 0 for c in canonical_constraints: vc_eq = v_eq[index_eq:index_eq + c.n_eq] vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq] hess_all.append(c.hess(x, vc_eq, vc_ineq)) index_eq += c.n_eq index_ineq += c.n_ineq def matvec(p): result = np.zeros_like(p) for h in hess_all: result += h.dot(p) return result n = x.shape[0] return sps.linalg.LinearOperator((n, n), matvec, dtype=float) n_eq = sum(c.n_eq for c in canonical_constraints) n_ineq = sum(c.n_ineq for c in canonical_constraints) keep_feasible = np.hstack([c.keep_feasible for c in canonical_constraints]) return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) @classmethod def _equal_to_canonical(cls, cfun, value): empty_fun = np.empty(0) n = cfun.n n_eq = value.shape[0] n_ineq = 0 keep_feasible = np.empty(0, dtype=bool) if cfun.sparse_jacobian: empty_jac = sps.csr_matrix((0, n)) else: empty_jac = np.empty((0, n)) def fun(x): return cfun.fun(x) - value, empty_fun def jac(x): return cfun.jac(x), empty_jac def hess(x, v_eq, v_ineq): return cfun.hess(x, v_eq) empty_fun = np.empty(0) n = cfun.n if cfun.sparse_jacobian: empty_jac = sps.csr_matrix((0, n)) else: empty_jac = np.empty((0, n)) return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) @classmethod def _less_to_canonical(cls, cfun, ub, keep_feasible): empty_fun = np.empty(0) n = cfun.n if cfun.sparse_jacobian: empty_jac = sps.csr_matrix((0, n)) else: empty_jac = np.empty((0, n)) finite_ub = ub < np.inf n_eq = 0 n_ineq = np.sum(finite_ub) if np.all(finite_ub): def fun(x): return empty_fun, cfun.fun(x) - ub def jac(x): return empty_jac, cfun.jac(x) def hess(x, v_eq, v_ineq): return cfun.hess(x, v_ineq) else: finite_ub = np.nonzero(finite_ub)[0] keep_feasible = keep_feasible[finite_ub] ub = ub[finite_ub] def fun(x): return empty_fun, cfun.fun(x)[finite_ub] - ub def jac(x): return empty_jac, cfun.jac(x)[finite_ub] def hess(x, v_eq, v_ineq): v = np.zeros(cfun.m) v[finite_ub] = v_ineq return cfun.hess(x, v) return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) @classmethod def _greater_to_canonical(cls, cfun, lb, keep_feasible): empty_fun = np.empty(0) n = cfun.n if cfun.sparse_jacobian: empty_jac = sps.csr_matrix((0, n)) else: empty_jac = np.empty((0, n)) finite_lb = lb > -np.inf n_eq = 0 n_ineq = np.sum(finite_lb) if np.all(finite_lb): def fun(x): return empty_fun, lb - cfun.fun(x) def jac(x): return empty_jac, -cfun.jac(x) def hess(x, v_eq, v_ineq): return cfun.hess(x, -v_ineq) else: finite_lb = np.nonzero(finite_lb)[0] keep_feasible = keep_feasible[finite_lb] lb = lb[finite_lb] def fun(x): return empty_fun, lb - cfun.fun(x)[finite_lb] def jac(x): return empty_jac, -cfun.jac(x)[finite_lb] def hess(x, v_eq, v_ineq): v = np.zeros(cfun.m) v[finite_lb] = -v_ineq return cfun.hess(x, v) return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) @classmethod def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible): lb_inf = lb == -np.inf ub_inf = ub == np.inf equal = lb == ub less = lb_inf & ~ub_inf greater = ub_inf & ~lb_inf interval = ~equal & ~lb_inf & ~ub_inf equal = np.nonzero(equal)[0] less = 
np.nonzero(less)[0] greater = np.nonzero(greater)[0] interval = np.nonzero(interval)[0] n_less = less.shape[0] n_greater = greater.shape[0] n_interval = interval.shape[0] n_ineq = n_less + n_greater + 2 * n_interval n_eq = equal.shape[0] keep_feasible = np.hstack((keep_feasible[less], keep_feasible[greater], keep_feasible[interval], keep_feasible[interval])) def fun(x): f = cfun.fun(x) eq = f[equal] - lb[equal] le = f[less] - ub[less] ge = lb[greater] - f[greater] il = f[interval] - ub[interval] ig = lb[interval] - f[interval] return eq, np.hstack((le, ge, il, ig)) def jac(x): J = cfun.jac(x) eq = J[equal] le = J[less] ge = -J[greater] il = J[interval] ig = -il if sps.issparse(J): ineq = sps.vstack((le, ge, il, ig)) else: ineq = np.vstack((le, ge, il, ig)) return eq, ineq def hess(x, v_eq, v_ineq): n_start = 0 v_l = v_ineq[n_start:n_start + n_less] n_start += n_less v_g = v_ineq[n_start:n_start + n_greater] n_start += n_greater v_il = v_ineq[n_start:n_start + n_interval] n_start += n_interval v_ig = v_ineq[n_start:n_start + n_interval] v = np.zeros_like(lb) v[equal] = v_eq v[less] = v_l v[greater] = -v_g v[interval] = v_il - v_ig return cfun.hess(x, v) return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible) def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian): """Convert initial values of the constraints to the canonical format. The purpose to avoid one additional call to the constraints at the initial point. It takes saved values in `PreparedConstraint`, modififies and concatenates them to the canonical constraint format. """ c_eq = [] c_ineq = [] J_eq = [] J_ineq = [] for c in prepared_constraints: f = c.fun.f J = c.fun.J lb, ub = c.bounds if np.all(lb == ub): c_eq.append(f - lb) J_eq.append(J) elif np.all(lb == -np.inf): finite_ub = ub < np.inf c_ineq.append(f[finite_ub] - ub[finite_ub]) J_ineq.append(J[finite_ub]) elif np.all(ub == np.inf): finite_lb = lb > -np.inf c_ineq.append(lb[finite_lb] - f[finite_lb]) J_ineq.append(-J[finite_lb]) else: lb_inf = lb == -np.inf ub_inf = ub == np.inf equal = lb == ub less = lb_inf & ~ub_inf greater = ub_inf & ~lb_inf interval = ~equal & ~lb_inf & ~ub_inf c_eq.append(f[equal] - lb[equal]) c_ineq.append(f[less] - ub[less]) c_ineq.append(lb[greater] - f[greater]) c_ineq.append(f[interval] - ub[interval]) c_ineq.append(lb[interval] - f[interval]) J_eq.append(J[equal]) J_ineq.append(J[less]) J_ineq.append(-J[greater]) J_ineq.append(J[interval]) J_ineq.append(-J[interval]) c_eq = np.hstack(c_eq) if c_eq else np.empty(0) c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0) if sparse_jacobian: vstack = sps.vstack empty = sps.csr_matrix((0, n)) else: vstack = np.vstack empty = np.empty((0, n)) J_eq = vstack(J_eq) if J_eq else empty J_ineq = vstack(J_ineq) if J_ineq else empty return c_eq, c_ineq, J_eq, J_ineq
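# --- Editor's hedged usage sketch (not part of the original file) ---
# Shows how a user-facing constraint is reduced to the canonical
# ``f_eq(x) = 0`` / ``f_ineq(x) <= 0`` form used by trust-constr. The import
# path mirrors the one used by ``minimize_trustregion_constr`` in this
# package; these are internal APIs, so treat the sketch as illustrative only.
def _demo_canonical_constraint():
    import numpy as np
    from scipy.optimize._constraints import NonlinearConstraint, PreparedConstraint

    # x0**2 + x1**2 <= 1: an upper bound only, so one inequality and no equality.
    nlc = NonlinearConstraint(lambda x: x[0]**2 + x[1]**2, -np.inf, 1.0)
    x0 = np.array([0.5, 0.5])

    prepared = PreparedConstraint(nlc, x0)
    canonical = CanonicalConstraint.from_PreparedConstraint(prepared)

    c_eq, c_ineq = canonical.fun(x0)
    # Expect n_eq == 0, n_ineq == 1, and c_ineq = f(x0) - ub = 0.5 - 1.0 = -0.5.
    return canonical.n_eq, canonical.n_ineq, c_eq, c_ineq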
12538
31.069054
79
py
scipy
scipy-main/scipy/optimize/_trustregion_constr/setup.py
def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('_trustregion_constr', parent_package, top_path) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
357
31.545455
75
py
scipy
scipy-main/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
import time import numpy as np from scipy.sparse.linalg import LinearOperator from .._differentiable_functions import VectorFunction from .._constraints import ( NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds) from .._hessian_update_strategy import BFGS from .._optimize import OptimizeResult from .._differentiable_functions import ScalarFunction from .equality_constrained_sqp import equality_constrained_sqp from .canonical_constraint import (CanonicalConstraint, initial_constraints_as_canonical) from .tr_interior_point import tr_interior_point from .report import BasicReport, SQPReport, IPReport TERMINATION_MESSAGES = { 0: "The maximum number of function evaluations is exceeded.", 1: "`gtol` termination condition is satisfied.", 2: "`xtol` termination condition is satisfied.", 3: "`callback` function requested termination." } class HessianLinearOperator: """Build LinearOperator from hessp""" def __init__(self, hessp, n): self.hessp = hessp self.n = n def __call__(self, x, *args): def matvec(p): return self.hessp(x, p, *args) return LinearOperator((self.n, self.n), matvec=matvec) class LagrangianHessian: """The Hessian of the Lagrangian as LinearOperator. The Lagrangian is computed as the objective function plus all the constraints multiplied with some numbers (Lagrange multipliers). """ def __init__(self, n, objective_hess, constraints_hess): self.n = n self.objective_hess = objective_hess self.constraints_hess = constraints_hess def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)): H_objective = self.objective_hess(x) H_constraints = self.constraints_hess(x, v_eq, v_ineq) def matvec(p): return H_objective.dot(p) + H_constraints.dot(p) return LinearOperator((self.n, self.n), matvec) def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, start_time, tr_radius, constr_penalty, cg_info): state.nit += 1 state.nfev = objective.nfev state.njev = objective.ngev state.nhev = objective.nhev state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0 for c in prepared_constraints] state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0 for c in prepared_constraints] state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0 for c in prepared_constraints] if not last_iteration_failed: state.x = x state.fun = objective.f state.grad = objective.g state.v = [c.fun.v for c in prepared_constraints] state.constr = [c.fun.f for c in prepared_constraints] state.jac = [c.fun.J for c in prepared_constraints] # Compute Lagrangian Gradient state.lagrangian_grad = np.copy(state.grad) for c in prepared_constraints: state.lagrangian_grad += c.fun.J.T.dot(c.fun.v) state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf) # Compute maximum constraint violation state.constr_violation = 0 for i in range(len(prepared_constraints)): lb, ub = prepared_constraints[i].bounds c = state.constr[i] state.constr_violation = np.max([state.constr_violation, np.max(lb - c), np.max(c - ub)]) state.execution_time = time.time() - start_time state.tr_radius = tr_radius state.constr_penalty = constr_penalty state.cg_niter += cg_info["niter"] state.cg_stop_cond = cg_info["stop_cond"] return state def update_state_ip(state, x, last_iteration_failed, objective, prepared_constraints, start_time, tr_radius, constr_penalty, cg_info, barrier_parameter, barrier_tolerance): state = update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, start_time, tr_radius, constr_penalty, cg_info) 
state.barrier_parameter = barrier_parameter state.barrier_tolerance = barrier_tolerance return state def _minimize_trustregion_constr(fun, x0, args, grad, hess, hessp, bounds, constraints, xtol=1e-8, gtol=1e-8, barrier_tol=1e-8, sparse_jacobian=None, callback=None, maxiter=1000, verbose=0, finite_diff_rel_step=None, initial_constr_penalty=1.0, initial_tr_radius=1.0, initial_barrier_parameter=0.1, initial_barrier_tolerance=0.1, factorization_method=None, disp=False): """Minimize a scalar function subject to constraints. Parameters ---------- gtol : float, optional Tolerance for termination by the norm of the Lagrangian gradient. The algorithm will terminate when both the infinity norm (i.e., max abs value) of the Lagrangian gradient and the constraint violation are smaller than ``gtol``. Default is 1e-8. xtol : float, optional Tolerance for termination by the change of the independent variable. The algorithm will terminate when ``tr_radius < xtol``, where ``tr_radius`` is the radius of the trust region used in the algorithm. Default is 1e-8. barrier_tol : float, optional Threshold on the barrier parameter for the algorithm termination. When inequality constraints are present, the algorithm will terminate only when the barrier parameter is less than `barrier_tol`. Default is 1e-8. sparse_jacobian : {bool, None}, optional Determines how to represent Jacobians of the constraints. If bool, then Jacobians of all the constraints will be converted to the corresponding format. If None (default), then Jacobians won't be converted, but the algorithm can proceed only if they all have the same format. initial_tr_radius: float, optional Initial trust radius. The trust radius gives the maximum distance between solution points in consecutive iterations. It reflects the trust the algorithm puts in the local approximation of the optimization problem. For an accurate local approximation the trust-region should be large and for an approximation valid only close to the current point it should be a small one. The trust radius is automatically updated throughout the optimization process, with ``initial_tr_radius`` being its initial value. Default is 1 (recommended in [1]_, p. 19). initial_constr_penalty : float, optional Initial constraints penalty parameter. The penalty parameter is used for balancing the requirements of decreasing the objective function and satisfying the constraints. It is used for defining the merit function: ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``, where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all the constraints. The merit function is used for accepting or rejecting trial points and ``constr_penalty`` weights the two conflicting goals of reducing objective function and constraints. The penalty is automatically updated throughout the optimization process, with ``initial_constr_penalty`` being its initial value. Default is 1 (recommended in [1]_, p 19). initial_barrier_parameter, initial_barrier_tolerance: float, optional Initial barrier parameter and initial tolerance for the barrier subproblem. Both are used only when inequality constraints are present. For dealing with optimization problems ``min_x f(x)`` subject to inequality constraints ``c(x) <= 0`` the algorithm introduces slack variables, solving the problem ``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality constraints ``c(x) + s = 0`` instead of the original problem. 
This subproblem is solved for decreasing values of ``barrier_parameter`` and with decreasing tolerances for the termination, starting with ``initial_barrier_parameter`` for the barrier parameter and ``initial_barrier_tolerance`` for the barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19). Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated with the same prefactor. factorization_method : string or None, optional Method to factorize the Jacobian of the constraints. Use None (default) for the auto selection or one of: - 'NormalEquation' (requires scikit-sparse) - 'AugmentedSystem' - 'QRFactorization' - 'SVDFactorization' The methods 'NormalEquation' and 'AugmentedSystem' can be used only with sparse constraints. The projections required by the algorithm will be computed using, respectively, the normal equation and the augmented system approaches explained in [1]_. 'NormalEquation' computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem' performs the LU factorization of an augmented system. They usually provide similar results. 'AugmentedSystem' is used by default for sparse matrices. The methods 'QRFactorization' and 'SVDFactorization' can be used only with dense constraints. They compute the required projections using, respectively, QR and SVD factorizations. The 'SVDFactorization' method can cope with Jacobian matrices with deficient row rank and will be used whenever other factorization methods fail (which may imply the conversion of sparse matrices to a dense format when required). By default, 'QRFactorization' is used for dense matrices. finite_diff_rel_step : None or array_like, optional Relative step size for the finite difference approximation. maxiter : int, optional Maximum number of algorithm iterations. Default is 1000. verbose : {0, 1, 2}, optional Level of algorithm's verbosity: * 0 (default) : work silently. * 1 : display a termination report. * 2 : display progress during iterations. * 3 : display progress during iterations (more complete report). disp : bool, optional If True (default), then `verbose` will be set to 1 if it was 0. Returns ------- `OptimizeResult` with the fields documented below. Note the following: 1. All values corresponding to the constraints are ordered as they were passed to the solver. And values corresponding to `bounds` constraints are put *after* other constraints. 2. All numbers of function, Jacobian or Hessian evaluations correspond to numbers of actual Python function calls. It means, for example, that if a Jacobian is estimated by finite differences, then the number of Jacobian evaluations will be zero and the number of function evaluations will be incremented by all calls during the finite difference estimation. x : ndarray, shape (n,) Solution found. optimality : float Infinity norm of the Lagrangian gradient at the solution. constr_violation : float Maximum constraint violation at the solution. fun : float Objective function at the solution. grad : ndarray, shape (n,) Gradient of the objective function at the solution. lagrangian_grad : ndarray, shape (n,) Gradient of the Lagrangian function at the solution. nit : int Total number of iterations. nfev : integer Number of the objective function evaluations. njev : integer Number of the objective function gradient evaluations. nhev : integer Number of the objective function Hessian evaluations. cg_niter : int Total number of the conjugate gradient method iterations. 
method : {'equality_constrained_sqp', 'tr_interior_point'} Optimization method used. constr : list of ndarray List of constraint values at the solution. jac : list of {ndarray, sparse matrix} List of the Jacobian matrices of the constraints at the solution. v : list of ndarray List of the Lagrange multipliers for the constraints at the solution. For an inequality constraint a positive multiplier means that the upper bound is active, a negative multiplier means that the lower bound is active and if a multiplier is zero it means the constraint is not active. constr_nfev : list of int Number of constraint evaluations for each of the constraints. constr_njev : list of int Number of Jacobian matrix evaluations for each of the constraints. constr_nhev : list of int Number of Hessian evaluations for each of the constraints. tr_radius : float Radius of the trust region at the last iteration. constr_penalty : float Penalty parameter at the last iteration, see `initial_constr_penalty`. barrier_tolerance : float Tolerance for the barrier subproblem at the last iteration. Only for problems with inequality constraints. barrier_parameter : float Barrier parameter at the last iteration. Only for problems with inequality constraints. execution_time : float Total execution time. message : str Termination message. status : {0, 1, 2, 3} Termination status: * 0 : The maximum number of function evaluations is exceeded. * 1 : `gtol` termination condition is satisfied. * 2 : `xtol` termination condition is satisfied. * 3 : `callback` function requested termination. cg_stop_cond : int Reason for CG subproblem termination at the last iteration: * 0 : CG subproblem not evaluated. * 1 : Iteration limit was reached. * 2 : Reached the trust-region boundary. * 3 : Negative curvature detected. * 4 : Tolerance was satisfied. References ---------- .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. Trust region methods. 2000. Siam. pp. 19. """ x0 = np.atleast_1d(x0).astype(float) n_vars = np.size(x0) if hess is None: if callable(hessp): hess = HessianLinearOperator(hessp, n_vars) else: hess = BFGS() if disp and verbose == 0: verbose = 1 if bounds is not None: finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub, bounds.keep_feasible, n_vars) else: finite_diff_bounds = (-np.inf, np.inf) # Define Objective Function objective = ScalarFunction(fun, x0, args, grad, hess, finite_diff_rel_step, finite_diff_bounds) # Put constraints in list format when needed. if isinstance(constraints, (NonlinearConstraint, LinearConstraint)): constraints = [constraints] # Prepare constraints. prepared_constraints = [ PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds) for c in constraints] # Check that all constraints are either sparse or dense. n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints) if 0 < n_sparse < len(prepared_constraints): raise ValueError("All constraints must have the same kind of the " "Jacobian --- either all sparse or all dense. " "You can set the sparsity globally by setting " "`sparse_jacobian` to either True of False.") if prepared_constraints: sparse_jacobian = n_sparse > 0 if bounds is not None: if sparse_jacobian is None: sparse_jacobian = True prepared_constraints.append(PreparedConstraint(bounds, x0, sparse_jacobian)) # Concatenate initial constraints to the canonical form. c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical( n_vars, prepared_constraints, sparse_jacobian) # Prepare all canonical constraints and concatenate it into one. 
canonical_all = [CanonicalConstraint.from_PreparedConstraint(c) for c in prepared_constraints] if len(canonical_all) == 0: canonical = CanonicalConstraint.empty(n_vars) elif len(canonical_all) == 1: canonical = canonical_all[0] else: canonical = CanonicalConstraint.concatenate(canonical_all, sparse_jacobian) # Generate the Hessian of the Lagrangian. lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess) # Choose appropriate method if canonical.n_ineq == 0: method = 'equality_constrained_sqp' else: method = 'tr_interior_point' # Construct OptimizeResult state = OptimizeResult( nit=0, nfev=0, njev=0, nhev=0, cg_niter=0, cg_stop_cond=0, fun=objective.f, grad=objective.g, lagrangian_grad=np.copy(objective.g), constr=[c.fun.f for c in prepared_constraints], jac=[c.fun.J for c in prepared_constraints], constr_nfev=[0 for c in prepared_constraints], constr_njev=[0 for c in prepared_constraints], constr_nhev=[0 for c in prepared_constraints], v=[c.fun.v for c in prepared_constraints], method=method) # Start counting start_time = time.time() # Define stop criteria if method == 'equality_constrained_sqp': def stop_criteria(state, x, last_iteration_failed, optimality, constr_violation, tr_radius, constr_penalty, cg_info): state = update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, start_time, tr_radius, constr_penalty, cg_info) if verbose == 2: BasicReport.print_iteration(state.nit, state.nfev, state.cg_niter, state.fun, state.tr_radius, state.optimality, state.constr_violation) elif verbose > 2: SQPReport.print_iteration(state.nit, state.nfev, state.cg_niter, state.fun, state.tr_radius, state.optimality, state.constr_violation, state.constr_penalty, state.cg_stop_cond) state.status = None state.niter = state.nit # Alias for callback (backward-compatibility) if callback is not None: callback_stop = False try: callback_stop = callback(state) except StopIteration: callback_stop = True if callback_stop: state.status = 3 return True if state.optimality < gtol and state.constr_violation < gtol: state.status = 1 elif state.tr_radius < xtol: state.status = 2 elif state.nit >= maxiter: state.status = 0 return state.status in (0, 1, 2, 3) elif method == 'tr_interior_point': def stop_criteria(state, x, last_iteration_failed, tr_radius, constr_penalty, cg_info, barrier_parameter, barrier_tolerance): state = update_state_ip(state, x, last_iteration_failed, objective, prepared_constraints, start_time, tr_radius, constr_penalty, cg_info, barrier_parameter, barrier_tolerance) if verbose == 2: BasicReport.print_iteration(state.nit, state.nfev, state.cg_niter, state.fun, state.tr_radius, state.optimality, state.constr_violation) elif verbose > 2: IPReport.print_iteration(state.nit, state.nfev, state.cg_niter, state.fun, state.tr_radius, state.optimality, state.constr_violation, state.constr_penalty, state.barrier_parameter, state.cg_stop_cond) state.status = None state.niter = state.nit # Alias for callback (backward compatibility) if callback is not None: callback_stop = False try: callback_stop = callback(state) except StopIteration: callback_stop = True if callback_stop: state.status = 3 return True if state.optimality < gtol and state.constr_violation < gtol: state.status = 1 elif (state.tr_radius < xtol and state.barrier_parameter < barrier_tol): state.status = 2 elif state.nit >= maxiter: state.status = 0 return state.status in (0, 1, 2, 3) if verbose == 2: BasicReport.print_header() elif verbose > 2: if method == 'equality_constrained_sqp': 
SQPReport.print_header() elif method == 'tr_interior_point': IPReport.print_header() # Call inferior function to do the optimization if method == 'equality_constrained_sqp': def fun_and_constr(x): f = objective.fun(x) c_eq, _ = canonical.fun(x) return f, c_eq def grad_and_jac(x): g = objective.grad(x) J_eq, _ = canonical.jac(x) return g, J_eq _, result = equality_constrained_sqp( fun_and_constr, grad_and_jac, lagrangian_hess, x0, objective.f, objective.g, c_eq0, J_eq0, stop_criteria, state, initial_constr_penalty, initial_tr_radius, factorization_method) elif method == 'tr_interior_point': _, result = tr_interior_point( objective.fun, objective.grad, lagrangian_hess, n_vars, canonical.n_ineq, canonical.n_eq, canonical.fun, canonical.jac, x0, objective.f, objective.g, c_ineq0, J_ineq0, c_eq0, J_eq0, stop_criteria, canonical.keep_feasible, xtol, state, initial_barrier_parameter, initial_barrier_tolerance, initial_constr_penalty, initial_tr_radius, factorization_method) # Status 3 occurs when the callback function requests termination, # this is assumed to not be a success. result.success = True if result.status in (1, 2) else False result.message = TERMINATION_MESSAGES[result.status] # Alias (for backward compatibility with 1.1.0) result.niter = result.nit if verbose == 2: BasicReport.print_footer() elif verbose > 2: if method == 'equality_constrained_sqp': SQPReport.print_footer() elif method == 'tr_interior_point': IPReport.print_footer() if verbose >= 1: print(result.message) print("Number of iterations: {}, function evaluations: {}, " "CG iterations: {}, optimality: {:.2e}, " "constraint violation: {:.2e}, execution time: {:4.2} s." .format(result.nit, result.nfev, result.cg_niter, result.optimality, result.constr_violation, result.execution_time)) return result
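# --- Editor's hedged usage sketch (not part of the original file) ---
# ``_minimize_trustregion_constr`` is normally reached through the public
# ``scipy.optimize.minimize(..., method='trust-constr')`` interface, as below.
# ``_demo_trust_constr`` is a hypothetical helper name used only here.
def _demo_trust_constr():
    import numpy as np
    from scipy.optimize import minimize, NonlinearConstraint, Bounds

    def objective(x):
        return (x[0] - 1)**2 + (x[1] - 2.5)**2

    # Stay inside the unit disc and keep both variables non-negative.
    disc = NonlinearConstraint(lambda x: x[0]**2 + x[1]**2, -np.inf, 1.0)
    bounds = Bounds([0, 0], [np.inf, np.inf])

    res = minimize(objective, x0=[0.1, 0.1], method='trust-constr',
                   constraints=[disc], bounds=bounds, options={'gtol': 1e-8})
    # res.status 1 or 2 means a convergence criterion was met; res.optimality
    # and res.constr_violation are the quantities described in the docstring.
    return res.x, res.optimality, res.constr_violation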
25,330
44.233929
86
py
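The driver above is the private backend reached through scipy.optimize.minimize(method='trust-constr'). A minimal usage sketch follows, using only the public minimize API; the test problem (Rosenbrock with one linear interval constraint) and the option values are illustrative and not taken from this file.

import numpy as np
from scipy.optimize import minimize, LinearConstraint, rosen, rosen_der

x0 = np.array([0.5, 0.0])
# One linear interval constraint, -1 <= x0 + 2*x1 <= 1, so canonical.n_ineq > 0
# and the driver above selects the 'tr_interior_point' branch.
constraint = LinearConstraint([[1.0, 2.0]], -1.0, 1.0)

res = minimize(rosen, x0, jac=rosen_der, method='trust-constr',
               constraints=[constraint],
               options={'gtol': 1e-8, 'xtol': 1e-10,
                        'maxiter': 500, 'verbose': 1})
# status 1 or 2 means success (gtol or xtol satisfied), 0 means maxiter reached,
# 3 means a callback requested termination.
print(res.status, res.nit, res.optimality, res.constr_violation)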
scipy
scipy-main/scipy/optimize/_trustregion_constr/report.py
"""Progress report printers.""" from __future__ import annotations class ReportBase: COLUMN_NAMES: list[str] = NotImplemented COLUMN_WIDTHS: list[int] = NotImplemented ITERATION_FORMATS: list[str] = NotImplemented @classmethod def print_header(cls): fmt = ("|" + "|".join([f"{{:^{x}}}" for x in cls.COLUMN_WIDTHS]) + "|") separators = ['-' * x for x in cls.COLUMN_WIDTHS] print(fmt.format(*cls.COLUMN_NAMES)) print(fmt.format(*separators)) @classmethod def print_iteration(cls, *args): iteration_format = [f"{{:{x}}}" for x in cls.ITERATION_FORMATS] fmt = "|" + "|".join(iteration_format) + "|" print(fmt.format(*args)) @classmethod def print_footer(cls): print() class BasicReport(ReportBase): COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", "opt", "c viol"] COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10] ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", "^10.2e"] class SQPReport(ReportBase): COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", "opt", "c viol", "penalty", "CG stop"] COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7] ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", "^10.2e", "^10.2e", "^7"] class IPReport(ReportBase): COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius", "opt", "c viol", "penalty", "barrier param", "CG stop"] COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7] ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e", "^10.2e", "^10.2e", "^13.2e", "^7"]
1,818
33.980769
75
py
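The report classes above only format fixed-width progress tables. A small sketch with made-up iteration numbers, showing the layout that BasicReport produces:

# Illustrative only: feed BasicReport some fabricated iteration data to see the
# table written by print_header/print_iteration/print_footer.
from scipy.optimize._trustregion_constr.report import BasicReport

BasicReport.print_header()
# Columns: niter, f evals, CG iter, obj func, tr radius, opt, c viol
BasicReport.print_iteration(1, 2, 0, 4.0, 1.0, 2.0e-1, 0.0)
BasicReport.print_iteration(2, 3, 1, 3.5, 7.0, 5.0e-3, 0.0)
BasicReport.print_footer()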
scipy
scipy-main/scipy/optimize/_trustregion_constr/__init__.py
"""This module contains the equality constrained SQP solver.""" from .minimize_trustregion_constr import _minimize_trustregion_constr __all__ = ['_minimize_trustregion_constr']
180
24.857143
69
py
scipy
scipy-main/scipy/optimize/_trustregion_constr/projections.py
"""Basic linear factorizations needed by the solver.""" from scipy.sparse import (bmat, csc_matrix, eye, issparse) from scipy.sparse.linalg import LinearOperator import scipy.linalg import scipy.sparse.linalg try: from sksparse.cholmod import cholesky_AAt sksparse_available = True except ImportError: import warnings sksparse_available = False import numpy as np from warnings import warn __all__ = [ 'orthogonality', 'projections', ] def orthogonality(A, g): """Measure orthogonality between a vector and the null space of a matrix. Compute a measure of orthogonality between the null space of the (possibly sparse) matrix ``A`` and a given vector ``g``. The formula is a simplified (and cheaper) version of formula (3.13) from [1]_. ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``. References ---------- .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. "On the solution of equality constrained quadratic programming problems arising in optimization." SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. """ # Compute vector norms norm_g = np.linalg.norm(g) # Compute Froebnius norm of the matrix A if issparse(A): norm_A = scipy.sparse.linalg.norm(A, ord='fro') else: norm_A = np.linalg.norm(A, ord='fro') # Check if norms are zero if norm_g == 0 or norm_A == 0: return 0 norm_A_g = np.linalg.norm(A.dot(g)) # Orthogonality measure orth = norm_A_g / (norm_A*norm_g) return orth def normal_equation_projections(A, m, n, orth_tol, max_refin, tol): """Return linear operators for matrix A using ``NormalEquation`` approach. """ # Cholesky factorization factor = cholesky_AAt(A) # z = x - A.T inv(A A.T) A x def null_space(x): v = factor(A.dot(x)) z = x - A.T.dot(v) # Iterative refinement to improve roundoff # errors described in [2]_, algorithm 5.1. k = 0 while orthogonality(A, z) > orth_tol: if k >= max_refin: break # z_next = z - A.T inv(A A.T) A z v = factor(A.dot(z)) z = z - A.T.dot(v) k += 1 return z # z = inv(A A.T) A x def least_squares(x): return factor(A.dot(x)) # z = A.T inv(A A.T) x def row_space(x): return A.T.dot(factor(x)) return null_space, least_squares, row_space def augmented_system_projections(A, m, n, orth_tol, max_refin, tol): """Return linear operators for matrix A - ``AugmentedSystem``.""" # Form augmented system K = csc_matrix(bmat([[eye(n), A.T], [A, None]])) # LU factorization # TODO: Use a symmetric indefinite factorization # to solve the system twice as fast (because # of the symmetry). try: solve = scipy.sparse.linalg.factorized(K) except RuntimeError: warn("Singular Jacobian matrix. Using dense SVD decomposition to " "perform the factorizations.") return svd_factorization_projections(A.toarray(), m, n, orth_tol, max_refin, tol) # z = x - A.T inv(A A.T) A x # is computed solving the extended system: # [I A.T] * [ z ] = [x] # [A O ] [aux] [0] def null_space(x): # v = [x] # [0] v = np.hstack([x, np.zeros(m)]) # lu_sol = [ z ] # [aux] lu_sol = solve(v) z = lu_sol[:n] # Iterative refinement to improve roundoff # errors described in [2]_, algorithm 5.2. 
k = 0 while orthogonality(A, z) > orth_tol: if k >= max_refin: break # new_v = [x] - [I A.T] * [ z ] # [0] [A O ] [aux] new_v = v - K.dot(lu_sol) # [I A.T] * [delta z ] = new_v # [A O ] [delta aux] lu_update = solve(new_v) # [ z ] += [delta z ] # [aux] [delta aux] lu_sol += lu_update z = lu_sol[:n] k += 1 # return z = x - A.T inv(A A.T) A x return z # z = inv(A A.T) A x # is computed solving the extended system: # [I A.T] * [aux] = [x] # [A O ] [ z ] [0] def least_squares(x): # v = [x] # [0] v = np.hstack([x, np.zeros(m)]) # lu_sol = [aux] # [ z ] lu_sol = solve(v) # return z = inv(A A.T) A x return lu_sol[n:m+n] # z = A.T inv(A A.T) x # is computed solving the extended system: # [I A.T] * [ z ] = [0] # [A O ] [aux] [x] def row_space(x): # v = [0] # [x] v = np.hstack([np.zeros(n), x]) # lu_sol = [ z ] # [aux] lu_sol = solve(v) # return z = A.T inv(A A.T) x return lu_sol[:n] return null_space, least_squares, row_space def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol): """Return linear operators for matrix A using ``QRFactorization`` approach. """ # QRFactorization Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic') if np.linalg.norm(R[-1, :], np.inf) < tol: warn('Singular Jacobian matrix. Using SVD decomposition to ' + 'perform the factorizations.') return svd_factorization_projections(A, m, n, orth_tol, max_refin, tol) # z = x - A.T inv(A A.T) A x def null_space(x): # v = P inv(R) Q.T x aux1 = Q.T.dot(x) aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) v = np.zeros(m) v[P] = aux2 z = x - A.T.dot(v) # Iterative refinement to improve roundoff # errors described in [2]_, algorithm 5.1. k = 0 while orthogonality(A, z) > orth_tol: if k >= max_refin: break # v = P inv(R) Q.T x aux1 = Q.T.dot(z) aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) v[P] = aux2 # z_next = z - A.T v z = z - A.T.dot(v) k += 1 return z # z = inv(A A.T) A x def least_squares(x): # z = P inv(R) Q.T x aux1 = Q.T.dot(x) aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False) z = np.zeros(m) z[P] = aux2 return z # z = A.T inv(A A.T) x def row_space(x): # z = Q inv(R.T) P.T x aux1 = x[P] aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False, trans='T') z = Q.dot(aux2) return z return null_space, least_squares, row_space def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol): """Return linear operators for matrix A using ``SVDFactorization`` approach. """ # SVD Factorization U, s, Vt = scipy.linalg.svd(A, full_matrices=False) # Remove dimensions related with very small singular values U = U[:, s > tol] Vt = Vt[s > tol, :] s = s[s > tol] # z = x - A.T inv(A A.T) A x def null_space(x): # v = U 1/s V.T x = inv(A A.T) A x aux1 = Vt.dot(x) aux2 = 1/s*aux1 v = U.dot(aux2) z = x - A.T.dot(v) # Iterative refinement to improve roundoff # errors described in [2]_, algorithm 5.1. k = 0 while orthogonality(A, z) > orth_tol: if k >= max_refin: break # v = U 1/s V.T x = inv(A A.T) A x aux1 = Vt.dot(z) aux2 = 1/s*aux1 v = U.dot(aux2) # z_next = z - A.T v z = z - A.T.dot(v) k += 1 return z # z = inv(A A.T) A x def least_squares(x): # z = U 1/s V.T x = inv(A A.T) A x aux1 = Vt.dot(x) aux2 = 1/s*aux1 z = U.dot(aux2) return z # z = A.T inv(A A.T) x def row_space(x): # z = V 1/s U.T x aux1 = U.T.dot(x) aux2 = 1/s*aux1 z = Vt.T.dot(aux2) return z return null_space, least_squares, row_space def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15): """Return three linear operators related with a given matrix A. 
Parameters ---------- A : sparse matrix (or ndarray), shape (m, n) Matrix ``A`` used in the projection. method : string, optional Method used for compute the given linear operators. Should be one of: - 'NormalEquation': The operators will be computed using the so-called normal equation approach explained in [1]_. In order to do so the Cholesky factorization of ``(A A.T)`` is computed. Exclusive for sparse matrices. - 'AugmentedSystem': The operators will be computed using the so-called augmented system approach explained in [1]_. Exclusive for sparse matrices. - 'QRFactorization': Compute projections using QR factorization. Exclusive for dense matrices. - 'SVDFactorization': Compute projections using SVD factorization. Exclusive for dense matrices. orth_tol : float, optional Tolerance for iterative refinements. max_refin : int, optional Maximum number of iterative refinements. tol : float, optional Tolerance for singular values. Returns ------- Z : LinearOperator, shape (n, n) Null-space operator. For a given vector ``x``, the null space operator is equivalent to apply a projection matrix ``P = I - A.T inv(A A.T) A`` to the vector. It can be shown that this is equivalent to project ``x`` into the null space of A. LS : LinearOperator, shape (m, n) Least-squares operator. For a given vector ``x``, the least-squares operator is equivalent to apply a pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A`` to the vector. It can be shown that this vector ``pinv(A.T) x`` is the least_square solution to ``A.T y = x``. Y : LinearOperator, shape (n, m) Row-space operator. For a given vector ``x``, the row-space operator is equivalent to apply a projection matrix ``Q = A.T inv(A A.T)`` to the vector. It can be shown that this vector ``y = Q x`` the minimum norm solution of ``A y = x``. Notes ----- Uses iterative refinements described in [1] during the computation of ``Z`` in order to cope with the possibility of large roundoff errors. References ---------- .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. "On the solution of equality constrained quadratic programming problems arising in optimization." SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. """ m, n = np.shape(A) # The factorization of an empty matrix # only works for the sparse representation. if m*n == 0: A = csc_matrix(A) # Check Argument if issparse(A): if method is None: method = "AugmentedSystem" if method not in ("NormalEquation", "AugmentedSystem"): raise ValueError("Method not allowed for sparse matrix.") if method == "NormalEquation" and not sksparse_available: warnings.warn(("Only accepts 'NormalEquation' option when" " scikit-sparse is available. 
Using " "'AugmentedSystem' option instead."), ImportWarning) method = 'AugmentedSystem' else: if method is None: method = "QRFactorization" if method not in ("QRFactorization", "SVDFactorization"): raise ValueError("Method not allowed for dense array.") if method == 'NormalEquation': null_space, least_squares, row_space \ = normal_equation_projections(A, m, n, orth_tol, max_refin, tol) elif method == 'AugmentedSystem': null_space, least_squares, row_space \ = augmented_system_projections(A, m, n, orth_tol, max_refin, tol) elif method == "QRFactorization": null_space, least_squares, row_space \ = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol) elif method == "SVDFactorization": null_space, least_squares, row_space \ = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol) Z = LinearOperator((n, n), null_space) LS = LinearOperator((m, n), least_squares) Y = LinearOperator((n, m), row_space) return Z, LS, Y
13,105
31.280788
80
py
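A minimal sketch exercising projections() on a small dense matrix and checking the defining properties of Z, LS and Y stated in the docstring. The matrix and test vectors are arbitrary; the checks mirror what the test suite further below does.

import numpy as np
from scipy.optimize._trustregion_constr.projections import projections, orthogonality

A = np.array([[1.0, 2.0, 3.0, 4.0],
              [0.0, 1.0, 0.0, 2.0]])   # shape (m, n) = (2, 4), full row rank
Z, LS, Y = projections(A)              # dense input -> 'QRFactorization'

z = np.array([1.0, -2.0, 0.5, 3.0])
x = Z.matvec(z)                        # projection of z onto the null space of A
assert np.allclose(A.dot(x), 0.0)
print(orthogonality(A, x))             # should be ~0

v = LS.matvec(z)                       # v = inv(A A.T) A z
assert np.allclose(v, np.linalg.lstsq(A.T, z, rcond=None)[0])

b = np.array([1.0, 2.0])
y = Y.matvec(b)                        # minimum-norm solution of A y = b
assert np.allclose(A.dot(y), b)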
scipy
scipy-main/scipy/optimize/_trustregion_constr/qp_subproblem.py
"""Equality-constrained quadratic programming solvers.""" from scipy.sparse import (linalg, bmat, csc_matrix) from math import copysign import numpy as np from numpy.linalg import norm __all__ = [ 'eqp_kktfact', 'sphere_intersections', 'box_intersections', 'box_sphere_intersections', 'inside_box_boundaries', 'modified_dogleg', 'projected_cg' ] # For comparison with the projected CG def eqp_kktfact(H, c, A, b): """Solve equality-constrained quadratic programming (EQP) problem. Solve ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` using direct factorization of the KKT system. Parameters ---------- H : sparse matrix, shape (n, n) Hessian matrix of the EQP problem. c : array_like, shape (n,) Gradient of the quadratic objective function. A : sparse matrix Jacobian matrix of the EQP problem. b : array_like, shape (m,) Right-hand side of the constraint equation. Returns ------- x : array_like, shape (n,) Solution of the KKT problem. lagrange_multipliers : ndarray, shape (m,) Lagrange multipliers of the KKT problem. """ n, = np.shape(c) # Number of parameters m, = np.shape(b) # Number of constraints # Karush-Kuhn-Tucker matrix of coefficients. # Defined as in Nocedal/Wright "Numerical # Optimization" p.452 in Eq. (16.4). kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]])) # Vector of coefficients. kkt_vec = np.hstack([-c, -b]) # TODO: Use a symmetric indefinite factorization # to solve the system twice as fast (because # of the symmetry). lu = linalg.splu(kkt_matrix) kkt_sol = lu.solve(kkt_vec) x = kkt_sol[:n] lagrange_multipliers = -kkt_sol[n:n+m] return x, lagrange_multipliers def sphere_intersections(z, d, trust_radius, entire_line=False): """Find the intersection between segment (or line) and spherical constraints. Find the intersection between the segment (or line) defined by the parametric equation ``x(t) = z + t*d`` and the ball ``||x|| <= trust_radius``. Parameters ---------- z : array_like, shape (n,) Initial point. d : array_like, shape (n,) Direction. trust_radius : float Ball radius. entire_line : bool, optional When ``True``, the function returns the intersection between the line ``x(t) = z + t*d`` (``t`` can assume any value) and the ball ``||x|| <= trust_radius``. When ``False``, the function returns the intersection between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball. Returns ------- ta, tb : float The line/segment ``x(t) = z + t*d`` is inside the ball for for ``ta <= t <= tb``. intersect : bool When ``True``, there is a intersection between the line/segment and the sphere. On the other hand, when ``False``, there is no intersection. """ # Special case when d=0 if norm(d) == 0: return 0, 0, False # Check for inf trust_radius if np.isinf(trust_radius): if entire_line: ta = -np.inf tb = np.inf else: ta = 0 tb = 1 intersect = True return ta, tb, intersect a = np.dot(d, d) b = 2 * np.dot(z, d) c = np.dot(z, z) - trust_radius**2 discriminant = b*b - 4*a*c if discriminant < 0: intersect = False return 0, 0, intersect sqrt_discriminant = np.sqrt(discriminant) # The following calculation is mathematically # equivalent to: # ta = (-b - sqrt_discriminant) / (2*a) # tb = (-b + sqrt_discriminant) / (2*a) # but produce smaller round off errors. # Look at Matrix Computation p.97 # for a better justification. aux = b + copysign(sqrt_discriminant, b) ta = -aux / (2*a) tb = -2*c / aux ta, tb = sorted([ta, tb]) if entire_line: intersect = True else: # Checks to see if intersection happens # within vectors length. 
if tb < 0 or ta > 1: intersect = False ta = 0 tb = 0 else: intersect = True # Restrict intersection interval # between 0 and 1. ta = max(0, ta) tb = min(1, tb) return ta, tb, intersect def box_intersections(z, d, lb, ub, entire_line=False): """Find the intersection between segment (or line) and box constraints. Find the intersection between the segment (or line) defined by the parametric equation ``x(t) = z + t*d`` and the rectangular box ``lb <= x <= ub``. Parameters ---------- z : array_like, shape (n,) Initial point. d : array_like, shape (n,) Direction. lb : array_like, shape (n,) Lower bounds to each one of the components of ``x``. Used to delimit the rectangular box. ub : array_like, shape (n, ) Upper bounds to each one of the components of ``x``. Used to delimit the rectangular box. entire_line : bool, optional When ``True``, the function returns the intersection between the line ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular box. When ``False``, the function returns the intersection between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box. Returns ------- ta, tb : float The line/segment ``x(t) = z + t*d`` is inside the box for for ``ta <= t <= tb``. intersect : bool When ``True``, there is a intersection between the line (or segment) and the rectangular box. On the other hand, when ``False``, there is no intersection. """ # Make sure it is a numpy array z = np.asarray(z) d = np.asarray(d) lb = np.asarray(lb) ub = np.asarray(ub) # Special case when d=0 if norm(d) == 0: return 0, 0, False # Get values for which d==0 zero_d = (d == 0) # If the boundaries are not satisfied for some coordinate # for which "d" is zero, there is no box-line intersection. if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any(): intersect = False return 0, 0, intersect # Remove values for which d is zero not_zero_d = np.logical_not(zero_d) z = z[not_zero_d] d = d[not_zero_d] lb = lb[not_zero_d] ub = ub[not_zero_d] # Find a series of intervals (t_lb[i], t_ub[i]). t_lb = (lb-z) / d t_ub = (ub-z) / d # Get the intersection of all those intervals. ta = max(np.minimum(t_lb, t_ub)) tb = min(np.maximum(t_lb, t_ub)) # Check if intersection is feasible if ta <= tb: intersect = True else: intersect = False # Checks to see if intersection happens within vectors length. if not entire_line: if tb < 0 or ta > 1: intersect = False ta = 0 tb = 0 else: # Restrict intersection interval between 0 and 1. ta = max(0, ta) tb = min(1, tb) return ta, tb, intersect def box_sphere_intersections(z, d, lb, ub, trust_radius, entire_line=False, extra_info=False): """Find the intersection between segment (or line) and box/sphere constraints. Find the intersection between the segment (or line) defined by the parametric equation ``x(t) = z + t*d``, the rectangular box ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``. Parameters ---------- z : array_like, shape (n,) Initial point. d : array_like, shape (n,) Direction. lb : array_like, shape (n,) Lower bounds to each one of the components of ``x``. Used to delimit the rectangular box. ub : array_like, shape (n, ) Upper bounds to each one of the components of ``x``. Used to delimit the rectangular box. trust_radius : float Ball radius. entire_line : bool, optional When ``True``, the function returns the intersection between the line ``x(t) = z + t*d`` (``t`` can assume any value) and the constraints. When ``False``, the function returns the intersection between the segment ``x(t) = z + t*d``, ``0 <= t <= 1`` and the constraints. 
extra_info : bool, optional When ``True``, the function returns ``intersect_sphere`` and ``intersect_box``. Returns ------- ta, tb : float The line/segment ``x(t) = z + t*d`` is inside the rectangular box and inside the ball for ``ta <= t <= tb``. intersect : bool When ``True``, there is a intersection between the line (or segment) and both constraints. On the other hand, when ``False``, there is no intersection. sphere_info : dict, optional Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` for which the line intercepts the ball. And a boolean value indicating whether the sphere is intersected by the line. box_info : dict, optional Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` for which the line intercepts the box. And a boolean value indicating whether the box is intersected by the line. """ ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub, entire_line) ta_s, tb_s, intersect_s = sphere_intersections(z, d, trust_radius, entire_line) ta = np.maximum(ta_b, ta_s) tb = np.minimum(tb_b, tb_s) if intersect_b and intersect_s and ta <= tb: intersect = True else: intersect = False if extra_info: sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s} box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b} return ta, tb, intersect, sphere_info, box_info else: return ta, tb, intersect def inside_box_boundaries(x, lb, ub): """Check if lb <= x <= ub.""" return (lb <= x).all() and (x <= ub).all() def reinforce_box_boundaries(x, lb, ub): """Return clipped value of x""" return np.minimum(np.maximum(x, lb), ub) def modified_dogleg(A, Y, b, trust_radius, lb, ub): """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region. Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2`` subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification of the classical dogleg approach. Parameters ---------- A : LinearOperator (or sparse matrix or ndarray), shape (m, n) Matrix ``A`` in the minimization problem. It should have dimension ``(m, n)`` such that ``m < n``. Y : LinearOperator (or sparse matrix or ndarray), shape (n, m) LinearOperator that apply the projection matrix ``Q = A.T inv(A A.T)`` to the vector. The obtained vector ``y = Q x`` being the minimum norm solution of ``A y = x``. b : array_like, shape (m,) Vector ``b``in the minimization problem. trust_radius: float Trust radius to be considered. Delimits a sphere boundary to the problem. lb : array_like, shape (n,) Lower bounds to each one of the components of ``x``. It is expected that ``lb <= 0``, otherwise the algorithm may fail. If ``lb[i] = -Inf``, the lower bound for the ith component is just ignored. ub : array_like, shape (n, ) Upper bounds to each one of the components of ``x``. It is expected that ``ub >= 0``, otherwise the algorithm may fail. If ``ub[i] = Inf``, the upper bound for the ith component is just ignored. Returns ------- x : array_like, shape (n,) Solution to the problem. Notes ----- Based on implementations described in pp. 885-886 from [1]_. References ---------- .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. "An interior point algorithm for large-scale nonlinear programming." SIAM Journal on Optimization 9.4 (1999): 877-900. """ # Compute minimum norm minimizer of 1/2*|| A x + b ||^2. 
newton_point = -Y.dot(b) # Check for interior point if inside_box_boundaries(newton_point, lb, ub) \ and norm(newton_point) <= trust_radius: x = newton_point return x # Compute gradient vector ``g = A.T b`` g = A.T.dot(b) # Compute Cauchy point # `cauchy_point = g.T g / (g.T A.T A g)``. A_g = A.dot(g) cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g # Origin origin_point = np.zeros_like(cauchy_point) # Check the segment between cauchy_point and newton_point # for a possible solution. z = cauchy_point p = newton_point - cauchy_point _, alpha, intersect = box_sphere_intersections(z, p, lb, ub, trust_radius) if intersect: x1 = z + alpha*p else: # Check the segment between the origin and cauchy_point # for a possible solution. z = origin_point p = cauchy_point _, alpha, _ = box_sphere_intersections(z, p, lb, ub, trust_radius) x1 = z + alpha*p # Check the segment between origin and newton_point # for a possible solution. z = origin_point p = newton_point _, alpha, _ = box_sphere_intersections(z, p, lb, ub, trust_radius) x2 = z + alpha*p # Return the best solution among x1 and x2. if norm(A.dot(x1) + b) < norm(A.dot(x2) + b): return x1 else: return x2 def projected_cg(H, c, Z, Y, b, trust_radius=np.inf, lb=None, ub=None, tol=None, max_iter=None, max_infeasible_iter=None, return_all=False): """Solve EQP problem with projected CG method. Solve equality-constrained quadratic programming problem ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` and, possibly, to trust region constraints ``||x|| < trust_radius`` and box constraints ``lb <= x <= ub``. Parameters ---------- H : LinearOperator (or sparse matrix or ndarray), shape (n, n) Operator for computing ``H v``. c : array_like, shape (n,) Gradient of the quadratic objective function. Z : LinearOperator (or sparse matrix or ndarray), shape (n, n) Operator for projecting ``x`` into the null space of A. Y : LinearOperator, sparse matrix, ndarray, shape (n, m) Operator that, for a given a vector ``b``, compute smallest norm solution of ``A x + b = 0``. b : array_like, shape (m,) Right-hand side of the constraint equation. trust_radius : float, optional Trust radius to be considered. By default, uses ``trust_radius=inf``, which means no trust radius at all. lb : array_like, shape (n,), optional Lower bounds to each one of the components of ``x``. If ``lb[i] = -Inf`` the lower bound for the i-th component is just ignored (default). ub : array_like, shape (n, ), optional Upper bounds to each one of the components of ``x``. If ``ub[i] = Inf`` the upper bound for the i-th component is just ignored (default). tol : float, optional Tolerance used to interrupt the algorithm. max_iter : int, optional Maximum algorithm iterations. Where ``max_inter <= n-m``. By default, uses ``max_iter = n-m``. max_infeasible_iter : int, optional Maximum infeasible (regarding box constraints) iterations the algorithm is allowed to take. By default, uses ``max_infeasible_iter = n-m``. return_all : bool, optional When ``true``, return the list of all vectors through the iterations. Returns ------- x : array_like, shape (n,) Solution of the EQP problem. info : Dict Dictionary containing the following: - niter : Number of iterations. - stop_cond : Reason for algorithm termination: 1. Iteration limit was reached; 2. Reached the trust-region boundary; 3. Negative curvature detected; 4. Tolerance was satisfied. - allvecs : List containing all intermediary vectors (optional). - hits_boundary : True if the proposed step is on the boundary of the trust region. 
Notes ----- Implementation of Algorithm 6.2 on [1]_. In the absence of spherical and box constraints, for sufficient iterations, the method returns a truly optimal result. In the presence of those constraints, the value returned is only a inexpensive approximation of the optimal value. References ---------- .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. "On the solution of equality constrained quadratic programming problems arising in optimization." SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. """ CLOSE_TO_ZERO = 1e-25 n, = np.shape(c) # Number of parameters m, = np.shape(b) # Number of constraints # Initial Values x = Y.dot(-b) r = Z.dot(H.dot(x) + c) g = Z.dot(r) p = -g # Store ``x`` value if return_all: allvecs = [x] # Values for the first iteration H_p = H.dot(p) rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) # If x > trust-region the problem does not have a solution. tr_distance = trust_radius - norm(x) if tr_distance < 0: raise ValueError("Trust region problem does not have a solution.") # If x == trust_radius, then x is the solution # to the optimization problem, since x is the # minimum norm solution to Ax=b. elif tr_distance < CLOSE_TO_ZERO: info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True} if return_all: allvecs.append(x) info['allvecs'] = allvecs return x, info # Set default tolerance if tol is None: tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO) # Set default lower and upper bounds if lb is None: lb = np.full(n, -np.inf) if ub is None: ub = np.full(n, np.inf) # Set maximum iterations if max_iter is None: max_iter = n-m max_iter = min(max_iter, n-m) # Set maximum infeasible iterations if max_infeasible_iter is None: max_infeasible_iter = n-m hits_boundary = False stop_cond = 1 counter = 0 last_feasible_x = np.zeros_like(x) k = 0 for i in range(max_iter): # Stop criteria - Tolerance : r.T g < tol if rt_g < tol: stop_cond = 4 break k += 1 # Compute curvature pt_H_p = H_p.dot(p) # Stop criteria - Negative curvature if pt_H_p <= 0: if np.isinf(trust_radius): raise ValueError("Negative curvature not allowed " "for unrestricted problems.") else: # Find intersection with constraints _, alpha, intersect = box_sphere_intersections( x, p, lb, ub, trust_radius, entire_line=True) # Update solution if intersect: x = x + alpha*p # Reinforce variables are inside box constraints. # This is only necessary because of roundoff errors. x = reinforce_box_boundaries(x, lb, ub) # Attribute information stop_cond = 3 hits_boundary = True break # Get next step alpha = rt_g / pt_H_p x_next = x + alpha*p # Stop criteria - Hits boundary if np.linalg.norm(x_next) >= trust_radius: # Find intersection with box constraints _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, trust_radius) # Update solution if intersect: x = x + theta*alpha*p # Reinforce variables are inside box constraints. # This is only necessary because of roundoff errors. x = reinforce_box_boundaries(x, lb, ub) # Attribute information stop_cond = 2 hits_boundary = True break # Check if ``x`` is inside the box and start counter if it is not. if inside_box_boundaries(x_next, lb, ub): counter = 0 else: counter += 1 # Whenever outside box constraints keep looking for intersections. if counter > 0: _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, trust_radius) if intersect: last_feasible_x = x + theta*alpha*p # Reinforce variables are inside box constraints. # This is only necessary because of roundoff errors. 
last_feasible_x = reinforce_box_boundaries(last_feasible_x, lb, ub) counter = 0 # Stop after too many infeasible (regarding box constraints) iteration. if counter > max_infeasible_iter: break # Store ``x_next`` value if return_all: allvecs.append(x_next) # Update residual r_next = r + alpha*H_p # Project residual g+ = Z r+ g_next = Z.dot(r_next) # Compute conjugate direction step d rt_g_next = norm(g_next)**2 # g.T g = r.T g (ref [1]_ p.1389) beta = rt_g_next / rt_g p = - g_next + beta*p # Prepare for next iteration x = x_next g = g_next r = g_next rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) H_p = H.dot(p) if not inside_box_boundaries(x, lb, ub): x = last_feasible_x hits_boundary = True info = {'niter': k, 'stop_cond': stop_cond, 'hits_boundary': hits_boundary} if return_all: info['allvecs'] = allvecs return x, info
22,592
34.412226
88
py
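A short worked example for eqp_kktfact and sphere_intersections, reusing the Nocedal/Wright Example 16.2 data that also appears in the test suite below:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.optimize._trustregion_constr.qp_subproblem import (
    eqp_kktfact, sphere_intersections)

H = csc_matrix([[6, 2, 1],
                [2, 5, 2],
                [1, 2, 4]])
A = csc_matrix([[1, 0, 1],
                [0, 1, 1]])
c = np.array([-8, -3, -3])
b = -np.array([3, 0])

x, lagrange_multipliers = eqp_kktfact(H, c, A, b)
print(x)                      # approximately [2, -1, 1]
print(lagrange_multipliers)   # approximately [3, -2]

# Intersection of the segment x(t) = z + t*d, 0 <= t <= 1, with ||x|| <= 1.5:
ta, tb, intersect = sphere_intersections([2, 0], [-1, 0], 1.5)
print(ta, tb, intersect)      # 0.5, 1.0, True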
scipy
scipy-main/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py
"""Byrd-Omojokun Trust-Region SQP method.""" from scipy.sparse import eye as speye from .projections import projections from .qp_subproblem import modified_dogleg, projected_cg, box_intersections import numpy as np from numpy.linalg import norm __all__ = ['equality_constrained_sqp'] def default_scaling(x): n, = np.shape(x) return speye(n) def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess, x0, fun0, grad0, constr0, jac0, stop_criteria, state, initial_penalty, initial_trust_radius, factorization_method, trust_lb=None, trust_ub=None, scaling=default_scaling): """Solve nonlinear equality-constrained problem using trust-region SQP. Solve optimization problem: minimize fun(x) subject to: constr(x) = 0 using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several implementation details are based on [2]_ and [3]_, p. 549. References ---------- .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the implementation of an algorithm for large-scale equality constrained optimization." SIAM Journal on Optimization 8.3 (1998): 682-706. .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. "An interior point algorithm for large-scale nonlinear programming." SIAM Journal on Optimization 9.4 (1999): 877-900. .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ PENALTY_FACTOR = 0.3 # Rho from formula (3.51), reference [2]_, p.891. LARGE_REDUCTION_RATIO = 0.9 INTERMEDIARY_REDUCTION_RATIO = 0.3 SUFFICIENT_REDUCTION_RATIO = 1e-8 # Eta from reference [2]_, p.892. TRUST_ENLARGEMENT_FACTOR_L = 7.0 TRUST_ENLARGEMENT_FACTOR_S = 2.0 MAX_TRUST_REDUCTION = 0.5 MIN_TRUST_REDUCTION = 0.1 SOC_THRESHOLD = 0.1 TR_FACTOR = 0.8 # Zeta from formula (3.21), reference [2]_, p.885. BOX_FACTOR = 0.5 n, = np.shape(x0) # Number of parameters # Set default lower and upper bounds. if trust_lb is None: trust_lb = np.full(n, -np.inf) if trust_ub is None: trust_ub = np.full(n, np.inf) # Initial values x = np.copy(x0) trust_radius = initial_trust_radius penalty = initial_penalty # Compute Values f = fun0 c = grad0 b = constr0 A = jac0 S = scaling(x) # Get projections Z, LS, Y = projections(A, factorization_method) # Compute least-square lagrange multipliers v = -LS.dot(c) # Compute Hessian H = lagr_hess(x, v) # Update state parameters optimality = norm(c + A.T.dot(v), np.inf) constr_violation = norm(b, np.inf) if len(b) > 0 else 0 cg_info = {'niter': 0, 'stop_cond': 0, 'hits_boundary': False} last_iteration_failed = False while not stop_criteria(state, x, last_iteration_failed, optimality, constr_violation, trust_radius, penalty, cg_info): # Normal Step - `dn` # minimize 1/2*||A dn + b||^2 # subject to: # ||dn|| <= TR_FACTOR * trust_radius # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub. dn = modified_dogleg(A, Y, b, TR_FACTOR*trust_radius, BOX_FACTOR*trust_lb, BOX_FACTOR*trust_ub) # Tangential Step - `dt` # Solve the QP problem: # minimize 1/2 dt.T H dt + dt.T (H dn + c) # subject to: # A dt = 0 # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2) # lb - dn <= dt <= ub - dn c_t = H.dot(dn) + c b_t = np.zeros_like(b) trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2) lb_t = trust_lb - dn ub_t = trust_ub - dn dt, cg_info = projected_cg(H, c_t, Z, Y, b_t, trust_radius_t, lb_t, ub_t) # Compute update (normal + tangential steps). d = dn + dt # Compute second order model: 1/2 d H d + c.T d + f. quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d) # Compute linearized constraint: l = A d + b. 
linearized_constr = A.dot(d)+b # Compute new penalty parameter according to formula (3.52), # reference [2]_, p.891. vpred = norm(b) - norm(linearized_constr) # Guarantee `vpred` always positive, # regardless of roundoff errors. vpred = max(1e-16, vpred) previous_penalty = penalty if quadratic_model > 0: new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred) penalty = max(penalty, new_penalty) # Compute predicted reduction according to formula (3.52), # reference [2]_, p.891. predicted_reduction = -quadratic_model + penalty*vpred # Compute merit function at current point merit_function = f + penalty*norm(b) # Evaluate function and constraints at trial point x_next = x + S.dot(d) f_next, b_next = fun_and_constr(x_next) # Compute merit function at trial point merit_function_next = f_next + penalty*norm(b_next) # Compute actual reduction according to formula (3.54), # reference [2]_, p.892. actual_reduction = merit_function - merit_function_next # Compute reduction ratio reduction_ratio = actual_reduction / predicted_reduction # Second order correction (SOC), reference [2]_, p.892. if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \ norm(dn) <= SOC_THRESHOLD * norm(dt): # Compute second order correction y = -Y.dot(b_next) # Make sure increment is inside box constraints _, t, intersect = box_intersections(d, y, trust_lb, trust_ub) # Compute tentative point x_soc = x + S.dot(d + t*y) f_soc, b_soc = fun_and_constr(x_soc) # Recompute actual reduction merit_function_soc = f_soc + penalty*norm(b_soc) actual_reduction_soc = merit_function - merit_function_soc # Recompute reduction ratio reduction_ratio_soc = actual_reduction_soc / predicted_reduction if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO: x_next = x_soc f_next = f_soc b_next = b_soc reduction_ratio = reduction_ratio_soc # Readjust trust region step, formula (3.55), reference [2]_, p.892. if reduction_ratio >= LARGE_REDUCTION_RATIO: trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d), trust_radius) elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO: trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d), trust_radius) # Reduce trust region step, according to reference [3]_, p.696. elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO: trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) / (1-reduction_ratio)) new_trust_radius = trust_reduction * norm(d) if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius: trust_radius *= MAX_TRUST_REDUCTION elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius: trust_radius = new_trust_radius else: trust_radius *= MIN_TRUST_REDUCTION # Update iteration if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO: x = x_next f, b = f_next, b_next c, A = grad_and_jac(x) S = scaling(x) # Get projections Z, LS, Y = projections(A, factorization_method) # Compute least-square lagrange multipliers v = -LS.dot(c) # Compute Hessian H = lagr_hess(x, v) # Set Flag last_iteration_failed = False # Otimality values optimality = norm(c + A.T.dot(v), np.inf) constr_violation = norm(b, np.inf) if len(b) > 0 else 0 else: penalty = previous_penalty last_iteration_failed = True return x, state
8,592
38.417431
79
py
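equality_constrained_sqp is normally driven by the trust-constr code above, but it can be called directly. A minimal sketch on a toy equality-constrained quadratic; the problem, tolerances and the bare-bones stop criterion are illustrative choices, not something defined in this module.

import numpy as np
from scipy.optimize._trustregion_constr.equality_constrained_sqp import (
    equality_constrained_sqp)

# minimize x0**2 + x1**2  subject to  x0 + x1 - 1 = 0  (solution [0.5, 0.5])
def fun_and_constr(x):
    return x[0]**2 + x[1]**2, np.array([x[0] + x[1] - 1.0])

def grad_and_jac(x):
    return 2.0 * x, np.array([[1.0, 1.0]])

def lagr_hess(x, v):
    # Hessian of the Lagrangian; the constraint is linear, so it is just 2*I.
    return 2.0 * np.eye(2)

def stop_criteria(state, x, last_iteration_failed, optimality,
                  constr_violation, tr_radius, constr_penalty, cg_info):
    state['nit'] += 1
    return (optimality < 1e-8 and constr_violation < 1e-8) or state['nit'] > 50

x0 = np.array([2.0, 0.0])
f0, c0 = fun_and_constr(x0)
g0, J0 = grad_and_jac(x0)
state = {'nit': 0}   # any object passed through to stop_criteria works here

x, state = equality_constrained_sqp(
    fun_and_constr, grad_and_jac, lagr_hess, x0,
    f0, g0, c0, J0, stop_criteria, state,
    initial_penalty=1.0, initial_trust_radius=1.0,
    factorization_method=None)
print(x)   # should approach [0.5, 0.5]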
scipy
scipy-main/scipy/optimize/_trustregion_constr/tr_interior_point.py
"""Trust-region interior point method. References ---------- .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. "An interior point algorithm for large-scale nonlinear programming." SIAM Journal on Optimization 9.4 (1999): 877-900. .. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal. "On the local behavior of an interior point method for nonlinear programming." Numerical analysis 1997 (1997): 37-56. .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization" Second Edition (2006). """ import scipy.sparse as sps import numpy as np from .equality_constrained_sqp import equality_constrained_sqp from scipy.sparse.linalg import LinearOperator __all__ = ['tr_interior_point'] class BarrierSubproblem: """ Barrier optimization problem: minimize fun(x) - barrier_parameter*sum(log(s)) subject to: constr_eq(x) = 0 constr_ineq(x) + s = 0 """ def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, barrier_parameter, tolerance, enforce_feasibility, global_stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, jac_eq0): # Store parameters self.n_vars = n_vars self.x0 = x0 self.s0 = s0 self.fun = fun self.grad = grad self.lagr_hess = lagr_hess self.constr = constr self.jac = jac self.barrier_parameter = barrier_parameter self.tolerance = tolerance self.n_eq = n_eq self.n_ineq = n_ineq self.enforce_feasibility = enforce_feasibility self.global_stop_criteria = global_stop_criteria self.xtol = xtol self.fun0 = self._compute_function(fun0, constr_ineq0, s0) self.grad0 = self._compute_gradient(grad0) self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0) self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0) self.terminate = False def update(self, barrier_parameter, tolerance): self.barrier_parameter = barrier_parameter self.tolerance = tolerance def get_slack(self, z): return z[self.n_vars:self.n_vars+self.n_ineq] def get_variables(self, z): return z[:self.n_vars] def function_and_constraints(self, z): """Returns barrier function and constraints at given point. For z = [x, s], returns barrier function: function(z) = fun(x) - barrier_parameter*sum(log(s)) and barrier constraints: constraints(z) = [ constr_eq(x) ] [ constr_ineq(x) + s ] """ # Get variables and slack variables x = self.get_variables(z) s = self.get_slack(z) # Compute function and constraints f = self.fun(x) c_eq, c_ineq = self.constr(x) # Return objective function and constraints return (self._compute_function(f, c_ineq, s), self._compute_constr(c_ineq, c_eq, s)) def _compute_function(self, f, c_ineq, s): # Use technique from Nocedal and Wright book, ref [3]_, p.576, # to guarantee constraints from `enforce_feasibility` # stay feasible along iterations. s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility] log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s] # Compute barrier objective function return f - self.barrier_parameter*np.sum(log_s) def _compute_constr(self, c_ineq, c_eq, s): # Compute barrier constraint return np.hstack((c_eq, c_ineq + s)) def scaling(self, z): """Returns scaling vector. Given by: scaling = [ones(n_vars), s] """ s = self.get_slack(z) diag_elements = np.hstack((np.ones(self.n_vars), s)) # Diagonal matrix def matvec(vec): return diag_elements*vec return LinearOperator((self.n_vars+self.n_ineq, self.n_vars+self.n_ineq), matvec) def gradient_and_jacobian(self, z): """Returns scaled gradient. 
Return scaled gradient: gradient = [ grad(x) ] [ -barrier_parameter*ones(n_ineq) ] and scaled Jacobian matrix: jacobian = [ jac_eq(x) 0 ] [ jac_ineq(x) S ] Both of them scaled by the previously defined scaling factor. """ # Get variables and slack variables x = self.get_variables(z) s = self.get_slack(z) # Compute first derivatives g = self.grad(x) J_eq, J_ineq = self.jac(x) # Return gradient and Jacobian return (self._compute_gradient(g), self._compute_jacobian(J_eq, J_ineq, s)) def _compute_gradient(self, g): return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq))) def _compute_jacobian(self, J_eq, J_ineq, s): if self.n_ineq == 0: return J_eq else: if sps.issparse(J_eq) or sps.issparse(J_ineq): # It is expected that J_eq and J_ineq # are already `csr_matrix` because of # the way ``BoxConstraint``, ``NonlinearConstraint`` # and ``LinearConstraint`` are defined. J_eq = sps.csr_matrix(J_eq) J_ineq = sps.csr_matrix(J_ineq) return self._assemble_sparse_jacobian(J_eq, J_ineq, s) else: S = np.diag(s) zeros = np.zeros((self.n_eq, self.n_ineq)) # Convert to matrix if sps.issparse(J_ineq): J_ineq = J_ineq.toarray() if sps.issparse(J_eq): J_eq = J_eq.toarray() # Concatenate matrices return np.block([[J_eq, zeros], [J_ineq, S]]) def _assemble_sparse_jacobian(self, J_eq, J_ineq, s): """Assemble sparse Jacobian given its components. Given ``J_eq``, ``J_ineq`` and ``s`` returns: jacobian = [ J_eq, 0 ] [ J_ineq, diag(s) ] It is equivalent to: sps.bmat([[ J_eq, None ], [ J_ineq, diag(s) ]], "csr") but significantly more efficient for this given structure. """ n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq J_aux = sps.vstack([J_eq, J_ineq], "csr") indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int), np.arange(n_ineq+1, dtype=int))) size = indices.size+n_ineq new_indices = np.empty(size) new_data = np.empty(size) mask = np.full(size, False, bool) mask[new_indptr[-n_ineq:]-1] = True new_indices[mask] = n_vars+np.arange(n_ineq) new_indices[~mask] = indices new_data[mask] = s new_data[~mask] = data J = sps.csr_matrix((new_data, new_indices, new_indptr), (n_eq + n_ineq, n_vars + n_ineq)) return J def lagrangian_hessian_x(self, z, v): """Returns Lagrangian Hessian (in relation to `x`) -> Hx""" x = self.get_variables(z) # Get lagrange multipliers relatated to nonlinear equality constraints v_eq = v[:self.n_eq] # Get lagrange multipliers relatated to nonlinear ineq. constraints v_ineq = v[self.n_eq:self.n_eq+self.n_ineq] lagr_hess = self.lagr_hess return lagr_hess(x, v_eq, v_ineq) def lagrangian_hessian_s(self, z, v): """Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S""" s = self.get_slack(z) # Using the primal formulation: # S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s). # Reference [1]_ p. 882, formula (3.1) primal = self.barrier_parameter # Using the primal-dual formulation # S Hs S = diag(s)*diag(v/s)*diag(s) # Reference [1]_ p. 883, formula (3.11) primal_dual = v[-self.n_ineq:]*s # Uses the primal-dual formulation for # positives values of v_ineq, and primal # formulation for the remaining ones. 
return np.where(v[-self.n_ineq:] > 0, primal_dual, primal) def lagrangian_hessian(self, z, v): """Returns scaled Lagrangian Hessian""" # Compute Hessian in relation to x and s Hx = self.lagrangian_hessian_x(z, v) if self.n_ineq > 0: S_Hs_S = self.lagrangian_hessian_s(z, v) # The scaled Lagragian Hessian is: # [ Hx 0 ] # [ 0 S Hs S ] def matvec(vec): vec_x = self.get_variables(vec) vec_s = self.get_slack(vec) if self.n_ineq > 0: return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s)) else: return Hx.dot(vec_x) return LinearOperator((self.n_vars+self.n_ineq, self.n_vars+self.n_ineq), matvec) def stop_criteria(self, state, z, last_iteration_failed, optimality, constr_violation, trust_radius, penalty, cg_info): """Stop criteria to the barrier problem. The criteria here proposed is similar to formula (2.3) from [1]_, p.879. """ x = self.get_variables(z) if self.global_stop_criteria(state, x, last_iteration_failed, trust_radius, penalty, cg_info, self.barrier_parameter, self.tolerance): self.terminate = True return True else: g_cond = (optimality < self.tolerance and constr_violation < self.tolerance) x_cond = trust_radius < self.xtol return g_cond or x_cond def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, x0, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, jac_eq0, stop_criteria, enforce_feasibility, xtol, state, initial_barrier_parameter, initial_tolerance, initial_penalty, initial_trust_radius, factorization_method): """Trust-region interior points method. Solve problem: minimize fun(x) subject to: constr_ineq(x) <= 0 constr_eq(x) = 0 using trust-region interior point method described in [1]_. """ # BOUNDARY_PARAMETER controls the decrease on the slack # variables. Represents ``tau`` from [1]_ p.885, formula (3.18). BOUNDARY_PARAMETER = 0.995 # BARRIER_DECAY_RATIO controls the decay of the barrier parameter # and of the subproblem toloerance. Represents ``theta`` from [1]_ p.879. BARRIER_DECAY_RATIO = 0.2 # TRUST_ENLARGEMENT controls the enlargement on trust radius # after each iteration TRUST_ENLARGEMENT = 5 # Default enforce_feasibility if enforce_feasibility is None: enforce_feasibility = np.zeros(n_ineq, bool) # Initial Values barrier_parameter = initial_barrier_parameter tolerance = initial_tolerance trust_radius = initial_trust_radius # Define initial value for the slack variables s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq)) # Define barrier subproblem subprob = BarrierSubproblem( x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac, barrier_parameter, tolerance, enforce_feasibility, stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0, jac_eq0) # Define initial parameter for the first iteration. 
z = np.hstack((x0, s0)) fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0 grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0 # Define trust region bounds trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf), np.full(subprob.n_ineq, -BOUNDARY_PARAMETER))) trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf) # Solves a sequence of barrier problems while True: # Solve SQP subproblem z, state = equality_constrained_sqp( subprob.function_and_constraints, subprob.gradient_and_jacobian, subprob.lagrangian_hessian, z, fun0_subprob, grad0_subprob, constr0_subprob, jac0_subprob, subprob.stop_criteria, state, initial_penalty, trust_radius, factorization_method, trust_lb, trust_ub, subprob.scaling) if subprob.terminate: break # Update parameters trust_radius = max(initial_trust_radius, TRUST_ENLARGEMENT*state.tr_radius) # TODO: Use more advanced strategies from [2]_ # to update this parameters. barrier_parameter *= BARRIER_DECAY_RATIO tolerance *= BARRIER_DECAY_RATIO # Update Barrier Problem subprob.update(barrier_parameter, tolerance) # Compute initial values for next iteration fun0_subprob, constr0_subprob = subprob.function_and_constraints(z) grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z) # Get x and s x = subprob.get_variables(z) return x, state
13,802
38.778098
78
py
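A small numeric sketch of the barrier reformulation implemented by BarrierSubproblem: the slack initialization from tr_interior_point() and the barrier objective/constraints from _compute_function and _compute_constr. All numbers are made up; only the formulas mirror the code above.

import numpy as np

barrier_parameter = 0.1                 # initial mu
c_ineq0 = np.array([-3.0, 0.5, -0.2])   # constr_ineq(x0); feasible entries are <= 0
f0 = 2.0                                # fun(x0)

# Slack initialization, as in tr_interior_point():
s0 = np.maximum(-1.5 * c_ineq0, np.ones(c_ineq0.size))
print(s0)                               # [4.5, 1.0, 1.0]

# Barrier objective fun(x) - mu*sum(log(s)), as in _compute_function
# (entries with s_i <= 0 would contribute -inf):
log_s = np.where(s0 > 0, np.log(s0), -np.inf)
print(f0 - barrier_parameter * np.sum(log_s))

# Barrier constraints [constr_eq(x); constr_ineq(x) + s], as in _compute_constr:
c_eq0 = np.array([0.3])
print(np.hstack((c_eq0, c_ineq0 + s0)))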
scipy
scipy-main/scipy/optimize/_trustregion_constr/tests/test_projections.py
import numpy as np import scipy.linalg from scipy.sparse import csc_matrix from scipy.optimize._trustregion_constr.projections \ import projections, orthogonality from numpy.testing import (TestCase, assert_array_almost_equal, assert_equal, assert_allclose) try: from sksparse.cholmod import cholesky_AAt # noqa: F401 sksparse_available = True available_sparse_methods = ("NormalEquation", "AugmentedSystem") except ImportError: sksparse_available = False available_sparse_methods = ("AugmentedSystem",) available_dense_methods = ('QRFactorization', 'SVDFactorization') class TestProjections(TestCase): def test_nullspace_and_least_squares_sparse(self): A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]]) At_dense = A_dense.T A = csc_matrix(A_dense) test_points = ([1, 2, 3, 4, 5, 6, 7, 8], [1, 10, 3, 0, 1, 6, 7, 8], [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) for method in available_sparse_methods: Z, LS, _ = projections(A, method) for z in test_points: # Test if x is in the null_space x = Z.matvec(z) assert_array_almost_equal(A.dot(x), 0) # Test orthogonality assert_array_almost_equal(orthogonality(A, x), 0) # Test if x is the least square solution x = LS.matvec(z) x2 = scipy.linalg.lstsq(At_dense, z)[0] assert_array_almost_equal(x, x2) def test_iterative_refinements_sparse(self): A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]]) A = csc_matrix(A_dense) test_points = ([1, 2, 3, 4, 5, 6, 7, 8], [1, 10, 3, 0, 1, 6, 7, 8], [1.12, 10, 0, 0, 100000, 6, 0.7, 8], [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) for method in available_sparse_methods: Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100) for z in test_points: # Test if x is in the null_space x = Z.matvec(z) atol = 1e-13 * abs(x).max() assert_allclose(A.dot(x), 0, atol=atol) # Test orthogonality assert_allclose(orthogonality(A, x), 0, atol=1e-13) def test_rowspace_sparse(self): A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]]) A = csc_matrix(A_dense) test_points = ([1, 2, 3], [1, 10, 3], [1.12, 10, 0]) for method in available_sparse_methods: _, _, Y = projections(A, method) for z in test_points: # Test if x is solution of A x = z x = Y.matvec(z) assert_array_almost_equal(A.dot(x), z) # Test if x is in the return row space of A A_ext = np.vstack((A_dense, x)) assert_equal(np.linalg.matrix_rank(A_dense), np.linalg.matrix_rank(A_ext)) def test_nullspace_and_least_squares_dense(self): A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]]) At = A.T test_points = ([1, 2, 3, 4, 5, 6, 7, 8], [1, 10, 3, 0, 1, 6, 7, 8], [1.12, 10, 0, 0, 100000, 6, 0.7, 8]) for method in available_dense_methods: Z, LS, _ = projections(A, method) for z in test_points: # Test if x is in the null_space x = Z.matvec(z) assert_array_almost_equal(A.dot(x), 0) # Test orthogonality assert_array_almost_equal(orthogonality(A, x), 0) # Test if x is the least square solution x = LS.matvec(z) x2 = scipy.linalg.lstsq(At, z)[0] assert_array_almost_equal(x, x2) def test_compare_dense_and_sparse(self): D = np.diag(range(1, 101)) A = np.hstack([D, D, D, D]) A_sparse = csc_matrix(A) np.random.seed(0) Z, LS, Y = projections(A) Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) for k in range(20): z = np.random.normal(size=(400,)) assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) x = np.random.normal(size=(100,)) assert_array_almost_equal(Y.dot(x), 
Y_sparse.dot(x)) def test_compare_dense_and_sparse2(self): D1 = np.diag([-1.7, 1, 0.5]) D2 = np.diag([1, -0.6, -0.3]) D3 = np.diag([-0.3, -1.5, 2]) A = np.hstack([D1, D2, D3]) A_sparse = csc_matrix(A) np.random.seed(0) Z, LS, Y = projections(A) Z_sparse, LS_sparse, Y_sparse = projections(A_sparse) for k in range(1): z = np.random.normal(size=(9,)) assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z)) assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z)) x = np.random.normal(size=(3,)) assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x)) def test_iterative_refinements_dense(self): A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]]) test_points = ([1, 2, 3, 4, 5, 6, 7, 8], [1, 10, 3, 0, 1, 6, 7, 8], [1, 0, 0, 0, 0, 1, 2, 3+1e-10]) for method in available_dense_methods: Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10) for z in test_points: # Test if x is in the null_space x = Z.matvec(z) assert_allclose(A.dot(x), 0, rtol=0, atol=2.5e-14) # Test orthogonality assert_allclose(orthogonality(A, x), 0, rtol=0, atol=5e-16) def test_rowspace_dense(self): A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]]) test_points = ([1, 2, 3], [1, 10, 3], [1.12, 10, 0]) for method in available_dense_methods: _, _, Y = projections(A, method) for z in test_points: # Test if x is solution of A x = z x = Y.matvec(z) assert_array_almost_equal(A.dot(x), z) # Test if x is in the return row space of A A_ext = np.vstack((A, x)) assert_equal(np.linalg.matrix_rank(A), np.linalg.matrix_rank(A_ext)) class TestOrthogonality(TestCase): def test_dense_matrix(self): A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]]) test_vectors = ([-1.98931144, -1.56363389, -0.84115584, 2.2864762, 5.599141, 0.09286976, 1.37040802, -0.28145812], [697.92794044, -4091.65114008, -3327.42316335, 836.86906951, 99434.98929065, -1285.37653682, -4109.21503806, 2935.29289083]) test_expected_orth = (0, 0) for i in range(len(test_vectors)): x = test_vectors[i] orth = test_expected_orth[i] assert_array_almost_equal(orthogonality(A, x), orth) def test_sparse_matrix(self): A = np.array([[1, 2, 3, 4, 0, 5, 0, 7], [0, 8, 7, 0, 1, 5, 9, 0], [1, 0, 0, 0, 0, 1, 2, 3]]) A = csc_matrix(A) test_vectors = ([-1.98931144, -1.56363389, -0.84115584, 2.2864762, 5.599141, 0.09286976, 1.37040802, -0.28145812], [697.92794044, -4091.65114008, -3327.42316335, 836.86906951, 99434.98929065, -1285.37653682, -4109.21503806, 2935.29289083]) test_expected_orth = (0, 0) for i in range(len(test_vectors)): x = test_vectors[i] orth = test_expected_orth[i] assert_array_almost_equal(orthogonality(A, x), orth)
8,834
40.093023
76
py
scipy
scipy-main/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
import numpy as np from scipy.sparse import csc_matrix from scipy.optimize._trustregion_constr.qp_subproblem \ import (eqp_kktfact, projected_cg, box_intersections, sphere_intersections, box_sphere_intersections, modified_dogleg) from scipy.optimize._trustregion_constr.projections \ import projections from numpy.testing import TestCase, assert_array_almost_equal, assert_equal import pytest class TestEQPDirectFactorization(TestCase): # From Example 16.2 Nocedal/Wright "Numerical # Optimization" p.452. def test_nocedal_example(self): H = csc_matrix([[6, 2, 1], [2, 5, 2], [1, 2, 4]]) A = csc_matrix([[1, 0, 1], [0, 1, 1]]) c = np.array([-8, -3, -3]) b = -np.array([3, 0]) x, lagrange_multipliers = eqp_kktfact(H, c, A, b) assert_array_almost_equal(x, [2, -1, 1]) assert_array_almost_equal(lagrange_multipliers, [3, -2]) class TestSphericalBoundariesIntersections(TestCase): def test_2d_sphere_constraints(self): # Interior inicial point ta, tb, intersect = sphere_intersections([0, 0], [1, 0], 0.5) assert_array_almost_equal([ta, tb], [0, 0.5]) assert_equal(intersect, True) # No intersection between line and circle ta, tb, intersect = sphere_intersections([2, 0], [0, 1], 1) assert_equal(intersect, False) # Outside initial point pointing toward outside the circle ta, tb, intersect = sphere_intersections([2, 0], [1, 0], 1) assert_equal(intersect, False) # Outside initial point pointing toward inside the circle ta, tb, intersect = sphere_intersections([2, 0], [-1, 0], 1.5) assert_array_almost_equal([ta, tb], [0.5, 1]) assert_equal(intersect, True) # Initial point on the boundary ta, tb, intersect = sphere_intersections([2, 0], [1, 0], 2) assert_array_almost_equal([ta, tb], [0, 0]) assert_equal(intersect, True) def test_2d_sphere_constraints_line_intersections(self): # Interior initial point ta, tb, intersect = sphere_intersections([0, 0], [1, 0], 0.5, entire_line=True) assert_array_almost_equal([ta, tb], [-0.5, 0.5]) assert_equal(intersect, True) # No intersection between line and circle ta, tb, intersect = sphere_intersections([2, 0], [0, 1], 1, entire_line=True) assert_equal(intersect, False) # Outside initial point pointing toward outside the circle ta, tb, intersect = sphere_intersections([2, 0], [1, 0], 1, entire_line=True) assert_array_almost_equal([ta, tb], [-3, -1]) assert_equal(intersect, True) # Outside initial point pointing toward inside the circle ta, tb, intersect = sphere_intersections([2, 0], [-1, 0], 1.5, entire_line=True) assert_array_almost_equal([ta, tb], [0.5, 3.5]) assert_equal(intersect, True) # Initial point on the boundary ta, tb, intersect = sphere_intersections([2, 0], [1, 0], 2, entire_line=True) assert_array_almost_equal([ta, tb], [-4, 0]) assert_equal(intersect, True) class TestBoxBoundariesIntersections(TestCase): def test_2d_box_constraints(self): # Box constraint in the direction of vector d ta, tb, intersect = box_intersections([2, 0], [0, 2], [1, 1], [3, 3]) assert_array_almost_equal([ta, tb], [0.5, 1]) assert_equal(intersect, True) # Negative direction ta, tb, intersect = box_intersections([2, 0], [0, 2], [1, -3], [3, -1]) assert_equal(intersect, False) # Some constraints are absent (set to +/- inf) ta, tb, intersect = box_intersections([2, 0], [0, 2], [-np.inf, 1], [np.inf, np.inf]) assert_array_almost_equal([ta, tb], [0.5, 1]) assert_equal(intersect, True) # Intersect on the face of the box ta, tb, intersect = box_intersections([1, 0], [0, 1], [1, 1], [3, 3]) assert_array_almost_equal([ta, tb], [1, 1]) assert_equal(intersect, True) # Interior initial point ta, tb, 
intersect = box_intersections([0, 0], [4, 4], [-2, -3], [3, 2]) assert_array_almost_equal([ta, tb], [0, 0.5]) assert_equal(intersect, True) # No intersection between line and box constraints ta, tb, intersect = box_intersections([2, 0], [0, 2], [-3, -3], [-1, -1]) assert_equal(intersect, False) ta, tb, intersect = box_intersections([2, 0], [0, 2], [-3, 3], [-1, 1]) assert_equal(intersect, False) ta, tb, intersect = box_intersections([2, 0], [0, 2], [-3, -np.inf], [-1, np.inf]) assert_equal(intersect, False) ta, tb, intersect = box_intersections([0, 0], [1, 100], [1, 1], [3, 3]) assert_equal(intersect, False) ta, tb, intersect = box_intersections([0.99, 0], [0, 2], [1, 1], [3, 3]) assert_equal(intersect, False) # Initial point on the boundary ta, tb, intersect = box_intersections([2, 2], [0, 1], [-2, -2], [2, 2]) assert_array_almost_equal([ta, tb], [0, 0]) assert_equal(intersect, True) def test_2d_box_constraints_entire_line(self): # Box constraint in the direction of vector d ta, tb, intersect = box_intersections([2, 0], [0, 2], [1, 1], [3, 3], entire_line=True) assert_array_almost_equal([ta, tb], [0.5, 1.5]) assert_equal(intersect, True) # Negative direction ta, tb, intersect = box_intersections([2, 0], [0, 2], [1, -3], [3, -1], entire_line=True) assert_array_almost_equal([ta, tb], [-1.5, -0.5]) assert_equal(intersect, True) # Some constraints are absent (set to +/- inf) ta, tb, intersect = box_intersections([2, 0], [0, 2], [-np.inf, 1], [np.inf, np.inf], entire_line=True) assert_array_almost_equal([ta, tb], [0.5, np.inf]) assert_equal(intersect, True) # Intersect on the face of the box ta, tb, intersect = box_intersections([1, 0], [0, 1], [1, 1], [3, 3], entire_line=True) assert_array_almost_equal([ta, tb], [1, 3]) assert_equal(intersect, True) # Interior initial pointoint ta, tb, intersect = box_intersections([0, 0], [4, 4], [-2, -3], [3, 2], entire_line=True) assert_array_almost_equal([ta, tb], [-0.5, 0.5]) assert_equal(intersect, True) # No intersection between line and box constraints ta, tb, intersect = box_intersections([2, 0], [0, 2], [-3, -3], [-1, -1], entire_line=True) assert_equal(intersect, False) ta, tb, intersect = box_intersections([2, 0], [0, 2], [-3, 3], [-1, 1], entire_line=True) assert_equal(intersect, False) ta, tb, intersect = box_intersections([2, 0], [0, 2], [-3, -np.inf], [-1, np.inf], entire_line=True) assert_equal(intersect, False) ta, tb, intersect = box_intersections([0, 0], [1, 100], [1, 1], [3, 3], entire_line=True) assert_equal(intersect, False) ta, tb, intersect = box_intersections([0.99, 0], [0, 2], [1, 1], [3, 3], entire_line=True) assert_equal(intersect, False) # Initial point on the boundary ta, tb, intersect = box_intersections([2, 2], [0, 1], [-2, -2], [2, 2], entire_line=True) assert_array_almost_equal([ta, tb], [-4, 0]) assert_equal(intersect, True) def test_3d_box_constraints(self): # Simple case ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1], [1, 1, 1], [3, 3, 3]) assert_array_almost_equal([ta, tb], [1, 1]) assert_equal(intersect, True) # Negative direction ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1], [1, 1, 1], [3, 3, 3]) assert_equal(intersect, False) # Interior point ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1], [1, 1, 1], [3, 3, 3]) assert_array_almost_equal([ta, tb], [0, 1]) assert_equal(intersect, True) def test_3d_box_constraints_entire_line(self): # Simple case ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1], [1, 1, 1], [3, 3, 3], entire_line=True) assert_array_almost_equal([ta, tb], 
[1, 3]) assert_equal(intersect, True) # Negative direction ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1], [1, 1, 1], [3, 3, 3], entire_line=True) assert_array_almost_equal([ta, tb], [-3, -1]) assert_equal(intersect, True) # Interior point ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1], [1, 1, 1], [3, 3, 3], entire_line=True) assert_array_almost_equal([ta, tb], [-1, 1]) assert_equal(intersect, True) class TestBoxSphereBoundariesIntersections(TestCase): def test_2d_box_constraints(self): # Both constraints are active ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2], [-1, -2], [1, 2], 2, entire_line=False) assert_array_almost_equal([ta, tb], [0, 0.5]) assert_equal(intersect, True) # None of the constraints are active ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1], [-1, -3], [1, 3], 10, entire_line=False) assert_array_almost_equal([ta, tb], [0, 1]) assert_equal(intersect, True) # Box constraints are active ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], [-1, -3], [1, 3], 10, entire_line=False) assert_array_almost_equal([ta, tb], [0, 0.5]) assert_equal(intersect, True) # Spherical constraints are active ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], [-1, -3], [1, 3], 2, entire_line=False) assert_array_almost_equal([ta, tb], [0, 0.25]) assert_equal(intersect, True) # Infeasible problems ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4], [-1, -3], [1, 3], 2, entire_line=False) assert_equal(intersect, False) ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], [2, 4], [2, 4], 2, entire_line=False) assert_equal(intersect, False) def test_2d_box_constraints_entire_line(self): # Both constraints are active ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2], [-1, -2], [1, 2], 2, entire_line=True) assert_array_almost_equal([ta, tb], [0, 0.5]) assert_equal(intersect, True) # None of the constraints are active ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1], [-1, -3], [1, 3], 10, entire_line=True) assert_array_almost_equal([ta, tb], [0, 2]) assert_equal(intersect, True) # Box constraints are active ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], [-1, -3], [1, 3], 10, entire_line=True) assert_array_almost_equal([ta, tb], [0, 0.5]) assert_equal(intersect, True) # Spherical constraints are active ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], [-1, -3], [1, 3], 2, entire_line=True) assert_array_almost_equal([ta, tb], [0, 0.25]) assert_equal(intersect, True) # Infeasible problems ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4], [-1, -3], [1, 3], 2, entire_line=True) assert_equal(intersect, False) ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4], [2, 4], [2, 4], 2, entire_line=True) assert_equal(intersect, False) class TestModifiedDogleg(TestCase): def test_cauchypoint_equalsto_newtonpoint(self): A = np.array([[1, 8]]) b = np.array([-16]) _, _, Y = projections(A) newton_point = np.array([0.24615385, 1.96923077]) # Newton point inside boundaries x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf]) assert_array_almost_equal(x, newton_point) # Spherical constraint active x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf]) assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point)) # Box constraints active x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf]) assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1) def test_3d_example(self): A = np.array([[1, 8, 1], [4, 
2, 2]]) b = np.array([-16, 2]) Z, LS, Y = projections(A) newton_point = np.array([-1.37090909, 2.23272727, -0.49090909]) cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585]) origin = np.zeros_like(newton_point) # newton_point inside boundaries x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf], [np.inf, np.inf, np.inf]) assert_array_almost_equal(x, newton_point) # line between cauchy_point and newton_point contains best point # (spherical constraint is active). x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf], [np.inf, np.inf, np.inf]) z = cauchy_point d = newton_point-cauchy_point t = ((x-z)/(d)) assert_array_almost_equal(t, np.full(3, 0.40807330)) assert_array_almost_equal(np.linalg.norm(x), 2) # line between cauchy_point and newton_point contains best point # (box constraint is active). x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf], [np.inf, np.inf, np.inf]) z = cauchy_point d = newton_point-cauchy_point t = ((x-z)/(d)) assert_array_almost_equal(t, np.full(3, 0.7498195)) assert_array_almost_equal(x[0], -1) # line between origin and cauchy_point contains best point # (spherical constraint is active). x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf], [np.inf, np.inf, np.inf]) z = origin d = cauchy_point t = ((x-z)/(d)) assert_array_almost_equal(t, np.full(3, 0.573936265)) assert_array_almost_equal(np.linalg.norm(x), 1) # line between origin and newton_point contains best point # (box constraint is active). x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf], [np.inf, 1, np.inf]) z = origin d = newton_point t = ((x-z)/(d)) assert_array_almost_equal(t, np.full(3, 0.4478827364)) assert_array_almost_equal(x[1], 1) class TestProjectCG(TestCase): # From Example 16.2 Nocedal/Wright "Numerical # Optimization" p.452. 
def test_nocedal_example(self): H = csc_matrix([[6, 2, 1], [2, 5, 2], [1, 2, 4]]) A = csc_matrix([[1, 0, 1], [0, 1, 1]]) c = np.array([-8, -3, -3]) b = -np.array([3, 0]) Z, _, Y = projections(A) x, info = projected_cg(H, c, Z, Y, b) assert_equal(info["stop_cond"], 4) assert_equal(info["hits_boundary"], False) assert_array_almost_equal(x, [2, -1, 1]) def test_compare_with_direct_fact(self): H = csc_matrix([[6, 2, 1, 3], [2, 5, 2, 4], [1, 2, 4, 5], [3, 4, 5, 7]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 1, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) Z, _, Y = projections(A) x, info = projected_cg(H, c, Z, Y, b, tol=0) x_kkt, _ = eqp_kktfact(H, c, A, b) assert_equal(info["stop_cond"], 1) assert_equal(info["hits_boundary"], False) assert_array_almost_equal(x, x_kkt) def test_trust_region_infeasible(self): H = csc_matrix([[6, 2, 1, 3], [2, 5, 2, 4], [1, 2, 4, 5], [3, 4, 5, 7]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 1, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) trust_radius = 1 Z, _, Y = projections(A) with pytest.raises(ValueError): projected_cg(H, c, Z, Y, b, trust_radius=trust_radius) def test_trust_region_barely_feasible(self): H = csc_matrix([[6, 2, 1, 3], [2, 5, 2, 4], [1, 2, 4, 5], [3, 4, 5, 7]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 1, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) trust_radius = 2.32379000772445021283 Z, _, Y = projections(A) x, info = projected_cg(H, c, Z, Y, b, tol=0, trust_radius=trust_radius) assert_equal(info["stop_cond"], 2) assert_equal(info["hits_boundary"], True) assert_array_almost_equal(np.linalg.norm(x), trust_radius) assert_array_almost_equal(x, -Y.dot(b)) def test_hits_boundary(self): H = csc_matrix([[6, 2, 1, 3], [2, 5, 2, 4], [1, 2, 4, 5], [3, 4, 5, 7]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 1, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) trust_radius = 3 Z, _, Y = projections(A) x, info = projected_cg(H, c, Z, Y, b, tol=0, trust_radius=trust_radius) assert_equal(info["stop_cond"], 2) assert_equal(info["hits_boundary"], True) assert_array_almost_equal(np.linalg.norm(x), trust_radius) def test_negative_curvature_unconstrained(self): H = csc_matrix([[1, 2, 1, 3], [2, 0, 2, 4], [1, 2, 0, 2], [3, 4, 2, 0]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 0, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) Z, _, Y = projections(A) with pytest.raises(ValueError): projected_cg(H, c, Z, Y, b, tol=0) def test_negative_curvature(self): H = csc_matrix([[1, 2, 1, 3], [2, 0, 2, 4], [1, 2, 0, 2], [3, 4, 2, 0]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 0, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) Z, _, Y = projections(A) trust_radius = 1000 x, info = projected_cg(H, c, Z, Y, b, tol=0, trust_radius=trust_radius) assert_equal(info["stop_cond"], 3) assert_equal(info["hits_boundary"], True) assert_array_almost_equal(np.linalg.norm(x), trust_radius) # The box constraints are inactive at the solution but # are active during the iterations. def test_inactive_box_constraints(self): H = csc_matrix([[6, 2, 1, 3], [2, 5, 2, 4], [1, 2, 4, 5], [3, 4, 5, 7]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 1, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) Z, _, Y = projections(A) x, info = projected_cg(H, c, Z, Y, b, tol=0, lb=[0.5, -np.inf, -np.inf, -np.inf], return_all=True) x_kkt, _ = eqp_kktfact(H, c, A, b) assert_equal(info["stop_cond"], 1) assert_equal(info["hits_boundary"], False) assert_array_almost_equal(x, x_kkt) # The box constraints active and the termination is # by maximum iterations (infeasible iteraction). 
def test_active_box_constraints_maximum_iterations_reached(self): H = csc_matrix([[6, 2, 1, 3], [2, 5, 2, 4], [1, 2, 4, 5], [3, 4, 5, 7]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 1, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) Z, _, Y = projections(A) x, info = projected_cg(H, c, Z, Y, b, tol=0, lb=[0.8, -np.inf, -np.inf, -np.inf], return_all=True) assert_equal(info["stop_cond"], 1) assert_equal(info["hits_boundary"], True) assert_array_almost_equal(A.dot(x), -b) assert_array_almost_equal(x[0], 0.8) # The box constraints are active and the termination is # because it hits boundary (without infeasible iteraction). def test_active_box_constraints_hits_boundaries(self): H = csc_matrix([[6, 2, 1, 3], [2, 5, 2, 4], [1, 2, 4, 5], [3, 4, 5, 7]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 1, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) trust_radius = 3 Z, _, Y = projections(A) x, info = projected_cg(H, c, Z, Y, b, tol=0, ub=[np.inf, np.inf, 1.6, np.inf], trust_radius=trust_radius, return_all=True) assert_equal(info["stop_cond"], 2) assert_equal(info["hits_boundary"], True) assert_array_almost_equal(x[2], 1.6) # The box constraints are active and the termination is # because it hits boundary (infeasible iteraction). def test_active_box_constraints_hits_boundaries_infeasible_iter(self): H = csc_matrix([[6, 2, 1, 3], [2, 5, 2, 4], [1, 2, 4, 5], [3, 4, 5, 7]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 1, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) trust_radius = 4 Z, _, Y = projections(A) x, info = projected_cg(H, c, Z, Y, b, tol=0, ub=[np.inf, 0.1, np.inf, np.inf], trust_radius=trust_radius, return_all=True) assert_equal(info["stop_cond"], 2) assert_equal(info["hits_boundary"], True) assert_array_almost_equal(x[1], 0.1) # The box constraints are active and the termination is # because it hits boundary (no infeasible iteraction). def test_active_box_constraints_negative_curvature(self): H = csc_matrix([[1, 2, 1, 3], [2, 0, 2, 4], [1, 2, 0, 2], [3, 4, 2, 0]]) A = csc_matrix([[1, 0, 1, 0], [0, 1, 0, 1]]) c = np.array([-2, -3, -3, 1]) b = -np.array([3, 0]) Z, _, Y = projections(A) trust_radius = 1000 x, info = projected_cg(H, c, Z, Y, b, tol=0, ub=[np.inf, np.inf, 100, np.inf], trust_radius=trust_radius) assert_equal(info["stop_cond"], 3) assert_equal(info["hits_boundary"], True) assert_array_almost_equal(x[2], 100)
27,719
41.910217
79
py
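For orientation, the test file above exercises the private trust-constr QP helpers on the Nocedal/Wright Example 16.2 data. A minimal sketch of that same example outside the test harness might look as follows; it imports private scipy.optimize._trustregion_constr modules exactly as the tests do, so treat it as illustrative rather than a supported API:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.optimize._trustregion_constr.qp_subproblem import eqp_kktfact, projected_cg
from scipy.optimize._trustregion_constr.projections import projections

# Equality-constrained QP (same data as the tests above):
# minimize 0.5 x^T H x + c^T x  subject to  A x + b = 0
H = csc_matrix([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
A = csc_matrix([[1, 0, 1], [0, 1, 1]])
c = np.array([-8, -3, -3])
b = -np.array([3, 0])

# Direct KKT factorization returns the solution and the Lagrange multipliers.
x_kkt, multipliers = eqp_kktfact(H, c, A, b)   # x_kkt is approximately [2, -1, 1]

# The projected CG solver reaches the same point using the projections of A.
Z, _, Y = projections(A)
x_cg, info = projected_cg(H, c, Z, Y, b)
print(x_kkt, x_cg, info["stop_cond"], info["hits_boundary"])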
scipy
scipy-main/scipy/optimize/_trustregion_constr/tests/__init__.py
0
0
0
py
scipy
scipy-main/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py
import numpy as np from numpy.testing import assert_array_equal, assert_equal from scipy.optimize._constraints import (NonlinearConstraint, Bounds, PreparedConstraint) from scipy.optimize._trustregion_constr.canonical_constraint \ import CanonicalConstraint, initial_constraints_as_canonical def create_quadratic_function(n, m, rng): a = rng.rand(m) A = rng.rand(m, n) H = rng.rand(m, n, n) HT = np.transpose(H, (1, 2, 0)) def fun(x): return a + A.dot(x) + 0.5 * H.dot(x).dot(x) def jac(x): return A + H.dot(x) def hess(x, v): return HT.dot(v) return fun, jac, hess def test_bounds_cases(): # Test 1: no constraints. user_constraint = Bounds(-np.inf, np.inf) x0 = np.array([-1, 2]) prepared_constraint = PreparedConstraint(user_constraint, x0, False) c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) assert_equal(c.n_eq, 0) assert_equal(c.n_ineq, 0) c_eq, c_ineq = c.fun(x0) assert_array_equal(c_eq, []) assert_array_equal(c_ineq, []) J_eq, J_ineq = c.jac(x0) assert_array_equal(J_eq, np.empty((0, 2))) assert_array_equal(J_ineq, np.empty((0, 2))) assert_array_equal(c.keep_feasible, []) # Test 2: infinite lower bound. user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True]) x0 = np.array([-1, -2, -3], dtype=float) prepared_constraint = PreparedConstraint(user_constraint, x0, False) c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) assert_equal(c.n_eq, 0) assert_equal(c.n_ineq, 2) c_eq, c_ineq = c.fun(x0) assert_array_equal(c_eq, []) assert_array_equal(c_ineq, [-1, -4]) J_eq, J_ineq = c.jac(x0) assert_array_equal(J_eq, np.empty((0, 3))) assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]])) assert_array_equal(c.keep_feasible, [False, True]) # Test 3: infinite upper bound. user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True]) x0 = np.array([1, 2, 3], dtype=float) prepared_constraint = PreparedConstraint(user_constraint, x0, False) c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) assert_equal(c.n_eq, 0) assert_equal(c.n_ineq, 2) c_eq, c_ineq = c.fun(x0) assert_array_equal(c_eq, []) assert_array_equal(c_ineq, [-1, -1]) J_eq, J_ineq = c.jac(x0) assert_array_equal(J_eq, np.empty((0, 3))) assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]])) assert_array_equal(c.keep_feasible, [True, False]) # Test 4: interval constraint. 
user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3], [False, True, True, True]) x0 = np.array([0, 10, 8, 5]) prepared_constraint = PreparedConstraint(user_constraint, x0, False) c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) assert_equal(c.n_eq, 1) assert_equal(c.n_ineq, 4) c_eq, c_ineq = c.fun(x0) assert_array_equal(c_eq, [2]) assert_array_equal(c_ineq, [-1, -2, -1, -6]) J_eq, J_ineq = c.jac(x0) assert_array_equal(J_eq, [[0, 0, 0, 1]]) assert_array_equal(J_ineq, [[1, 0, 0, 0], [0, 0, 1, 0], [-1, 0, 0, 0], [0, 0, -1, 0]]) assert_array_equal(c.keep_feasible, [False, True, False, True]) def test_nonlinear_constraint(): n = 3 m = 5 rng = np.random.RandomState(0) x0 = rng.rand(n) fun, jac, hess = create_quadratic_function(n, m, rng) f = fun(x0) J = jac(x0) lb = [-10, 3, -np.inf, -np.inf, -5] ub = [10, 3, np.inf, 3, np.inf] user_constraint = NonlinearConstraint( fun, lb, ub, jac, hess, [True, False, False, True, False]) for sparse_jacobian in [False, True]: prepared_constraint = PreparedConstraint(user_constraint, x0, sparse_jacobian) c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint) assert_array_equal(c.n_eq, 1) assert_array_equal(c.n_ineq, 4) c_eq, c_ineq = c.fun(x0) assert_array_equal(c_eq, [f[1] - lb[1]]) assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4], f[0] - ub[0], lb[0] - f[0]]) J_eq, J_ineq = c.jac(x0) if sparse_jacobian: J_eq = J_eq.toarray() J_ineq = J_ineq.toarray() assert_array_equal(J_eq, J[1, None]) assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0]))) v_eq = rng.rand(c.n_eq) v_ineq = rng.rand(c.n_ineq) v = np.zeros(m) v[1] = v_eq[0] v[3] = v_ineq[0] v[4] = -v_ineq[1] v[0] = v_ineq[2] - v_ineq[3] assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v)) assert_array_equal(c.keep_feasible, [True, False, True, True]) def test_concatenation(): rng = np.random.RandomState(0) n = 4 x0 = rng.rand(n) f1 = x0 J1 = np.eye(n) lb1 = [-1, -np.inf, -2, 3] ub1 = [1, np.inf, np.inf, 3] bounds = Bounds(lb1, ub1, [False, False, True, False]) fun, jac, hess = create_quadratic_function(n, 5, rng) f2 = fun(x0) J2 = jac(x0) lb2 = [-10, 3, -np.inf, -np.inf, -5] ub2 = [10, 3, np.inf, 5, np.inf] nonlinear = NonlinearConstraint( fun, lb2, ub2, jac, hess, [True, False, False, True, False]) for sparse_jacobian in [False, True]: bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared) c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared) c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian) assert_equal(c.n_eq, 2) assert_equal(c.n_ineq, 7) c_eq, c_ineq = c.fun(x0) assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], lb1[0] - f1[0], f2[3] - ub2[3], lb2[4] - f2[4], f2[0] - ub2[0], lb2[0] - f2[0]]) J_eq, J_ineq = c.jac(x0) if sparse_jacobian: J_eq = J_eq.toarray() J_ineq = J_ineq.toarray() assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], -J2[4], J2[0], -J2[0]))) v_eq = rng.rand(c.n_eq) v_ineq = rng.rand(c.n_ineq) v = np.zeros(5) v[1] = v_eq[1] v[3] = v_ineq[3] v[4] = -v_ineq[4] v[0] = v_ineq[5] - v_ineq[6] H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n)) assert_array_equal(H, hess(x0, v)) assert_array_equal(c.keep_feasible, [True, False, False, True, False, True, True]) def test_empty(): x = np.array([1, 2, 3]) c = 
CanonicalConstraint.empty(3) assert_equal(c.n_eq, 0) assert_equal(c.n_ineq, 0) c_eq, c_ineq = c.fun(x) assert_array_equal(c_eq, []) assert_array_equal(c_ineq, []) J_eq, J_ineq = c.jac(x) assert_array_equal(J_eq, np.empty((0, 3))) assert_array_equal(J_ineq, np.empty((0, 3))) H = c.hess(x, None, None).toarray() assert_array_equal(H, np.zeros((3, 3))) def test_initial_constraints_as_canonical(): # rng is only used to generate the coefficients of the quadratic # function that is used by the nonlinear constraint. rng = np.random.RandomState(0) x0 = np.array([0.5, 0.4, 0.3, 0.2]) n = len(x0) lb1 = [-1, -np.inf, -2, 3] ub1 = [1, np.inf, np.inf, 3] bounds = Bounds(lb1, ub1, [False, False, True, False]) fun, jac, hess = create_quadratic_function(n, 5, rng) lb2 = [-10, 3, -np.inf, -np.inf, -5] ub2 = [10, 3, np.inf, 5, np.inf] nonlinear = NonlinearConstraint( fun, lb2, ub2, jac, hess, [True, False, False, True, False]) for sparse_jacobian in [False, True]: bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian) nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian) f1 = bounds_prepared.fun.f J1 = bounds_prepared.fun.J f2 = nonlinear_prepared.fun.f J2 = nonlinear_prepared.fun.J c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( n, [bounds_prepared, nonlinear_prepared], sparse_jacobian) assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]]) assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0], lb1[0] - f1[0], f2[3] - ub2[3], lb2[4] - f2[4], f2[0] - ub2[0], lb2[0] - f2[0]]) if sparse_jacobian: J1 = J1.toarray() J2 = J2.toarray() J_eq = J_eq.toarray() J_ineq = J_ineq.toarray() assert_array_equal(J_eq, np.vstack((J1[3], J2[1]))) assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3], -J2[4], J2[0], -J2[0]))) def test_initial_constraints_as_canonical_empty(): n = 3 for sparse_jacobian in [False, True]: c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical( n, [], sparse_jacobian) assert_array_equal(c_eq, []) assert_array_equal(c_ineq, []) if sparse_jacobian: J_eq = J_eq.toarray() J_ineq = J_ineq.toarray() assert_array_equal(J_eq, np.empty((0, n))) assert_array_equal(J_ineq, np.empty((0, n)))
9,869
32.232323
79
py
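As a quick illustration of the conversion tested above, the interval-bound case ("Test 4") can be reproduced directly. This is a minimal sketch using the same private modules as the record above, for illustration only:

import numpy as np
from scipy.optimize._constraints import Bounds, PreparedConstraint
from scipy.optimize._trustregion_constr.canonical_constraint import CanonicalConstraint

# Interval bounds: components with lb == ub become equalities, finite
# one-sided bounds become inequalities (same data as "Test 4" above).
bounds = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3])
x0 = np.array([0.0, 10.0, 8.0, 5.0])

prepared = PreparedConstraint(bounds, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared)

print(c.n_eq, c.n_ineq)        # 1 equality (x[3] pinned to 3) and 4 inequalities
c_eq, c_ineq = c.fun(x0)       # constraint residuals at x0
J_eq, J_ineq = c.jac(x0)       # corresponding Jacobian blocks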
scipy
scipy-main/scipy/optimize/_trustregion_constr/tests/test_report.py
import numpy as np
from scipy.optimize import minimize, Bounds


def test_gh10880():
    # checks that verbose reporting works with trust-constr for
    # bound-constrained problems
    bnds = Bounds(1, 2)
    opts = {'maxiter': 1000, 'verbose': 2}
    minimize(lambda x: x**2, x0=2., method='trust-constr',
             bounds=bnds, options=opts)

    opts = {'maxiter': 1000, 'verbose': 3}
    minimize(lambda x: x**2, x0=2., method='trust-constr',
             bounds=bnds, options=opts)


def test_gh12922():
    # checks that verbose reporting works with trust-constr for
    # general constraints
    def objective(x):
        return np.array([(np.sum((x+1)**4))])

    cons = {'type': 'ineq', 'fun': lambda x: -x[0]**2}
    n = 25
    x0 = np.linspace(-5, 5, n)

    opts = {'maxiter': 1000, 'verbose': 2}
    minimize(objective, x0=x0, method='trust-constr',
             constraints=cons, options=opts)

    opts = {'maxiter': 1000, 'verbose': 3}
    minimize(objective, x0=x0, method='trust-constr',
             constraints=cons, options=opts)
1,070
31.454545
63
py
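The two regression tests above only check that verbose reporting does not raise; in ordinary use the call looks the same but the returned OptimizeResult is what matters. A minimal sketch of the same bound-constrained call through the public API, with illustrative values:

from scipy.optimize import minimize, Bounds

# Same shape of problem as test_gh10880 above, but capturing the result
# instead of only checking that the verbose printer runs.
res = minimize(lambda x: x**2, x0=2., method='trust-constr',
               bounds=Bounds(1, 2), options={'maxiter': 1000, 'verbose': 0})
print(res.x, res.fun, res.status)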
scipy
scipy-main/scipy/optimize/_highs/setup.py
""" setup.py for HiGHS scipy interface Some CMake files are used to create source lists for compilation """ from datetime import datetime import os from os.path import join from scipy._lib._highs_utils import _highs_dir def pre_build_hook(build_ext, ext): from scipy._build_utils.compiler_helper import get_cxx_std_flag std_flag = get_cxx_std_flag(build_ext._cxx_compiler) if std_flag is not None: ext.extra_compile_args.append(std_flag) def basiclu_pre_build_hook(build_clib, build_info): from scipy._build_utils.compiler_helper import get_c_std_flag c_flag = get_c_std_flag(build_clib.compiler) if c_flag is not None: if 'extra_compiler_args' not in build_info: build_info['extra_compiler_args'] = [] build_info['extra_compiler_args'].append(c_flag) def _get_sources(CMakeLists: str, start_token: str, end_token: str): # Read in sources from CMakeLists.txt CMakeLists = str(_highs_dir() / CMakeLists) with open(CMakeLists, encoding='utf-8') as f: s = f.read() # Find block where sources are listed start_idx = s.find(start_token) + len(start_token) end_idx = s[start_idx:].find(end_token) + len(s[:start_idx]) sources = s[start_idx:end_idx].split('\n') sources = [s.strip() for s in sources if s[0] != '#'] sources = [str(_highs_dir() / "src" / s) for s in sources] return sources def _get_version(CMakeLists: str, start_token: str, end_token: str = ')'): # Grab some more info about HiGHS from root CMakeLists CMakeLists = str(_highs_dir() / CMakeLists) with open(CMakeLists, encoding='utf-8') as f: s = f.read() start_idx = s.find(start_token) + len(start_token) + 1 end_idx = s[start_idx:].find(end_token) + len(s[:start_idx]) return s[start_idx:end_idx].strip() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('_highs', parent_package, top_path) # HiGHS info _major_dot_minor = _get_version( 'CMakeLists.txt', 'project(HIGHS VERSION', 'LANGUAGES CXX C') HIGHS_VERSION_MAJOR, HIGHS_VERSION_MINOR = _major_dot_minor.split('.') HIGHS_VERSION_PATCH = _get_version( 'CMakeLists.txt', 'HIGHS_VERSION_PATCH') GITHASH = 'n/a' HIGHS_DIR = str(_highs_dir().resolve()) # Here are the pound defines that HConfig.h would usually provide; # We provide an empty HConfig.h file and do the defs and undefs # here: TODAY_DATE = datetime.today().strftime('%Y-%m-%d') DEFINE_MACROS = [ ('CMAKE_BUILD_TYPE', '"RELEASE"'), ('HIGHS_GITHASH', '"%s"' % GITHASH), ('HIGHS_COMPILATION_DATE', '"' + TODAY_DATE + '"'), ('HIGHS_VERSION_MAJOR', HIGHS_VERSION_MAJOR), ('HIGHS_VERSION_MINOR', HIGHS_VERSION_MINOR), ('HIGHS_VERSION_PATCH', HIGHS_VERSION_PATCH), ('HIGHS_DIR', '"' + HIGHS_DIR + '"'), ('NDEBUG', None), # ('NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION', None), ] UNDEF_MACROS = [ 'OPENMP', # unconditionally disable openmp 'EXT_PRESOLVE', 'SCIP_DEV', 'HiGHSDEV', 'OSI_FOUND', ] # Compile BASICLU as a static library to appease clang: # (won't allow -std=c++11/14 option for C sources) basiclu_sources = _get_sources('src/CMakeLists.txt', 'set(basiclu_sources\n', ')') highs_root = _highs_dir() config.add_library( 'basiclu', sources=basiclu_sources, include_dirs=[ 'src', str(highs_root / 'src'), str(highs_root / 'src/util'), str(highs_root / 'extern'), join(str(highs_root), 'src', 'ipm', 'basiclu', 'include'), ], language='c', macros=DEFINE_MACROS, _pre_build_hook=basiclu_pre_build_hook, ) # highs_wrapper: ipx_sources = _get_sources('src/CMakeLists.txt', 'set(ipx_sources\n', ')') highs_sources = _get_sources('src/CMakeLists.txt', 'set(sources\n', ')') highs_sources += 
[str(highs_root / "src/ipm/IpxWrapper.cpp")] ext = config.add_extension( '_highs_wrapper', sources=([join('cython', 'src', '_highs_wrapper.cxx')] + highs_sources + ipx_sources), include_dirs=[ # highs_wrapper 'src', str(highs_root / 'src'), str(highs_root / 'src/util'), str(highs_root / 'extern'), join(str(highs_root), 'cython', 'src'), join(str(highs_root), 'src', 'lp_data'), # highs join(str(highs_root), 'src', 'io'), join(str(highs_root), 'src', 'ipm', 'ipx', 'include'), # IPX join(str(highs_root), 'src', 'ipm', 'ipx', 'include'), join(str(highs_root), 'src', 'ipm', 'basiclu', 'include'), ], language='c++', libraries=['basiclu'], define_macros=DEFINE_MACROS, undef_macros=UNDEF_MACROS, depends=["setup.py"] + basiclu_sources + highs_sources + ipx_sources, ) # Add c++11/14 support: ext._pre_build_hook = pre_build_hook # Export constants and enums from HiGHS: ext = config.add_extension( '_highs_constants', sources=[join('cython', 'src', '_highs_constants.cxx')], include_dirs=[ 'src', str(highs_root / 'src'), str(highs_root / 'src/util'), str(highs_root / 'extern'), join(str(highs_root), 'cython', 'src'), join(str(highs_root), 'src', 'io'), join(str(highs_root), 'src', 'lp_data'), join(str(highs_root), 'src', 'simplex'), ], language='c++', depends=["setup.py"], ) ext._pre_build_hook = pre_build_hook config.add_data_files(os.path.join('cython', 'src', '*.pxd')) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
6,062
34.046243
78
py
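The build script above wires the HiGHS sources into a scipy extension; the user-facing entry point that this interface backs is the 'highs' family of methods in scipy.optimize.linprog. A minimal sketch of calling that public interface, with example data chosen here purely for illustration:

import numpy as np
from scipy.optimize import linprog

# minimize  x0 + 2*x1  subject to  x0 + x1 >= 1,  x >= 0
c = np.array([1.0, 2.0])
A_ub = np.array([[-1.0, -1.0]])   # -x0 - x1 <= -1 encodes x0 + x1 >= 1
b_ub = np.array([-1.0])
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(0, None), (0, None)],
              method='highs')
print(res.x, res.fun)   # expected: x close to [1, 0], objective close to 1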
scipy
scipy-main/scipy/optimize/_highs/__init__.py
0
0
0
py
scipy
scipy-main/scipy/optimize/_highs/cython/__init__.py
0
0
0
py
scipy
scipy-main/scipy/optimize/_highs/cython/src/__init__.py
0
0
0
py
scipy
scipy-main/scipy/optimize/_shgo_lib/_complex.py
"""Base classes for low memory simplicial complex structures.""" import copy import logging import itertools import decimal from functools import cache import numpy from ._vertex import (VertexCacheField, VertexCacheIndex) class Complex: """ Base class for a simplicial complex described as a cache of vertices together with their connections. Important methods: Domain triangulation: Complex.triangulate, Complex.split_generation Triangulating arbitrary points (must be traingulable, may exist outside domain): Complex.triangulate(sample_set) Converting another simplicial complex structure data type to the structure used in Complex (ex. OBJ wavefront) Complex.convert(datatype, data) Important objects: HC.V: The cache of vertices and their connection HC.H: Storage structure of all vertex groups Parameters ---------- dim : int Spatial dimensionality of the complex R^dim domain : list of tuples, optional The bounds [x_l, x_u]^dim of the hyperrectangle space ex. The default domain is the hyperrectangle [0, 1]^dim Note: The domain must be convex, non-convex spaces can be cut away from this domain using the non-linear g_cons functions to define any arbitrary domain (these domains may also be disconnected from each other) sfield : A scalar function defined in the associated domain f: R^dim --> R sfield_args : tuple Additional arguments to be passed to `sfield` vfield : A scalar function defined in the associated domain f: R^dim --> R^m (for example a gradient function of the scalar field) vfield_args : tuple Additional arguments to be passed to vfield symmetry : None or list Specify if the objective function contains symmetric variables. The search space (and therefore performance) is decreased by up to O(n!) times in the fully symmetric case. E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 In this equation x_2 and x_3 are symmetric to x_1, while x_5 and x_6 are symmetric to x_4, this can be specified to the solver as: symmetry = [0, # Variable 1 0, # symmetric to variable 1 0, # symmetric to variable 1 3, # Variable 4 3, # symmetric to variable 4 3, # symmetric to variable 4 ] constraints : dict or sequence of dict, optional Constraints definition. Function(s) ``R**n`` in the form:: g(x) <= 0 applied as g : R^n -> R^m h(x) == 0 applied as h : R^n -> R^p Each constraint is defined in a dictionary with fields: type : str Constraint type: 'eq' for equality, 'ineq' for inequality. fun : callable The function defining the constraint. jac : callable, optional The Jacobian of `fun` (only for SLSQP). args : sequence, optional Extra arguments to be passed to the function and Jacobian. Equality constraint means that the constraint function result is to be zero whereas inequality means that it is to be non-negative.constraints : dict or sequence of dict, optional Constraints definition. Function(s) ``R**n`` in the form:: g(x) <= 0 applied as g : R^n -> R^m h(x) == 0 applied as h : R^n -> R^p Each constraint is defined in a dictionary with fields: type : str Constraint type: 'eq' for equality, 'ineq' for inequality. fun : callable The function defining the constraint. jac : callable, optional The Jacobian of `fun` (unused). args : sequence, optional Extra arguments to be passed to the function and Jacobian. Equality constraint means that the constraint function result is to be zero whereas inequality means that it is to be non-negative. workers : int optional Uses `multiprocessing.Pool <multiprocessing>`) to compute the field functions in parrallel. 
""" def __init__(self, dim, domain=None, sfield=None, sfield_args=(), symmetry=None, constraints=None, workers=1): self.dim = dim # Domains self.domain = domain if domain is None: self.bounds = [(float(0), float(1.0)), ] * dim else: self.bounds = domain self.symmetry = symmetry # here in init to avoid if checks # Field functions self.sfield = sfield self.sfield_args = sfield_args # Process constraints # Constraints # Process constraint dict sequence: if constraints is not None: self.min_cons = constraints self.g_cons = [] self.g_args = [] if (type(constraints) is not tuple) and (type(constraints) is not list): constraints = (constraints,) for cons in constraints: if cons['type'] in ('ineq'): self.g_cons.append(cons['fun']) try: self.g_args.append(cons['args']) except KeyError: self.g_args.append(()) self.g_cons = tuple(self.g_cons) self.g_args = tuple(self.g_args) else: self.g_cons = None self.g_args = None # Homology properties self.gen = 0 self.perm_cycle = 0 # Every cell is stored in a list of its generation, # ex. the initial cell is stored in self.H[0] # 1st get new cells are stored in self.H[1] etc. # When a cell is sub-generated it is removed from this list self.H = [] # Storage structure of vertex groups # Cache of all vertices if (sfield is not None) or (self.g_cons is not None): # Initiate a vertex cache and an associated field cache, note that # the field case is always initiated inside the vertex cache if an # associated field scalar field is defined: if sfield is not None: self.V = VertexCacheField(field=sfield, field_args=sfield_args, g_cons=self.g_cons, g_cons_args=self.g_args, workers=workers) elif self.g_cons is not None: self.V = VertexCacheField(field=sfield, field_args=sfield_args, g_cons=self.g_cons, g_cons_args=self.g_args, workers=workers) else: self.V = VertexCacheIndex() self.V_non_symm = [] # List of non-symmetric vertices def __call__(self): return self.H # %% Triangulation methods def cyclic_product(self, bounds, origin, supremum, centroid=True): """Generate initial triangulation using cyclic product""" # Define current hyperrectangle vot = tuple(origin) vut = tuple(supremum) # Hyperrectangle supremum self.V[vot] vo = self.V[vot] yield vo.x self.V[vut].connect(self.V[vot]) yield vut # Cyclic group approach with second x_l --- x_u operation. # These containers store the "lower" and "upper" vertices # corresponding to the origin or supremum of every C2 group. # It has the structure of `dim` times embedded lists each containing # these vertices as the entire complex grows. Bounds[0] has to be done # outside the loops before we have symmetric containers. 
# NOTE: This means that bounds[0][1] must always exist C0x = [[self.V[vot]]] a_vo = copy.copy(list(origin)) a_vo[0] = vut[0] # Update aN Origin a_vo = self.V[tuple(a_vo)] # self.V[vot].connect(self.V[tuple(a_vo)]) self.V[vot].connect(a_vo) yield a_vo.x C1x = [[a_vo]] # C1x = [[self.V[tuple(a_vo)]]] ab_C = [] # Container for a + b operations # Loop over remaining bounds for i, x in enumerate(bounds[1:]): # Update lower and upper containers C0x.append([]) C1x.append([]) # try to access a second bound (if not, C1 is symmetric) try: # Early try so that we don't have to copy the cache before # moving on to next C1/C2: Try to add the operation of a new # C2 product by accessing the upper bound x[1] # Copy lists for iteration cC0x = [x[:] for x in C0x[:i + 1]] cC1x = [x[:] for x in C1x[:i + 1]] for j, (VL, VU) in enumerate(zip(cC0x, cC1x)): for k, (vl, vu) in enumerate(zip(VL, VU)): # Build aN vertices for each lower-upper pair in N: a_vl = list(vl.x) a_vu = list(vu.x) a_vl[i + 1] = vut[i + 1] a_vu[i + 1] = vut[i + 1] a_vl = self.V[tuple(a_vl)] # Connect vertices in N to corresponding vertices # in aN: vl.connect(a_vl) yield a_vl.x a_vu = self.V[tuple(a_vu)] # Connect vertices in N to corresponding vertices # in aN: vu.connect(a_vu) # Connect new vertex pair in aN: a_vl.connect(a_vu) # Connect lower pair to upper (triangulation # operation of a + b (two arbitrary operations): vl.connect(a_vu) ab_C.append((vl, a_vu)) # Update the containers C0x[i + 1].append(vl) C0x[i + 1].append(vu) C1x[i + 1].append(a_vl) C1x[i + 1].append(a_vu) # Update old containers C0x[j].append(a_vl) C1x[j].append(a_vu) # Yield new points yield a_vu.x # Try to connect aN lower source of previous a + b # operation with a aN vertex ab_Cc = copy.copy(ab_C) for vp in ab_Cc: b_v = list(vp[0].x) ab_v = list(vp[1].x) b_v[i + 1] = vut[i + 1] ab_v[i + 1] = vut[i + 1] b_v = self.V[tuple(b_v)] # b + vl ab_v = self.V[tuple(ab_v)] # b + a_vl # Note o---o is already connected vp[0].connect(ab_v) # o-s b_v.connect(ab_v) # s-s # Add new list of cross pairs ab_C.append((vp[0], ab_v)) ab_C.append((b_v, ab_v)) except IndexError: cC0x = C0x[i] cC1x = C1x[i] VL, VU = cC0x, cC1x for k, (vl, vu) in enumerate(zip(VL, VU)): # Build aN vertices for each lower-upper pair in N: a_vu = list(vu.x) a_vu[i + 1] = vut[i + 1] # Connect vertices in N to corresponding vertices # in aN: a_vu = self.V[tuple(a_vu)] # Connect vertices in N to corresponding vertices # in aN: vu.connect(a_vu) # Connect new vertex pair in aN: # a_vl.connect(a_vu) # Connect lower pair to upper (triangulation # operation of a + b (two arbitrary operations): vl.connect(a_vu) ab_C.append((vl, a_vu)) C0x[i + 1].append(vu) C1x[i + 1].append(a_vu) # Yield new points a_vu.connect(self.V[vut]) yield a_vu.x ab_Cc = copy.copy(ab_C) for vp in ab_Cc: if vp[1].x[i] == vut[i]: ab_v = list(vp[1].x) ab_v[i + 1] = vut[i + 1] ab_v = self.V[tuple(ab_v)] # b + a_vl # Note o---o is already connected vp[0].connect(ab_v) # o-s # Add new list of cross pairs ab_C.append((vp[0], ab_v)) # Clean class trash try: del C0x del cC0x del C1x del cC1x del ab_C del ab_Cc except UnboundLocalError: pass # Extra yield to ensure that the triangulation is completed if centroid: vo = self.V[vot] vs = self.V[vut] # Disconnect the origin and supremum vo.disconnect(vs) # Build centroid vc = self.split_edge(vot, vut) for v in vo.nn: v.connect(vc) yield vc.x return vc.x else: yield vut return vut def triangulate(self, n=None, symmetry=None, centroid=True, printout=False): """ Triangulate the initial domain, if n is not None then 
a limited number of points will be generated Parameters ---------- n : int, Number of points to be sampled. symmetry : Ex. Dictionary/hashtable f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2 symmetry = symmetry[0]: 0, # Variable 1 symmetry[1]: 0, # symmetric to variable 1 symmetry[2]: 0, # symmetric to variable 1 symmetry[3]: 3, # Variable 4 symmetry[4]: 3, # symmetric to variable 4 symmetry[5]: 3, # symmetric to variable 4 } centroid : bool, if True add a central point to the hypercube printout : bool, if True print out results NOTES: ------ Rather than using the combinatorial algorithm to connect vertices we make the following observation: The bound pairs are similar a C2 cyclic group and the structure is formed using the cartesian product: H = C2 x C2 x C2 ... x C2 (dim times) So construct any normal subgroup N and consider H/N first, we connect all vertices within N (ex. N is C2 (the first dimension), then we move to a left coset aN (an operation moving around the defined H/N group by for example moving from the lower bound in C2 (dimension 2) to the higher bound in C2. During this operation connection all the vertices. Now repeat the N connections. Note that these elements can be connected in parrallel. """ # Inherit class arguments if symmetry is None: symmetry = self.symmetry # Build origin and supremum vectors origin = [i[0] for i in self.bounds] self.origin = origin supremum = [i[1] for i in self.bounds] self.supremum = supremum if symmetry is None: cbounds = self.bounds else: cbounds = copy.copy(self.bounds) for i, j in enumerate(symmetry): if i is not j: # pop second entry on second symmetry vars cbounds[i] = [self.bounds[symmetry[i]][0]] # Sole (first) entry is the sup value and there is no # origin: cbounds[i] = [self.bounds[symmetry[i]][1]] if (self.bounds[symmetry[i]] is not self.bounds[symmetry[j]]): logging.warning(f"Variable {i} was specified as " f"symmetetric to variable {j}, however" f", the bounds {i} =" f" {self.bounds[symmetry[i]]} and {j}" f" =" f" {self.bounds[symmetry[j]]} do not " f"match, the mismatch was ignored in " f"the initial triangulation.") cbounds[i] = self.bounds[symmetry[j]] if n is None: # Build generator self.cp = self.cyclic_product(cbounds, origin, supremum, centroid) for i in self.cp: i try: self.triangulated_vectors.append((tuple(self.origin), tuple(self.supremum))) except (AttributeError, KeyError): self.triangulated_vectors = [(tuple(self.origin), tuple(self.supremum))] else: # Check if generator already exists try: self.cp except (AttributeError, KeyError): self.cp = self.cyclic_product(cbounds, origin, supremum, centroid) try: while len(self.V.cache) < n: next(self.cp) except StopIteration: try: self.triangulated_vectors.append((tuple(self.origin), tuple(self.supremum))) except (AttributeError, KeyError): self.triangulated_vectors = [(tuple(self.origin), tuple(self.supremum))] if printout: # for v in self.C0(): # v.print_out() for v in self.V.cache: self.V[v].print_out() return def refine(self, n=1): if n is None: try: self.triangulated_vectors self.refine_all() return except AttributeError as ae: if str(ae) == "'Complex' object has no attribute " \ "'triangulated_vectors'": self.triangulate(symmetry=self.symmetry) return else: raise nt = len(self.V.cache) + n # Target number of total vertices # In the outer while loop we iterate until we have added an extra `n` # vertices to the complex: while len(self.V.cache) < nt: # while loop 1 try: # try 1 # Try to access triangulated_vectors, this should only be # defined if an initial 
triangulation has already been # performed: self.triangulated_vectors # Try a usual iteration of the current generator, if it # does not exist or is exhausted then produce a new generator try: # try 2 next(self.rls) except (AttributeError, StopIteration, KeyError): vp = self.triangulated_vectors[0] self.rls = self.refine_local_space(*vp, bounds=self.bounds) next(self.rls) except (AttributeError, KeyError): # If an initial triangulation has not been completed, then # we start/continue the initial triangulation targeting `nt` # vertices, if nt is greater than the initial number of # vertices then the `refine` routine will move back to try 1. self.triangulate(nt, self.symmetry) return def refine_all(self, centroids=True): """Refine the entire domain of the current complex.""" try: self.triangulated_vectors tvs = copy.copy(self.triangulated_vectors) for i, vp in enumerate(tvs): self.rls = self.refine_local_space(*vp, bounds=self.bounds) for i in self.rls: i except AttributeError as ae: if str(ae) == "'Complex' object has no attribute " \ "'triangulated_vectors'": self.triangulate(symmetry=self.symmetry, centroid=centroids) else: raise # This adds a centroid to every new sub-domain generated and defined # by self.triangulated_vectors, in addition the vertices ! to complete # the triangulation return def refine_local_space(self, origin, supremum, bounds, centroid=1): # Copy for later removal origin_c = copy.copy(origin) supremum_c = copy.copy(supremum) # Initiate local variables redefined in later inner `for` loop: vl, vu, a_vu = None, None, None # Change the vector orientation so that it is only increasing s_ov = list(origin) s_origin = list(origin) s_sv = list(supremum) s_supremum = list(supremum) for i, vi in enumerate(s_origin): if s_ov[i] > s_sv[i]: s_origin[i] = s_sv[i] s_supremum[i] = s_ov[i] vot = tuple(s_origin) vut = tuple(s_supremum) # Hyperrectangle supremum vo = self.V[vot] # initiate if doesn't exist yet vs = self.V[vut] # Start by finding the old centroid of the new space: vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg # Find set of extreme vertices in current local space sup_set = copy.copy(vco.nn) # Cyclic group approach with second x_l --- x_u operation. # These containers store the "lower" and "upper" vertices # corresponding to the origin or supremum of every C2 group. # It has the structure of `dim` times embedded lists each containing # these vertices as the entire complex grows. Bounds[0] has to be done # outside the loops before we have symmetric containers. 
# NOTE: This means that bounds[0][1] must always exist a_vl = copy.copy(list(vot)) a_vl[0] = vut[0] # Update aN Origin if tuple(a_vl) not in self.V.cache: vo = self.V[vot] # initiate if doesn't exist yet vs = self.V[vut] # Start by finding the old centroid of the new space: vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg # Find set of extreme vertices in current local space sup_set = copy.copy(vco.nn) a_vl = copy.copy(list(vot)) a_vl[0] = vut[0] # Update aN Origin a_vl = self.V[tuple(a_vl)] else: a_vl = self.V[tuple(a_vl)] c_v = self.split_edge(vo.x, a_vl.x) c_v.connect(vco) yield c_v.x Cox = [[vo]] Ccx = [[c_v]] Cux = [[a_vl]] ab_C = [] # Container for a + b operations s_ab_C = [] # Container for symmetric a + b operations # Loop over remaining bounds for i, x in enumerate(bounds[1:]): # Update lower and upper containers Cox.append([]) Ccx.append([]) Cux.append([]) # try to access a second bound (if not, C1 is symmetric) try: t_a_vl = list(vot) t_a_vl[i + 1] = vut[i + 1] # New: lists are used anyway, so copy all # %% # Copy lists for iteration cCox = [x[:] for x in Cox[:i + 1]] cCcx = [x[:] for x in Ccx[:i + 1]] cCux = [x[:] for x in Cux[:i + 1]] # Try to connect aN lower source of previous a + b # operation with a aN vertex ab_Cc = copy.copy(ab_C) # NOTE: We append ab_C in the # (VL, VC, VU) for-loop, but we use the copy of the list in the # ab_Cc for-loop. s_ab_Cc = copy.copy(s_ab_C) # Early try so that we don't have to copy the cache before # moving on to next C1/C2: Try to add the operation of a new # C2 product by accessing the upper bound if tuple(t_a_vl) not in self.V.cache: # Raise error to continue symmetric refine raise IndexError t_a_vu = list(vut) t_a_vu[i + 1] = vut[i + 1] if tuple(t_a_vu) not in self.V.cache: # Raise error to continue symmetric refine: raise IndexError for vectors in s_ab_Cc: # s_ab_C.append([c_vc, vl, vu, a_vu]) bc_vc = list(vectors[0].x) b_vl = list(vectors[1].x) b_vu = list(vectors[2].x) ba_vu = list(vectors[3].x) bc_vc[i + 1] = vut[i + 1] b_vl[i + 1] = vut[i + 1] b_vu[i + 1] = vut[i + 1] ba_vu[i + 1] = vut[i + 1] bc_vc = self.V[tuple(bc_vc)] bc_vc.connect(vco) # NOTE: Unneeded? yield bc_vc # Split to centre, call this centre group "d = 0.5*a" d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) d_bc_vc.connect(bc_vc) d_bc_vc.connect(vectors[1]) # Connect all to centroid d_bc_vc.connect(vectors[2]) # Connect all to centroid d_bc_vc.connect(vectors[3]) # Connect all to centroid yield d_bc_vc.x b_vl = self.V[tuple(b_vl)] bc_vc.connect(b_vl) # Connect aN cross pairs d_bc_vc.connect(b_vl) # Connect all to centroid yield b_vl b_vu = self.V[tuple(b_vu)] bc_vc.connect(b_vu) # Connect aN cross pairs d_bc_vc.connect(b_vu) # Connect all to centroid b_vl_c = self.split_edge(b_vu.x, b_vl.x) bc_vc.connect(b_vl_c) yield b_vu ba_vu = self.V[tuple(ba_vu)] bc_vc.connect(ba_vu) # Connect aN cross pairs d_bc_vc.connect(ba_vu) # Connect all to centroid # Split the a + b edge of the initial triangulation: os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s b_vu_c = self.split_edge(b_vu.x, ba_vu.x) bc_vc.connect(b_vu_c) yield os_v.x # often equal to vco, but not always yield ss_v.x # often equal to bc_vu, but not always yield ba_vu # Split remaining to centre, call this centre group # "d = 0.5*a" d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) d_bc_vc.connect(vco) # NOTE: Unneeded? yield d_bc_vc.x d_b_vl = self.split_edge(vectors[1].x, b_vl.x) d_bc_vc.connect(vco) # NOTE: Unneeded? 
d_bc_vc.connect(d_b_vl) # Connect dN cross pairs yield d_b_vl.x d_b_vu = self.split_edge(vectors[2].x, b_vu.x) d_bc_vc.connect(vco) # NOTE: Unneeded? d_bc_vc.connect(d_b_vu) # Connect dN cross pairs yield d_b_vu.x d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x) d_bc_vc.connect(vco) # NOTE: Unneeded? d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs yield d_ba_vu # comb = [c_vc, vl, vu, a_vl, a_vu, # bc_vc, b_vl, b_vu, ba_vl, ba_vu] comb = [vl, vu, a_vu, b_vl, b_vu, ba_vu] comb_iter = itertools.combinations(comb, 2) for vecs in comb_iter: self.split_edge(vecs[0].x, vecs[1].x) # Add new list of cross pairs ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu)) ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu)) # = prev for vectors in ab_Cc: bc_vc = list(vectors[0].x) b_vl = list(vectors[1].x) b_vu = list(vectors[2].x) ba_vl = list(vectors[3].x) ba_vu = list(vectors[4].x) bc_vc[i + 1] = vut[i + 1] b_vl[i + 1] = vut[i + 1] b_vu[i + 1] = vut[i + 1] ba_vl[i + 1] = vut[i + 1] ba_vu[i + 1] = vut[i + 1] bc_vc = self.V[tuple(bc_vc)] bc_vc.connect(vco) # NOTE: Unneeded? yield bc_vc # Split to centre, call this centre group "d = 0.5*a" d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) d_bc_vc.connect(bc_vc) d_bc_vc.connect(vectors[1]) # Connect all to centroid d_bc_vc.connect(vectors[2]) # Connect all to centroid d_bc_vc.connect(vectors[3]) # Connect all to centroid d_bc_vc.connect(vectors[4]) # Connect all to centroid yield d_bc_vc.x b_vl = self.V[tuple(b_vl)] bc_vc.connect(b_vl) # Connect aN cross pairs d_bc_vc.connect(b_vl) # Connect all to centroid yield b_vl b_vu = self.V[tuple(b_vu)] bc_vc.connect(b_vu) # Connect aN cross pairs d_bc_vc.connect(b_vu) # Connect all to centroid yield b_vu ba_vl = self.V[tuple(ba_vl)] bc_vc.connect(ba_vl) # Connect aN cross pairs d_bc_vc.connect(ba_vl) # Connect all to centroid self.split_edge(b_vu.x, ba_vl.x) yield ba_vl ba_vu = self.V[tuple(ba_vu)] bc_vc.connect(ba_vu) # Connect aN cross pairs d_bc_vc.connect(ba_vu) # Connect all to centroid # Split the a + b edge of the initial triangulation: os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s yield os_v.x # often equal to vco, but not always yield ss_v.x # often equal to bc_vu, but not always yield ba_vu # Split remaining to centre, call this centre group # "d = 0.5*a" d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x) d_bc_vc.connect(vco) # NOTE: Unneeded? yield d_bc_vc.x d_b_vl = self.split_edge(vectors[1].x, b_vl.x) d_bc_vc.connect(vco) # NOTE: Unneeded? d_bc_vc.connect(d_b_vl) # Connect dN cross pairs yield d_b_vl.x d_b_vu = self.split_edge(vectors[2].x, b_vu.x) d_bc_vc.connect(vco) # NOTE: Unneeded? d_bc_vc.connect(d_b_vu) # Connect dN cross pairs yield d_b_vu.x d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x) d_bc_vc.connect(vco) # NOTE: Unneeded? d_bc_vc.connect(d_ba_vl) # Connect dN cross pairs yield d_ba_vl d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x) d_bc_vc.connect(vco) # NOTE: Unneeded? 
d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs yield d_ba_vu c_vc, vl, vu, a_vl, a_vu = vectors comb = [vl, vu, a_vl, a_vu, b_vl, b_vu, ba_vl, ba_vu] comb_iter = itertools.combinations(comb, 2) for vecs in comb_iter: self.split_edge(vecs[0].x, vecs[1].x) # Add new list of cross pairs ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu)) ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu)) ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu)) ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl)) for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)): for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)): # Build aN vertices for each lower-upper C3 group in N: a_vl = list(vl.x) a_vu = list(vu.x) a_vl[i + 1] = vut[i + 1] a_vu[i + 1] = vut[i + 1] a_vl = self.V[tuple(a_vl)] a_vu = self.V[tuple(a_vu)] # Note, build (a + vc) later for consistent yields # Split the a + b edge of the initial triangulation: c_vc = self.split_edge(vl.x, a_vu.x) self.split_edge(vl.x, vu.x) # Equal to vc # Build cN vertices for each lower-upper C3 group in N: c_vc.connect(vco) c_vc.connect(vc) c_vc.connect(vl) # Connect c + ac operations c_vc.connect(vu) # Connect c + ac operations c_vc.connect(a_vl) # Connect c + ac operations c_vc.connect(a_vu) # Connect c + ac operations yield c_vc.x c_vl = self.split_edge(vl.x, a_vl.x) c_vl.connect(vco) c_vc.connect(c_vl) # Connect cN group vertices yield c_vl.x # yield at end of loop: c_vu = self.split_edge(vu.x, a_vu.x) c_vu.connect(vco) # Connect remaining cN group vertices c_vc.connect(c_vu) # Connect cN group vertices yield c_vu.x a_vc = self.split_edge(a_vl.x, a_vu.x) # is (a + vc) ? a_vc.connect(vco) a_vc.connect(c_vc) # Storage for connecting c + ac operations: ab_C.append((c_vc, vl, vu, a_vl, a_vu)) # Update the containers Cox[i + 1].append(vl) Cox[i + 1].append(vc) Cox[i + 1].append(vu) Ccx[i + 1].append(c_vl) Ccx[i + 1].append(c_vc) Ccx[i + 1].append(c_vu) Cux[i + 1].append(a_vl) Cux[i + 1].append(a_vc) Cux[i + 1].append(a_vu) # Update old containers Cox[j].append(c_vl) # ! Cox[j].append(a_vl) Ccx[j].append(c_vc) # ! Ccx[j].append(a_vc) # ! Cux[j].append(c_vu) # ! 
Cux[j].append(a_vu) # Yield new points yield a_vc.x except IndexError: for vectors in ab_Cc: ba_vl = list(vectors[3].x) ba_vu = list(vectors[4].x) ba_vl[i + 1] = vut[i + 1] ba_vu[i + 1] = vut[i + 1] ba_vu = self.V[tuple(ba_vu)] yield ba_vu d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x) # o-s yield ba_vu d_bc_vc.connect(vectors[1]) # Connect all to centroid d_bc_vc.connect(vectors[2]) # Connect all to centroid d_bc_vc.connect(vectors[3]) # Connect all to centroid d_bc_vc.connect(vectors[4]) # Connect all to centroid yield d_bc_vc.x ba_vl = self.V[tuple(ba_vl)] yield ba_vl d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x) d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x) d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x) yield d_ba_vl yield d_ba_vu yield d_ba_vc c_vc, vl, vu, a_vl, a_vu = vectors comb = [vl, vu, a_vl, a_vu, ba_vl, ba_vu] comb_iter = itertools.combinations(comb, 2) for vecs in comb_iter: self.split_edge(vecs[0].x, vecs[1].x) # Copy lists for iteration cCox = Cox[i] cCcx = Ccx[i] cCux = Cux[i] VL, VC, VU = cCox, cCcx, cCux for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)): # Build aN vertices for each lower-upper pair in N: a_vu = list(vu.x) a_vu[i + 1] = vut[i + 1] # Connect vertices in N to corresponding vertices # in aN: a_vu = self.V[tuple(a_vu)] yield a_vl.x # Split the a + b edge of the initial triangulation: c_vc = self.split_edge(vl.x, a_vu.x) self.split_edge(vl.x, vu.x) # Equal to vc c_vc.connect(vco) c_vc.connect(vc) c_vc.connect(vl) # Connect c + ac operations c_vc.connect(vu) # Connect c + ac operations c_vc.connect(a_vu) # Connect c + ac operations yield (c_vc.x) c_vu = self.split_edge(vu.x, a_vu.x) # yield at end of loop c_vu.connect(vco) # Connect remaining cN group vertices c_vc.connect(c_vu) # Connect cN group vertices yield (c_vu.x) # Update the containers Cox[i + 1].append(vu) Ccx[i + 1].append(c_vu) Cux[i + 1].append(a_vu) # Update old containers s_ab_C.append([c_vc, vl, vu, a_vu]) yield a_vu.x # Clean class trash try: del Cox del Ccx del Cux del ab_C del ab_Cc except UnboundLocalError: pass try: self.triangulated_vectors.remove((tuple(origin_c), tuple(supremum_c))) except ValueError: # Turn this into a logging warning? pass # Add newly triangulated vectors: for vs in sup_set: self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x))) # Extra yield to ensure that the triangulation is completed if centroid: vcn_set = set() c_nn_lists = [] for vs in sup_set: # Build centroid c_nn = self.vpool(vco.x, vs.x) try: c_nn.remove(vcn_set) except KeyError: pass c_nn_lists.append(c_nn) for c_nn in c_nn_lists: try: c_nn.remove(vcn_set) except KeyError: pass for vs, c_nn in zip(sup_set, c_nn_lists): # Build centroid vcn = self.split_edge(vco.x, vs.x) vcn_set.add(vcn) try: # Shouldn't be needed? 
c_nn.remove(vcn_set) except KeyError: pass for vnn in c_nn: vcn.connect(vnn) yield vcn.x else: pass yield vut return def refine_star(self, v): """Refine the star domain of a vertex `v`.""" # Copy lists before iteration vnn = copy.copy(v.nn) v1nn = [] d_v0v1_set = set() for v1 in vnn: v1nn.append(copy.copy(v1.nn)) for v1, v1nn in zip(vnn, v1nn): vnnu = v1nn.intersection(vnn) d_v0v1 = self.split_edge(v.x, v1.x) for o_d_v0v1 in d_v0v1_set: d_v0v1.connect(o_d_v0v1) d_v0v1_set.add(d_v0v1) for v2 in vnnu: d_v1v2 = self.split_edge(v1.x, v2.x) d_v0v1.connect(d_v1v2) return @cache def split_edge(self, v1, v2): v1 = self.V[v1] v2 = self.V[v2] # Destroy original edge, if it exists: v1.disconnect(v2) # Compute vertex on centre of edge: try: vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a except TypeError: # Allow for decimal operations vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a vc = self.V[tuple(vct)] # Connect to original 2 vertices to the new centre vertex vc.connect(v1) vc.connect(v2) return vc def vpool(self, origin, supremum): vot = tuple(origin) vst = tuple(supremum) # Initiate vertices in case they don't exist vo = self.V[vot] vs = self.V[vst] # Remove origin - supremum disconnect # Find the lower/upper bounds of the refinement hyperrectangle bl = list(vot) bu = list(vst) for i, (voi, vsi) in enumerate(zip(vot, vst)): if bl[i] > vsi: bl[i] = vsi if bu[i] < voi: bu[i] = voi # NOTE: This is mostly done with sets/lists because we aren't sure # how well the numpy arrays will scale to thousands of # dimensions. vn_pool = set() vn_pool.update(vo.nn) vn_pool.update(vs.nn) cvn_pool = copy.copy(vn_pool) for vn in cvn_pool: for i, xi in enumerate(vn.x): if bl[i] <= xi <= bu[i]: pass else: try: vn_pool.remove(vn) except KeyError: pass # NOTE: Not all neigbouds are in initial pool return vn_pool def vf_to_vv(self, vertices, simplices): """ Convert a vertex-face mesh to a vertex-vertex mesh used by this class Parameters ---------- vertices : list Vertices simplices : list Simplices """ if self.dim > 1: for s in simplices: edges = itertools.combinations(s, self.dim) for e in edges: self.V[tuple(vertices[e[0]])].connect( self.V[tuple(vertices[e[1]])]) else: for e in simplices: self.V[tuple(vertices[e[0]])].connect( self.V[tuple(vertices[e[1]])]) return def connect_vertex_non_symm(self, v_x, near=None): """ Adds a vertex at coords v_x to the complex that is not symmetric to the initial triangulation and sub-triangulation. If near is specified (for example; a star domain or collections of cells known to contain v) then only those simplices containd in near will be searched, this greatly speeds up the process. If near is not specified this method will search the entire simplicial complex structure. 
Parameters ---------- v_x : tuple Coordinates of non-symmetric vertex near : set or list List of vertices, these are points near v to check for """ if near is None: star = self.V else: star = near # Create the vertex origin if tuple(v_x) in self.V.cache: if self.V[v_x] in self.V_non_symm: pass else: return self.V[v_x] found_nn = False S_rows = [] for v in star: S_rows.append(v.x) S_rows = numpy.array(S_rows) A = numpy.array(S_rows) - numpy.array(v_x) # Iterate through all the possible simplices of S_rows for s_i in itertools.combinations(range(S_rows.shape[0]), r=self.dim + 1): # Check if connected, else s_i is not a simplex valid_simplex = True for i in itertools.combinations(s_i, r=2): # Every combination of vertices must be connected, we check of # the current iteration of all combinations of s_i are # connected we break the loop if it is not. if ((self.V[tuple(S_rows[i[1]])] not in self.V[tuple(S_rows[i[0]])].nn) and (self.V[tuple(S_rows[i[0]])] not in self.V[tuple(S_rows[i[1]])].nn)): valid_simplex = False break S = S_rows[tuple([s_i])] if valid_simplex: if self.deg_simplex(S, proj=None): valid_simplex = False # If s_i is a valid simplex we can test if v_x is inside si if valid_simplex: # Find the A_j0 value from the precalculated values A_j0 = A[tuple([s_i])] if self.in_simplex(S, v_x, A_j0): found_nn = True # breaks the main for loop, s_i is the target simplex: break # Connect the simplex to point if found_nn: for i in s_i: self.V[v_x].connect(self.V[tuple(S_rows[i])]) # Attached the simplex to storage for all non-symmetric vertices self.V_non_symm.append(self.V[v_x]) # this bool value indicates a successful connection if True: return found_nn def in_simplex(self, S, v_x, A_j0=None): """Check if a vector v_x is in simplex `S`. Parameters ---------- S : array_like Array containing simplex entries of vertices as rows v_x : A candidate vertex A_j0 : array, optional, Allows for A_j0 to be pre-calculated Returns ------- res : boolean True if `v_x` is in `S` """ A_11 = numpy.delete(S, 0, 0) - S[0] sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11)) if sign_det_A_11 == 0: # NOTE: We keep the variable A_11, but we loop through A_jj # ind= # while sign_det_A_11 == 0: # A_11 = numpy.delete(S, ind, 0) - S[ind] # sign_det_A_11 = numpy.sign(numpy.linalg.det(A_11)) sign_det_A_11 = -1 # TODO: Choose another det of j instead? # TODO: Unlikely to work in many cases if A_j0 is None: A_j0 = S - v_x for d in range(self.dim + 1): det_A_jj = (-1)**d * sign_det_A_11 # TODO: Note that scipy might be faster to add as an optional # dependency sign_det_A_j0 = numpy.sign(numpy.linalg.det(numpy.delete(A_j0, d, 0))) # TODO: Note if sign_det_A_j0 == then the point is coplanar to the # current simplex facet, so perhaps return True and attach? if det_A_jj == sign_det_A_j0: continue else: return False return True def deg_simplex(self, S, proj=None): """Test a simplex S for degeneracy (linear dependence in R^dim). Parameters ---------- S : np.array Simplex with rows as vertex vectors proj : array, optional, If the projection S[1:] - S[0] is already computed it can be added as an optional argument. """ # Strategy: we test all combination of faces, if any of the # determinants are zero then the vectors lie on the same face and is # therefore linearly dependent in the space of R^dim if proj is None: proj = S[1:] - S[0] # TODO: Is checking the projection of one vertex against faces of other # vertices sufficient? Or do we need to check more vertices in # dimensions higher than 2? 
# TODO: Literature seems to suggest using proj.T, but why is this # needed? if numpy.linalg.det(proj) == 0.0: # TODO: Replace with tolerance? return True # Simplex is degenerate else: return False # Simplex is not degenerate
50,352
40.03749
79
py
scipy
scipy-main/scipy/optimize/_shgo_lib/_vertex.py
import collections from abc import ABC, abstractmethod import numpy as np from scipy._lib._util import MapWrapper class VertexBase(ABC): """ Base class for a vertex. """ def __init__(self, x, nn=None, index=None): """ Initiation of a vertex object. Parameters ---------- x : tuple or vector The geometric location (domain). nn : list, optional Nearest neighbour list. index : int, optional Index of vertex. """ self.x = x self.hash = hash(self.x) # Save precomputed hash if nn is not None: self.nn = set(nn) # can use .indexupdate to add a new list else: self.nn = set() self.index = index def __hash__(self): return self.hash def __getattr__(self, item): if item not in ['x_a']: raise AttributeError(f"{type(self)} object has no attribute " f"'{item}'") if item == 'x_a': self.x_a = np.array(self.x) return self.x_a @abstractmethod def connect(self, v): raise NotImplementedError("This method is only implemented with an " "associated child of the base class.") @abstractmethod def disconnect(self, v): raise NotImplementedError("This method is only implemented with an " "associated child of the base class.") def star(self): """Returns the star domain ``st(v)`` of the vertex. Parameters ---------- v : The vertex ``v`` in ``st(v)`` Returns ------- st : set A set containing all the vertices in ``st(v)`` """ self.st = self.nn self.st.add(self) return self.st class VertexScalarField(VertexBase): """ Add homology properties of a scalar field f: R^n --> R associated with the geometry built from the VertexBase class """ def __init__(self, x, field=None, nn=None, index=None, field_args=(), g_cons=None, g_cons_args=()): """ Parameters ---------- x : tuple, vector of vertex coordinates field : callable, optional a scalar field f: R^n --> R associated with the geometry nn : list, optional list of nearest neighbours index : int, optional index of the vertex field_args : tuple, optional additional arguments to be passed to field g_cons : callable, optional constraints on the vertex g_cons_args : tuple, optional additional arguments to be passed to g_cons """ super().__init__(x, nn=nn, index=index) # Note Vertex is only initiated once for all x so only # evaluated once # self.feasible = None # self.f is externally defined by the cache to allow parallel # processing # None type that will break arithmetic operations unless defined # self.f = None self.check_min = True self.check_max = True def connect(self, v): """Connects self to another vertex object v. Parameters ---------- v : VertexBase or VertexScalarField object """ if v is not self and v not in self.nn: self.nn.add(v) v.nn.add(self) # Flags for checking homology properties: self.check_min = True self.check_max = True v.check_min = True v.check_max = True def disconnect(self, v): if v in self.nn: self.nn.remove(v) v.nn.remove(self) # Flags for checking homology properties: self.check_min = True self.check_max = True v.check_min = True v.check_max = True def minimiser(self): """Check whether this vertex is strictly less than all its neighbours""" if self.check_min: self._min = all(self.f < v.f for v in self.nn) self.check_min = False return self._min def maximiser(self): """ Check whether this vertex is strictly greater than all its neighbours. """ if self.check_max: self._max = all(self.f > v.f for v in self.nn) self.check_max = False return self._max class VertexVectorField(VertexBase): """ Add homology properties of a scalar field f: R^n --> R^m associated with the geometry built from the VertexBase class. 
""" def __init__(self, x, sfield=None, vfield=None, field_args=(), vfield_args=(), g_cons=None, g_cons_args=(), nn=None, index=None): super().__init__(x, nn=nn, index=index) raise NotImplementedError("This class is still a work in progress") class VertexCacheBase: """Base class for a vertex cache for a simplicial complex.""" def __init__(self): self.cache = collections.OrderedDict() self.nfev = 0 # Feasible points self.index = -1 def __iter__(self): for v in self.cache: yield self.cache[v] return def size(self): """Returns the size of the vertex cache.""" return self.index + 1 def print_out(self): headlen = len(f"Vertex cache of size: {len(self.cache)}:") print('=' * headlen) print(f"Vertex cache of size: {len(self.cache)}:") print('=' * headlen) for v in self.cache: self.cache[v].print_out() class VertexCube(VertexBase): """Vertex class to be used for a pure simplicial complex with no associated differential geometry (single level domain that exists in R^n)""" def __init__(self, x, nn=None, index=None): super().__init__(x, nn=nn, index=index) def connect(self, v): if v is not self and v not in self.nn: self.nn.add(v) v.nn.add(self) def disconnect(self, v): if v in self.nn: self.nn.remove(v) v.nn.remove(self) class VertexCacheIndex(VertexCacheBase): def __init__(self): """ Class for a vertex cache for a simplicial complex without an associated field. Useful only for building and visualising a domain complex. Parameters ---------- """ super().__init__() self.Vertex = VertexCube def __getitem__(self, x, nn=None): try: return self.cache[x] except KeyError: self.index += 1 xval = self.Vertex(x, index=self.index) # logging.info("New generated vertex at x = {}".format(x)) # NOTE: Surprisingly high performance increase if logging # is commented out self.cache[x] = xval return self.cache[x] class VertexCacheField(VertexCacheBase): def __init__(self, field=None, field_args=(), g_cons=None, g_cons_args=(), workers=1): """ Class for a vertex cache for a simplicial complex with an associated field. Parameters ---------- field : callable Scalar or vector field callable. field_args : tuple, optional Any additional fixed parameters needed to completely specify the field function g_cons : dict or sequence of dict, optional Constraints definition. Function(s) ``R**n`` in the form:: g_cons_args : tuple, optional Any additional fixed parameters needed to completely specify the constraint functions workers : int optional Uses `multiprocessing.Pool <multiprocessing>`) to compute the field functions in parrallel. 
""" super().__init__() self.index = -1 self.Vertex = VertexScalarField self.field = field self.field_args = field_args self.wfield = FieldWrapper(field, field_args) # if workers is not 1 self.g_cons = g_cons self.g_cons_args = g_cons_args self.wgcons = ConstraintWrapper(g_cons, g_cons_args) self.gpool = set() # A set of tuples to process for feasibility # Field processing objects self.fpool = set() # A set of tuples to process for scalar function self.sfc_lock = False # True if self.fpool is non-Empty self.workers = workers self._mapwrapper = MapWrapper(workers) if workers == 1: self.process_gpool = self.proc_gpool if g_cons is None: self.process_fpool = self.proc_fpool_nog else: self.process_fpool = self.proc_fpool_g else: self.process_gpool = self.pproc_gpool if g_cons is None: self.process_fpool = self.pproc_fpool_nog else: self.process_fpool = self.pproc_fpool_g def __getitem__(self, x, nn=None): try: return self.cache[x] except KeyError: self.index += 1 xval = self.Vertex(x, field=self.field, nn=nn, index=self.index, field_args=self.field_args, g_cons=self.g_cons, g_cons_args=self.g_cons_args) self.cache[x] = xval # Define in cache self.gpool.add(xval) # Add to pool for processing feasibility self.fpool.add(xval) # Add to pool for processing field values return self.cache[x] def __getstate__(self): self_dict = self.__dict__.copy() del self_dict['pool'] return self_dict def process_pools(self): if self.g_cons is not None: self.process_gpool() self.process_fpool() self.proc_minimisers() def feasibility_check(self, v): v.feasible = True for g, args in zip(self.g_cons, self.g_cons_args): # constraint may return more than 1 value. if np.any(g(v.x_a, *args) < 0.0): v.f = np.inf v.feasible = False break def compute_sfield(self, v): """Compute the scalar field values of a vertex object `v`. Parameters ---------- v : VertexBase or VertexScalarField object """ try: v.f = self.field(v.x_a, *self.field_args) self.nfev += 1 except AttributeError: v.f = np.inf # logging.warning(f"Field function not found at x = {self.x_a}") if np.isnan(v.f): v.f = np.inf def proc_gpool(self): """Process all constraints.""" if self.g_cons is not None: for v in self.gpool: self.feasibility_check(v) # Clean the pool self.gpool = set() def pproc_gpool(self): """Process all constraints in parallel.""" gpool_l = [] for v in self.gpool: gpool_l.append(v.x_a) G = self._mapwrapper(self.wgcons.gcons, gpool_l) for v, g in zip(self.gpool, G): v.feasible = g # set vertex object attribute v.feasible = g (bool) def proc_fpool_g(self): """Process all field functions with constraints supplied.""" for v in self.fpool: if v.feasible: self.compute_sfield(v) # Clean the pool self.fpool = set() def proc_fpool_nog(self): """Process all field functions with no constraints supplied.""" for v in self.fpool: self.compute_sfield(v) # Clean the pool self.fpool = set() def pproc_fpool_g(self): """ Process all field functions with constraints supplied in parallel. """ self.wfield.func fpool_l = [] for v in self.fpool: if v.feasible: fpool_l.append(v.x_a) else: v.f = np.inf F = self._mapwrapper(self.wfield.func, fpool_l) for va, f in zip(fpool_l, F): vt = tuple(va) self[vt].f = f # set vertex object attribute v.f = f self.nfev += 1 # Clean the pool self.fpool = set() def pproc_fpool_nog(self): """ Process all field functions with no constraints supplied in parallel. 
""" self.wfield.func fpool_l = [] for v in self.fpool: fpool_l.append(v.x_a) F = self._mapwrapper(self.wfield.func, fpool_l) for va, f in zip(fpool_l, F): vt = tuple(va) self[vt].f = f # set vertex object attribute v.f = f self.nfev += 1 # Clean the pool self.fpool = set() def proc_minimisers(self): """Check for minimisers.""" for v in self: v.minimiser() v.maximiser() class ConstraintWrapper: """Object to wrap constraints to pass to `multiprocessing.Pool`.""" def __init__(self, g_cons, g_cons_args): self.g_cons = g_cons self.g_cons_args = g_cons_args def gcons(self, v_x_a): vfeasible = True for g, args in zip(self.g_cons, self.g_cons_args): # constraint may return more than 1 value. if np.any(g(v_x_a, *args) < 0.0): vfeasible = False break return vfeasible class FieldWrapper: """Object to wrap field to pass to `multiprocessing.Pool`.""" def __init__(self, field, field_args): self.field = field self.field_args = field_args def func(self, v_x_a): try: v_f = self.field(v_x_a, *self.field_args) except Exception: v_f = np.inf if np.isnan(v_f): v_f = np.inf return v_f
13,997
29.364425
79
py
scipy
scipy-main/scipy/optimize/_shgo_lib/__init__.py
0
0
0
py
scipy
scipy-main/scipy/optimize/tests/test__root.py
""" Unit tests for optimization routines from _root.py. """ from numpy.testing import assert_, assert_equal from pytest import raises as assert_raises, warns as assert_warns import numpy as np from scipy.optimize import root class TestRoot: def test_tol_parameter(self): # Check that the minimize() tol= argument does something def func(z): x, y = z return np.array([x**3 - 1, y**3 - 1]) def dfunc(z): x, y = z return np.array([[3*x**2, 0], [0, 3*y**2]]) for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson', 'diagbroyden', 'krylov']: if method in ('linearmixing', 'excitingmixing'): # doesn't converge continue if method in ('hybr', 'lm'): jac = dfunc else: jac = None sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method) sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method) msg = f"{method}: {func(sol1.x)} vs. {func(sol2.x)}" assert_(sol1.success, msg) assert_(sol2.success, msg) assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(), msg) def test_tol_norm(self): def norm(x): return abs(x[0]) for method in ['excitingmixing', 'diagbroyden', 'linearmixing', 'anderson', 'broyden1', 'broyden2', 'krylov']: root(np.zeros_like, np.zeros(2), method=method, options={"tol_norm": norm}) def test_minimize_scalar_coerce_args_param(self): # github issue #3503 def func(z, f=1): x, y = z return np.array([x**3 - 1, y**3 - f]) root(func, [1.1, 1.1], args=1.5) def test_f_size(self): # gh8320 # check that decreasing the size of the returned array raises an error # and doesn't segfault class fun: def __init__(self): self.count = 0 def __call__(self, x): self.count += 1 if not (self.count % 5): ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0 else: ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0, 0.5 * (x[1] - x[0]) ** 3 + x[1]]) return ret F = fun() with assert_raises(ValueError): root(F, [0.1, 0.0], method='lm') def test_gh_10370(self): # gh-10370 reported that passing both `args` and `jac` to `root` with # `method='krylov'` caused a failure. Ensure that this is fixed whether # the gradient is passed via `jac` or as a second output of `fun`. def fun(x, ignored): return [3*x[0] - 0.25*x[1]**2 + 10, 0.1*x[0]**2 + 5*x[1] - 2] def grad(x, ignored): return [[3, 0.5 * x[1]], [0.2 * x[0], 5]] def fun_grad(x, ignored): return fun(x, ignored), grad(x, ignored) x0 = np.zeros(2) ref = root(fun, x0, args=(1,), method='krylov') message = 'Method krylov does not use the jacobian' with assert_warns(RuntimeWarning, match=message): res1 = root(fun, x0, args=(1,), method='krylov', jac=grad) with assert_warns(RuntimeWarning, match=message): res2 = root(fun_grad, x0, args=(1,), method='krylov', jac=True) assert_equal(res1.x, ref.x) assert_equal(res2.x, ref.x) assert res1.success is res2.success is ref.success is True
3,727
32.285714
79
py
scipy
scipy-main/scipy/optimize/tests/test__numdiff.py
import math from itertools import product import numpy as np from numpy.testing import assert_allclose, assert_equal, assert_ from pytest import raises as assert_raises from scipy.sparse import csr_matrix, csc_matrix, lil_matrix from scipy.optimize._numdiff import ( _adjust_scheme_to_bounds, approx_derivative, check_derivative, group_columns, _eps_for_method, _compute_absolute_step) def test_group_columns(): structure = [ [1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0] ] for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]: A = transform(structure) order = np.arange(6) groups_true = np.array([0, 1, 2, 0, 1, 2]) groups = group_columns(A, order) assert_equal(groups, groups_true) order = [1, 2, 4, 3, 5, 0] groups_true = np.array([2, 0, 1, 2, 0, 1]) groups = group_columns(A, order) assert_equal(groups, groups_true) # Test repeatability. groups_1 = group_columns(A) groups_2 = group_columns(A) assert_equal(groups_1, groups_2) def test_correct_fp_eps(): # check that relative step size is correct for FP size EPS = np.finfo(np.float64).eps relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} for method in ['2-point', '3-point', 'cs']: assert_allclose( _eps_for_method(np.float64, np.float64, method), relative_step[method]) assert_allclose( _eps_for_method(np.complex128, np.complex128, method), relative_step[method] ) # check another FP size EPS = np.finfo(np.float32).eps relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} for method in ['2-point', '3-point', 'cs']: assert_allclose( _eps_for_method(np.float64, np.float32, method), relative_step[method] ) assert_allclose( _eps_for_method(np.float32, np.float64, method), relative_step[method] ) assert_allclose( _eps_for_method(np.float32, np.float32, method), relative_step[method] ) class TestAdjustSchemeToBounds: def test_no_bounds(self): x0 = np.zeros(3) h = np.full(3, 1e-2) inf_lower = np.empty_like(x0) inf_upper = np.empty_like(x0) inf_lower.fill(-np.inf) inf_upper.fill(np.inf) h_adjusted, one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', inf_lower, inf_upper) assert_allclose(h_adjusted, h) assert_(np.all(one_sided)) h_adjusted, one_sided = _adjust_scheme_to_bounds( x0, h, 2, '1-sided', inf_lower, inf_upper) assert_allclose(h_adjusted, h) assert_(np.all(one_sided)) h_adjusted, one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', inf_lower, inf_upper) assert_allclose(h_adjusted, h) assert_(np.all(~one_sided)) h_adjusted, one_sided = _adjust_scheme_to_bounds( x0, h, 2, '2-sided', inf_lower, inf_upper) assert_allclose(h_adjusted, h) assert_(np.all(~one_sided)) def test_with_bound(self): x0 = np.array([0.0, 0.85, -0.85]) lb = -np.ones(3) ub = np.ones(3) h = np.array([1, 1, -1]) * 1e-1 h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) assert_allclose(h_adjusted, h) h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub) assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) h_adjusted, one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) assert_allclose(h_adjusted, np.abs(h)) assert_(np.all(~one_sided)) h_adjusted, one_sided = _adjust_scheme_to_bounds( x0, h, 2, '2-sided', lb, ub) assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1) assert_equal(one_sided, np.array([False, True, True])) def test_tight_bounds(self): lb = np.array([-0.03, -0.03]) ub = np.array([0.05, 0.05]) x0 = np.array([0.0, 0.03]) h = np.array([-0.1, -0.1]) 
h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub) assert_allclose(h_adjusted, np.array([0.05, -0.06])) h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub) assert_allclose(h_adjusted, np.array([0.025, -0.03])) h_adjusted, one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) assert_allclose(h_adjusted, np.array([0.03, -0.03])) assert_equal(one_sided, np.array([False, True])) h_adjusted, one_sided = _adjust_scheme_to_bounds( x0, h, 2, '2-sided', lb, ub) assert_allclose(h_adjusted, np.array([0.015, -0.015])) assert_equal(one_sided, np.array([False, True])) class TestApproxDerivativesDense: def fun_scalar_scalar(self, x): return np.sinh(x) def jac_scalar_scalar(self, x): return np.cosh(x) def fun_scalar_vector(self, x): return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) def jac_scalar_vector(self, x): return np.array( [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) def fun_vector_scalar(self, x): return np.sin(x[0] * x[1]) * np.log(x[0]) def wrong_dimensions_fun(self, x): return np.array([x**2, np.tan(x), np.exp(x)]) def jac_vector_scalar(self, x): return np.array([ x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + np.sin(x[0] * x[1]) / x[0], x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) ]) def fun_vector_vector(self, x): return np.array([ x[0] * np.sin(x[1]), x[1] * np.cos(x[0]), x[0] ** 3 * x[1] ** -0.5 ]) def jac_vector_vector(self, x): return np.array([ [np.sin(x[1]), x[0] * np.cos(x[1])], [-x[1] * np.sin(x[0]), np.cos(x[0])], [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] ]) def fun_parametrized(self, x, c0, c1=1.0): return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])]) def jac_parametrized(self, x, c0, c1=0.1): return np.array([ [c0 * np.exp(c0 * x[0]), 0], [0, c1 * np.exp(c1 * x[1])] ]) def fun_with_nan(self, x): return x if np.abs(x) <= 1e-8 else np.nan def jac_with_nan(self, x): return 1.0 if np.abs(x) <= 1e-8 else np.nan def fun_zero_jacobian(self, x): return np.array([x[0] * x[1], np.cos(x[0] * x[1])]) def jac_zero_jacobian(self, x): return np.array([ [x[1], x[0]], [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])] ]) def jac_non_numpy(self, x): # x can be a scalar or an array [val]. # Cast to true scalar before handing over to math.exp xp = np.asarray(x).item() return math.exp(xp) def test_scalar_scalar(self): x0 = 1.0 jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, method='2-point') jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0) jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, method='cs') jac_true = self.jac_scalar_scalar(x0) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) assert_allclose(jac_diff_3, jac_true, rtol=1e-9) assert_allclose(jac_diff_4, jac_true, rtol=1e-12) def test_scalar_scalar_abs_step(self): # can approx_derivative use abs_step? 
x0 = 1.0 jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, method='2-point', abs_step=1.49e-8) jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, abs_step=1.49e-8) jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, method='cs', abs_step=1.49e-8) jac_true = self.jac_scalar_scalar(x0) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) assert_allclose(jac_diff_3, jac_true, rtol=1e-9) assert_allclose(jac_diff_4, jac_true, rtol=1e-12) def test_scalar_vector(self): x0 = 0.5 jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, method='2-point') jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0) jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, method='cs') jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) assert_allclose(jac_diff_3, jac_true, rtol=1e-9) assert_allclose(jac_diff_4, jac_true, rtol=1e-12) def test_vector_scalar(self): x0 = np.array([100.0, -0.5]) jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, method='2-point') jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0) jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, method='cs') jac_true = self.jac_vector_scalar(x0) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) assert_allclose(jac_diff_3, jac_true, rtol=1e-7) assert_allclose(jac_diff_4, jac_true, rtol=1e-12) def test_vector_scalar_abs_step(self): # can approx_derivative use abs_step? x0 = np.array([100.0, -0.5]) jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, method='2-point', abs_step=1.49e-8) jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, abs_step=1.49e-8, rel_step=np.inf) jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, method='cs', abs_step=1.49e-8) jac_true = self.jac_vector_scalar(x0) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) assert_allclose(jac_diff_3, jac_true, rtol=3e-9) assert_allclose(jac_diff_4, jac_true, rtol=1e-12) def test_vector_vector(self): x0 = np.array([-100.0, 0.2]) jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, method='2-point') jac_diff_3 = approx_derivative(self.fun_vector_vector, x0) jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, method='cs') jac_true = self.jac_vector_vector(x0) assert_allclose(jac_diff_2, jac_true, rtol=1e-5) assert_allclose(jac_diff_3, jac_true, rtol=1e-6) assert_allclose(jac_diff_4, jac_true, rtol=1e-12) def test_wrong_dimensions(self): x0 = 1.0 assert_raises(RuntimeError, approx_derivative, self.wrong_dimensions_fun, x0) f0 = self.wrong_dimensions_fun(np.atleast_1d(x0)) assert_raises(ValueError, approx_derivative, self.wrong_dimensions_fun, x0, f0=f0) def test_custom_rel_step(self): x0 = np.array([-0.1, 0.1]) jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, method='2-point', rel_step=1e-4) jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, rel_step=1e-4) jac_true = self.jac_vector_vector(x0) assert_allclose(jac_diff_2, jac_true, rtol=1e-2) assert_allclose(jac_diff_3, jac_true, rtol=1e-4) def test_options(self): x0 = np.array([1.0, 1.0]) c0 = -1.0 c1 = 1.0 lb = 0.0 ub = 2.0 f0 = self.fun_parametrized(x0, c0, c1=c1) rel_step = np.array([-1e-6, 1e-7]) jac_true = self.jac_parametrized(x0, c0, c1) jac_diff_2 = approx_derivative( self.fun_parametrized, x0, method='2-point', rel_step=rel_step, f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) jac_diff_3 = approx_derivative( self.fun_parametrized, x0, rel_step=rel_step, f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub)) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) 
assert_allclose(jac_diff_3, jac_true, rtol=1e-9) def test_with_bounds_2_point(self): lb = -np.ones(2) ub = np.ones(2) x0 = np.array([-2.0, 0.2]) assert_raises(ValueError, approx_derivative, self.fun_vector_vector, x0, bounds=(lb, ub)) x0 = np.array([-1.0, 1.0]) jac_diff = approx_derivative(self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub)) jac_true = self.jac_vector_vector(x0) assert_allclose(jac_diff, jac_true, rtol=1e-6) def test_with_bounds_3_point(self): lb = np.array([1.0, 1.0]) ub = np.array([2.0, 2.0]) x0 = np.array([1.0, 2.0]) jac_true = self.jac_vector_vector(x0) jac_diff = approx_derivative(self.fun_vector_vector, x0) assert_allclose(jac_diff, jac_true, rtol=1e-9) jac_diff = approx_derivative(self.fun_vector_vector, x0, bounds=(lb, np.inf)) assert_allclose(jac_diff, jac_true, rtol=1e-9) jac_diff = approx_derivative(self.fun_vector_vector, x0, bounds=(-np.inf, ub)) assert_allclose(jac_diff, jac_true, rtol=1e-9) jac_diff = approx_derivative(self.fun_vector_vector, x0, bounds=(lb, ub)) assert_allclose(jac_diff, jac_true, rtol=1e-9) def test_tight_bounds(self): x0 = np.array([10.0, 10.0]) lb = x0 - 3e-9 ub = x0 + 2e-9 jac_true = self.jac_vector_vector(x0) jac_diff = approx_derivative( self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub)) assert_allclose(jac_diff, jac_true, rtol=1e-6) jac_diff = approx_derivative( self.fun_vector_vector, x0, method='2-point', rel_step=1e-6, bounds=(lb, ub)) assert_allclose(jac_diff, jac_true, rtol=1e-6) jac_diff = approx_derivative( self.fun_vector_vector, x0, bounds=(lb, ub)) assert_allclose(jac_diff, jac_true, rtol=1e-6) jac_diff = approx_derivative( self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub)) assert_allclose(jac_true, jac_diff, rtol=1e-6) def test_bound_switches(self): lb = -1e-8 ub = 1e-8 x0 = 0.0 jac_true = self.jac_with_nan(x0) jac_diff_2 = approx_derivative( self.fun_with_nan, x0, method='2-point', rel_step=1e-6, bounds=(lb, ub)) jac_diff_3 = approx_derivative( self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) assert_allclose(jac_diff_3, jac_true, rtol=1e-9) x0 = 1e-8 jac_true = self.jac_with_nan(x0) jac_diff_2 = approx_derivative( self.fun_with_nan, x0, method='2-point', rel_step=1e-6, bounds=(lb, ub)) jac_diff_3 = approx_derivative( self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub)) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) assert_allclose(jac_diff_3, jac_true, rtol=1e-9) def test_non_numpy(self): x0 = 1.0 jac_true = self.jac_non_numpy(x0) jac_diff_2 = approx_derivative(self.jac_non_numpy, x0, method='2-point') jac_diff_3 = approx_derivative(self.jac_non_numpy, x0) assert_allclose(jac_diff_2, jac_true, rtol=1e-6) assert_allclose(jac_diff_3, jac_true, rtol=1e-8) # math.exp cannot handle complex arguments, hence this raises assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0, **dict(method='cs')) def test_fp(self): # checks that approx_derivative works for FP size other than 64. # Example is derived from the minimal working example in gh12991. 
np.random.seed(1) def func(p, x): return p[0] + p[1] * x def err(p, x, y): return func(p, x) - y x = np.linspace(0, 1, 100, dtype=np.float64) y = np.random.random(100).astype(np.float64) p0 = np.array([-1.0, -1.0]) jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y)) # parameter vector is float32, func output is float64 jac_fp = approx_derivative(err, p0.astype(np.float32), method='2-point', args=(x, y)) assert err(p0, x, y).dtype == np.float64 assert_allclose(jac_fp, jac_fp64, atol=1e-3) # parameter vector is float64, func output is float32 def err_fp32(p): return err(p, x, y).astype(np.float32) jac_fp = approx_derivative(err_fp32, p0, method='2-point') assert err_fp32(p0).dtype == np.float32 assert_allclose(jac_fp, jac_fp64, atol=1e-3) # check upper bound of error on the derivative for 2-point def f(x): return np.sin(x) def g(x): return np.cos(x) def hess(x): return -np.sin(x) def calc_atol(h, x0, f, hess, EPS): # truncation error t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h))) # roundoff error. There may be a divisor (>1) missing from # the following line, so this contribution is possibly # overestimated t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h))) return t0 + t1 for dtype in [np.float16, np.float32, np.float64]: EPS = np.finfo(dtype).eps x0 = np.array(1.0).astype(dtype) h = _compute_absolute_step(None, x0, f(x0), '2-point') atol = calc_atol(h, x0, f, hess, EPS) err = approx_derivative(f, x0, method='2-point', abs_step=h) - g(x0) assert abs(err) < atol def test_check_derivative(self): x0 = np.array([-10.0, 10]) accuracy = check_derivative(self.fun_vector_vector, self.jac_vector_vector, x0) assert_(accuracy < 1e-9) accuracy = check_derivative(self.fun_vector_vector, self.jac_vector_vector, x0) assert_(accuracy < 1e-6) x0 = np.array([0.0, 0.0]) accuracy = check_derivative(self.fun_zero_jacobian, self.jac_zero_jacobian, x0) assert_(accuracy == 0) accuracy = check_derivative(self.fun_zero_jacobian, self.jac_zero_jacobian, x0) assert_(accuracy == 0) class TestApproxDerivativeSparse: # Example from Numerical Optimization 2nd edition, p. 198. 
def setup_method(self): np.random.seed(0) self.n = 50 self.lb = -0.1 * (1 + np.arange(self.n)) self.ub = 0.1 * (1 + np.arange(self.n)) self.x0 = np.empty(self.n) self.x0[::2] = (1 - 1e-7) * self.lb[::2] self.x0[1::2] = (1 - 1e-7) * self.ub[1::2] self.J_true = self.jac(self.x0) def fun(self, x): e = x[1:]**3 - x[:-1]**2 return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0)) def jac(self, x): n = x.size J = np.zeros((n, n)) J[0, 0] = -4 * x[0] J[0, 1] = 6 * x[1]**2 for i in range(1, n - 1): J[i, i - 1] = -6 * x[i-1] J[i, i] = 9 * x[i]**2 - 4 * x[i] J[i, i + 1] = 6 * x[i+1]**2 J[-1, -1] = 9 * x[-1]**2 J[-1, -2] = -6 * x[-2] return J def structure(self, n): A = np.zeros((n, n), dtype=int) A[0, 0] = 1 A[0, 1] = 1 for i in range(1, n - 1): A[i, i - 1: i + 2] = 1 A[-1, -1] = 1 A[-1, -2] = 1 return A def test_all(self): A = self.structure(self.n) order = np.arange(self.n) groups_1 = group_columns(A, order) np.random.shuffle(order) groups_2 = group_columns(A, order) for method, groups, l, u in product( ['2-point', '3-point', 'cs'], [groups_1, groups_2], [-np.inf, self.lb], [np.inf, self.ub]): J = approx_derivative(self.fun, self.x0, method=method, bounds=(l, u), sparsity=(A, groups)) assert_(isinstance(J, csr_matrix)) assert_allclose(J.toarray(), self.J_true, rtol=1e-6) rel_step = np.full_like(self.x0, 1e-8) rel_step[::2] *= -1 J = approx_derivative(self.fun, self.x0, method=method, rel_step=rel_step, sparsity=(A, groups)) assert_allclose(J.toarray(), self.J_true, rtol=1e-5) def test_no_precomputed_groups(self): A = self.structure(self.n) J = approx_derivative(self.fun, self.x0, sparsity=A) assert_allclose(J.toarray(), self.J_true, rtol=1e-6) def test_equivalence(self): structure = np.ones((self.n, self.n), dtype=int) groups = np.arange(self.n) for method in ['2-point', '3-point', 'cs']: J_dense = approx_derivative(self.fun, self.x0, method=method) J_sparse = approx_derivative( self.fun, self.x0, sparsity=(structure, groups), method=method) assert_allclose(J_dense, J_sparse.toarray(), rtol=5e-16, atol=7e-15) def test_check_derivative(self): def jac(x): return csr_matrix(self.jac(x)) accuracy = check_derivative(self.fun, jac, self.x0, bounds=(self.lb, self.ub)) assert_(accuracy < 1e-9) accuracy = check_derivative(self.fun, jac, self.x0, bounds=(self.lb, self.ub)) assert_(accuracy < 1e-9) class TestApproxDerivativeLinearOperator: def fun_scalar_scalar(self, x): return np.sinh(x) def jac_scalar_scalar(self, x): return np.cosh(x) def fun_scalar_vector(self, x): return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])]) def jac_scalar_vector(self, x): return np.array( [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1) def fun_vector_scalar(self, x): return np.sin(x[0] * x[1]) * np.log(x[0]) def jac_vector_scalar(self, x): return np.array([ x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) + np.sin(x[0] * x[1]) / x[0], x[0] * np.cos(x[0] * x[1]) * np.log(x[0]) ]) def fun_vector_vector(self, x): return np.array([ x[0] * np.sin(x[1]), x[1] * np.cos(x[0]), x[0] ** 3 * x[1] ** -0.5 ]) def jac_vector_vector(self, x): return np.array([ [np.sin(x[1]), x[0] * np.cos(x[1])], [-x[1] * np.sin(x[0]), np.cos(x[0])], [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5] ]) def test_scalar_scalar(self): x0 = 1.0 jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0, method='2-point', as_linear_operator=True) jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0, as_linear_operator=True) jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0, method='cs', as_linear_operator=True) jac_true = 
self.jac_scalar_scalar(x0) np.random.seed(1) for i in range(10): p = np.random.uniform(-10, 10, size=(1,)) assert_allclose(jac_diff_2.dot(p), jac_true*p, rtol=1e-5) assert_allclose(jac_diff_3.dot(p), jac_true*p, rtol=5e-6) assert_allclose(jac_diff_4.dot(p), jac_true*p, rtol=5e-6) def test_scalar_vector(self): x0 = 0.5 jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0, method='2-point', as_linear_operator=True) jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0, as_linear_operator=True) jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0, method='cs', as_linear_operator=True) jac_true = self.jac_scalar_vector(np.atleast_1d(x0)) np.random.seed(1) for i in range(10): p = np.random.uniform(-10, 10, size=(1,)) assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5) assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=5e-6) assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=5e-6) def test_vector_scalar(self): x0 = np.array([100.0, -0.5]) jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0, method='2-point', as_linear_operator=True) jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0, as_linear_operator=True) jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0, method='cs', as_linear_operator=True) jac_true = self.jac_vector_scalar(x0) np.random.seed(1) for i in range(10): p = np.random.uniform(-10, 10, size=x0.shape) assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)), rtol=1e-5) assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)), rtol=5e-6) assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)), rtol=1e-7) def test_vector_vector(self): x0 = np.array([-100.0, 0.2]) jac_diff_2 = approx_derivative(self.fun_vector_vector, x0, method='2-point', as_linear_operator=True) jac_diff_3 = approx_derivative(self.fun_vector_vector, x0, as_linear_operator=True) jac_diff_4 = approx_derivative(self.fun_vector_vector, x0, method='cs', as_linear_operator=True) jac_true = self.jac_vector_vector(x0) np.random.seed(1) for i in range(10): p = np.random.uniform(-10, 10, size=x0.shape) assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5) assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6) assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7) def test_exception(self): x0 = np.array([-100.0, 0.2]) assert_raises(ValueError, approx_derivative, self.fun_vector_vector, x0, method='2-point', bounds=(1, np.inf)) def test_absolute_step_sign(): # test for gh12487 # if an absolute step is specified for 2-point differences make sure that # the side corresponds to the step. i.e. if step is positive then forward # differences should be used, if step is negative then backwards # differences should be used. # function has double discontinuity at x = [-1, -1] # first component is \/, second component is /\ def f(x): return -np.abs(x[0] + 1) + np.abs(x[1] + 1) # check that the forward difference is used grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8) assert_allclose(grad, [-1.0, 1.0]) # check that the backwards difference is used grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8) assert_allclose(grad, [1.0, -1.0]) # check that the forwards difference is used with a step for both # parameters grad = approx_derivative( f, [-1, -1], method='2-point', abs_step=[1e-8, 1e-8] ) assert_allclose(grad, [-1.0, 1.0]) # check that we can mix forward/backwards steps. 
grad = approx_derivative( f, [-1, -1], method='2-point', abs_step=[1e-8, -1e-8] ) assert_allclose(grad, [-1.0, -1.0]) grad = approx_derivative( f, [-1, -1], method='2-point', abs_step=[-1e-8, 1e-8] ) assert_allclose(grad, [1.0, 1.0]) # the forward step should reverse to a backwards step if it runs into a # bound # This is kind of tested in TestAdjustSchemeToBounds, but only for a lower level # function. grad = approx_derivative( f, [-1, -1], method='2-point', abs_step=1e-8, bounds=(-np.inf, -1) ) assert_allclose(grad, [1.0, -1.0]) grad = approx_derivative( f, [-1, -1], method='2-point', abs_step=-1e-8, bounds=(-1, np.inf) ) assert_allclose(grad, [-1.0, 1.0]) def test__compute_absolute_step(): # tests calculation of absolute step from rel_step methods = ['2-point', '3-point', 'cs'] x0 = np.array([1e-5, 0, 1, 1e5]) EPS = np.finfo(np.float64).eps relative_step = { "2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5 } f0 = np.array(1.0) for method in methods: rel_step = relative_step[method] correct_step = np.array([rel_step, rel_step * 1., rel_step * 1., rel_step * np.abs(x0[3])]) abs_step = _compute_absolute_step(None, x0, f0, method) assert_allclose(abs_step, correct_step) sign_x0 = (-x0 >= 0).astype(float) * 2 - 1 abs_step = _compute_absolute_step(None, -x0, f0, method) assert_allclose(abs_step, sign_x0 * correct_step) # if a relative step is provided it should be used rel_step = np.array([0.1, 1, 10, 100]) correct_step = np.array([rel_step[0] * x0[0], relative_step['2-point'], rel_step[2] * 1., rel_step[3] * np.abs(x0[3])]) abs_step = _compute_absolute_step(rel_step, x0, f0, '2-point') assert_allclose(abs_step, correct_step) sign_x0 = (-x0 >= 0).astype(float) * 2 - 1 abs_step = _compute_absolute_step(rel_step, -x0, f0, '2-point') assert_allclose(abs_step, sign_x0 * correct_step)
31,338
37.452761
84
py
scipy
scipy-main/scipy/optimize/tests/test_cython_optimize.py
""" Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``, and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a 3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st, 2nd, and 3rd order terms in ``args``. .. math:: f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0 The 3rd order polynomial function is written in Cython and called in a Python wrapper named after the zero function. See the private ``_zeros`` Cython module in `scipy.optimize.cython_optimze` for more information. """ import numpy.testing as npt from scipy.optimize.cython_optimize import _zeros # CONSTANTS # Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9]. # The ARGS have 3 elements just to show how this could be done for any cubic # polynomial. A0 = tuple(-2.0 - x/10.0 for x in range(10)) # constant term ARGS = (0.0, 0.0, 1.0) # 1st, 2nd, and 3rd order terms XLO, XHI = 0.0, 2.0 # first and second bounds of zeros functions # absolute and relative tolerances and max iterations for zeros functions XTOL, RTOL, MITR = 0.001, 0.001, 10 EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0] # = [1.2599210498948732, # 1.2805791649874942, # 1.300591446851387, # 1.3200061217959123, # 1.338865900164339, # 1.3572088082974532, # 1.375068867074141, # 1.3924766500838337, # 1.4094597464129783, # 1.4260431471424087] # test bisect def test_bisect(): npt.assert_allclose( EXPECTED, list( _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) ), rtol=RTOL, atol=XTOL ) # test ridder def test_ridder(): npt.assert_allclose( EXPECTED, list( _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) ), rtol=RTOL, atol=XTOL ) # test brenth def test_brenth(): npt.assert_allclose( EXPECTED, list( _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) ), rtol=RTOL, atol=XTOL ) # test brentq def test_brentq(): npt.assert_allclose( EXPECTED, list( _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR) ), rtol=RTOL, atol=XTOL ) # test brentq with full output def test_brentq_full_output(): output = _zeros.full_output_example( (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR) npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL) npt.assert_equal(6, output['iterations']) npt.assert_equal(7, output['funcalls']) npt.assert_equal(0, output['error_num'])
2,638
27.376344
79
py
scipy
scipy-main/scipy/optimize/tests/test_lsq_common.py
from numpy.testing import assert_, assert_allclose, assert_equal from pytest import raises as assert_raises import numpy as np from scipy.optimize._lsq.common import ( step_size_to_bound, find_active_constraints, make_strictly_feasible, CL_scaling_vector, intersect_trust_region, build_quadratic_1d, minimize_quadratic_1d, evaluate_quadratic, reflective_transformation, left_multiplied_operator, right_multiplied_operator) class TestBounds: def test_step_size_to_bounds(self): lb = np.array([-1.0, 2.5, 10.0]) ub = np.array([1.0, 5.0, 100.0]) x = np.array([0.0, 2.5, 12.0]) s = np.array([0.1, 0.0, 0.0]) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, 10) assert_equal(hits, [1, 0, 0]) s = np.array([0.01, 0.05, -1.0]) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, 2) assert_equal(hits, [0, 0, -1]) s = np.array([10.0, -0.0001, 100.0]) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, np.array(-0)) assert_equal(hits, [0, -1, 0]) s = np.array([1.0, 0.5, -2.0]) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, 1.0) assert_equal(hits, [1, 0, -1]) s = np.zeros(3) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, np.inf) assert_equal(hits, [0, 0, 0]) def test_find_active_constraints(self): lb = np.array([0.0, -10.0, 1.0]) ub = np.array([1.0, 0.0, 100.0]) x = np.array([0.5, -5.0, 2.0]) active = find_active_constraints(x, lb, ub) assert_equal(active, [0, 0, 0]) x = np.array([0.0, 0.0, 10.0]) active = find_active_constraints(x, lb, ub) assert_equal(active, [-1, 1, 0]) active = find_active_constraints(x, lb, ub, rtol=0) assert_equal(active, [-1, 1, 0]) x = np.array([1e-9, -1e-8, 100 - 1e-9]) active = find_active_constraints(x, lb, ub) assert_equal(active, [0, 0, 1]) active = find_active_constraints(x, lb, ub, rtol=1.5e-9) assert_equal(active, [-1, 0, 1]) lb = np.array([1.0, -np.inf, -np.inf]) ub = np.array([np.inf, 10.0, np.inf]) x = np.ones(3) active = find_active_constraints(x, lb, ub) assert_equal(active, [-1, 0, 0]) # Handles out-of-bound cases. 
x = np.array([0.0, 11.0, 0.0]) active = find_active_constraints(x, lb, ub) assert_equal(active, [-1, 1, 0]) active = find_active_constraints(x, lb, ub, rtol=0) assert_equal(active, [-1, 1, 0]) def test_make_strictly_feasible(self): lb = np.array([-0.5, -0.8, 2.0]) ub = np.array([0.8, 1.0, 3.0]) x = np.array([-0.5, 0.0, 2 + 1e-10]) x_new = make_strictly_feasible(x, lb, ub, rstep=0) assert_(x_new[0] > -0.5) assert_equal(x_new[1:], x[1:]) x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4) assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)]) x = np.array([-0.5, -1, 3.1]) x_new = make_strictly_feasible(x, lb, ub) assert_(np.all((x_new >= lb) & (x_new <= ub))) x_new = make_strictly_feasible(x, lb, ub, rstep=0) assert_(np.all((x_new >= lb) & (x_new <= ub))) lb = np.array([-1, 100.0]) ub = np.array([1, 100.0 + 1e-10]) x = np.array([0, 100.0]) x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8) assert_equal(x_new, [0, 100.0 + 0.5e-10]) def test_scaling_vector(self): lb = np.array([-np.inf, -5.0, 1.0, -np.inf]) ub = np.array([1.0, np.inf, 10.0, np.inf]) x = np.array([0.5, 2.0, 5.0, 0.0]) g = np.array([1.0, 0.1, -10.0, 0.0]) v, dv = CL_scaling_vector(x, g, lb, ub) assert_equal(v, [1.0, 7.0, 5.0, 1.0]) assert_equal(dv, [0.0, 1.0, -1.0, 0.0]) class TestQuadraticFunction: def setup_method(self): self.J = np.array([ [0.1, 0.2], [-1.0, 1.0], [0.5, 0.2]]) self.g = np.array([0.8, -2.0]) self.diag = np.array([1.0, 2.0]) def test_build_quadratic_1d(self): s = np.zeros(2) a, b = build_quadratic_1d(self.J, self.g, s) assert_equal(a, 0) assert_equal(b, 0) a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) assert_equal(a, 0) assert_equal(b, 0) s = np.array([1.0, -1.0]) a, b = build_quadratic_1d(self.J, self.g, s) assert_equal(a, 2.05) assert_equal(b, 2.8) a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) assert_equal(a, 3.55) assert_equal(b, 2.8) s0 = np.array([0.5, 0.5]) a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0) assert_equal(a, 3.55) assert_allclose(b, 2.39) assert_allclose(c, -0.1525) def test_minimize_quadratic_1d(self): a = 5 b = -1 t, y = minimize_quadratic_1d(a, b, 1, 2) assert_equal(t, 1) assert_allclose(y, a * t**2 + b * t, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -2, -1) assert_equal(t, -1) assert_allclose(y, a * t**2 + b * t, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -1, 1) assert_equal(t, 0.1) assert_allclose(y, a * t**2 + b * t, rtol=1e-15) c = 10 t, y = minimize_quadratic_1d(a, b, -1, 1, c=c) assert_equal(t, 0.1) assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c) assert_equal(t, 0.1) assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c) assert_equal(t, 0.1) assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c) assert_equal(t, 0) assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) a = -1 b = 0.2 t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf) assert_equal(y, -np.inf) t, y = minimize_quadratic_1d(a, b, 0, np.inf) assert_equal(t, np.inf) assert_equal(y, -np.inf) t, y = minimize_quadratic_1d(a, b, -np.inf, 0) assert_equal(t, -np.inf) assert_equal(y, -np.inf) def test_evaluate_quadratic(self): s = np.array([1.0, -1.0]) value = evaluate_quadratic(self.J, self.g, s) assert_equal(value, 4.85) value = evaluate_quadratic(self.J, self.g, s, diag=self.diag) assert_equal(value, 6.35) s = np.array([[1.0, -1.0], [1.0, 1.0], [0.0, 0.0]]) values = evaluate_quadratic(self.J, self.g, 
s) assert_allclose(values, [4.85, -0.91, 0.0]) values = evaluate_quadratic(self.J, self.g, s, diag=self.diag) assert_allclose(values, [6.35, 0.59, 0.0]) class TestTrustRegion: def test_intersect(self): Delta = 1.0 x = np.zeros(3) s = np.array([1.0, 0.0, 0.0]) t_neg, t_pos = intersect_trust_region(x, s, Delta) assert_equal(t_neg, -1) assert_equal(t_pos, 1) s = np.array([-1.0, 1.0, -1.0]) t_neg, t_pos = intersect_trust_region(x, s, Delta) assert_allclose(t_neg, -3**-0.5) assert_allclose(t_pos, 3**-0.5) x = np.array([0.5, -0.5, 0]) s = np.array([0, 0, 1.0]) t_neg, t_pos = intersect_trust_region(x, s, Delta) assert_allclose(t_neg, -2**-0.5) assert_allclose(t_pos, 2**-0.5) x = np.ones(3) assert_raises(ValueError, intersect_trust_region, x, s, Delta) x = np.zeros(3) s = np.zeros(3) assert_raises(ValueError, intersect_trust_region, x, s, Delta) def test_reflective_transformation(): lb = np.array([-1, -2], dtype=float) ub = np.array([5, 3], dtype=float) y = np.array([0, 0]) x, g = reflective_transformation(y, lb, ub) assert_equal(x, y) assert_equal(g, np.ones(2)) y = np.array([-4, 4], dtype=float) x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf])) assert_equal(x, [2, 4]) assert_equal(g, [-1, 1]) x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub) assert_equal(x, [-4, 2]) assert_equal(g, [1, -1]) x, g = reflective_transformation(y, lb, ub) assert_equal(x, [2, 2]) assert_equal(g, [-1, -1]) lb = np.array([-np.inf, -2]) ub = np.array([5, np.inf]) y = np.array([10, 10], dtype=float) x, g = reflective_transformation(y, lb, ub) assert_equal(x, [0, 10]) assert_equal(g, [-1, 1]) def test_linear_operators(): A = np.arange(6).reshape((3, 2)) d_left = np.array([-1, 2, 5]) DA = np.diag(d_left).dot(A) J_left = left_multiplied_operator(A, d_left) d_right = np.array([5, 10]) AD = A.dot(np.diag(d_right)) J_right = right_multiplied_operator(A, d_right) x = np.array([-2, 3]) X = -2 * np.arange(2, 8).reshape((2, 3)) xt = np.array([0, -2, 15]) assert_allclose(DA.dot(x), J_left.matvec(x)) assert_allclose(DA.dot(X), J_left.matmat(X)) assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt)) assert_allclose(AD.dot(x), J_right.matvec(x)) assert_allclose(AD.dot(X), J_right.matmat(X)) assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt))
9,500
30.88255
78
py
scipy
scipy-main/scipy/optimize/tests/test_regression.py
"""Regression tests for optimize. """ import numpy as np from numpy.testing import assert_almost_equal from pytest import raises as assert_raises import scipy.optimize class TestRegression: def test_newton_x0_is_0(self): # Regression test for gh-1601 tgt = 1 res = scipy.optimize.newton(lambda x: x - 1, 0) assert_almost_equal(res, tgt) def test_newton_integers(self): # Regression test for gh-1741 root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2, fprime=lambda x: 2*x) assert_almost_equal(root, 1.0) def test_lmdif_errmsg(self): # This shouldn't cause a crash on Python 3 class SomeError(Exception): pass counter = [0] def func(x): counter[0] += 1 if counter[0] < 3: return x**2 - np.array([9, 10, 11]) else: raise SomeError() assert_raises(SomeError, scipy.optimize.leastsq, func, [1, 2, 3])
1,077
25.292683
62
py
scipy
scipy-main/scipy/optimize/tests/test_slsqp.py
""" Unit test for SLSQP optimization. """ from numpy.testing import (assert_, assert_array_almost_equal, assert_allclose, assert_equal) from pytest import raises as assert_raises import pytest import numpy as np from scipy.optimize import fmin_slsqp, minimize, Bounds, NonlinearConstraint class MyCallBack: """pass a custom callback function This makes sure it's being used. """ def __init__(self): self.been_called = False self.ncalls = 0 def __call__(self, x): self.been_called = True self.ncalls += 1 class TestSLSQP: """ Test SLSQP algorithm using Example 14.4 from Numerical Methods for Engineers by Steven Chapra and Raymond Canale. This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2, which has a maximum at x=2, y=1. """ def setup_method(self): self.opts = {'disp': False} def fun(self, d, sign=1.0): """ Arguments: d - A list of two elements, where d[0] represents x and d[1] represents y in the following equation. sign - A multiplier for f. Since we want to optimize it, and the SciPy optimizers can only minimize functions, we need to multiply it by -1 to achieve the desired solution Returns: 2*x*y + 2*x - x**2 - 2*y**2 """ x = d[0] y = d[1] return sign*(2*x*y + 2*x - x**2 - 2*y**2) def jac(self, d, sign=1.0): """ This is the derivative of fun, returning a NumPy array representing df/dx and df/dy. """ x = d[0] y = d[1] dfdx = sign*(-2*x + 2*y + 2) dfdy = sign*(2*x - 4*y) return np.array([dfdx, dfdy], float) def fun_and_jac(self, d, sign=1.0): return self.fun(d, sign), self.jac(d, sign) def f_eqcon(self, x, sign=1.0): """ Equality constraint """ return np.array([x[0] - x[1]]) def fprime_eqcon(self, x, sign=1.0): """ Equality constraint, derivative """ return np.array([[1, -1]]) def f_eqcon_scalar(self, x, sign=1.0): """ Scalar equality constraint """ return self.f_eqcon(x, sign)[0] def fprime_eqcon_scalar(self, x, sign=1.0): """ Scalar equality constraint, derivative """ return self.fprime_eqcon(x, sign)[0].tolist() def f_ieqcon(self, x, sign=1.0): """ Inequality constraint """ return np.array([x[0] - x[1] - 1.0]) def fprime_ieqcon(self, x, sign=1.0): """ Inequality constraint, derivative """ return np.array([[1, -1]]) def f_ieqcon2(self, x): """ Vector inequality constraint """ return np.asarray(x) def fprime_ieqcon2(self, x): """ Vector inequality constraint, derivative """ return np.identity(x.shape[0]) # minimize def test_minimize_unbounded_approximated(self): # Minimize, method='SLSQP': unbounded, approximated jacobian. jacs = [None, False, '2-point', '3-point'] for jac in jacs: res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), jac=jac, method='SLSQP', options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [2, 1]) def test_minimize_unbounded_given(self): # Minimize, method='SLSQP': unbounded, given Jacobian. res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), jac=self.jac, method='SLSQP', options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [2, 1]) def test_minimize_bounded_approximated(self): # Minimize, method='SLSQP': bounded, approximated jacobian. jacs = [None, False, '2-point', '3-point'] for jac in jacs: with np.errstate(invalid='ignore'): res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), jac=jac, bounds=((2.5, None), (None, 0.5)), method='SLSQP', options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [2.5, 0.5]) assert_(2.5 <= res.x[0]) assert_(res.x[1] <= 0.5) def test_minimize_unbounded_combined(self): # Minimize, method='SLSQP': unbounded, combined function and Jacobian. 
res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ), jac=True, method='SLSQP', options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [2, 1]) def test_minimize_equality_approximated(self): # Minimize with method='SLSQP': equality constraint, approx. jacobian. jacs = [None, False, '2-point', '3-point'] for jac in jacs: res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), jac=jac, constraints={'type': 'eq', 'fun': self.f_eqcon, 'args': (-1.0, )}, method='SLSQP', options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [1, 1]) def test_minimize_equality_given(self): # Minimize with method='SLSQP': equality constraint, given Jacobian. res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, method='SLSQP', args=(-1.0,), constraints={'type': 'eq', 'fun':self.f_eqcon, 'args': (-1.0, )}, options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [1, 1]) def test_minimize_equality_given2(self): # Minimize with method='SLSQP': equality constraint, given Jacobian # for fun and const. res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', jac=self.jac, args=(-1.0,), constraints={'type': 'eq', 'fun': self.f_eqcon, 'args': (-1.0, ), 'jac': self.fprime_eqcon}, options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [1, 1]) def test_minimize_equality_given_cons_scalar(self): # Minimize with method='SLSQP': scalar equality constraint, given # Jacobian for fun and const. res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', jac=self.jac, args=(-1.0,), constraints={'type': 'eq', 'fun': self.f_eqcon_scalar, 'args': (-1.0, ), 'jac': self.fprime_eqcon_scalar}, options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [1, 1]) def test_minimize_inequality_given(self): # Minimize with method='SLSQP': inequality constraint, given Jacobian. res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', jac=self.jac, args=(-1.0, ), constraints={'type': 'ineq', 'fun': self.f_ieqcon, 'args': (-1.0, )}, options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [2, 1], atol=1e-3) def test_minimize_inequality_given_vector_constraints(self): # Minimize with method='SLSQP': vector inequality constraint, given # Jacobian. res = minimize(self.fun, [-1.0, 1.0], jac=self.jac, method='SLSQP', args=(-1.0,), constraints={'type': 'ineq', 'fun': self.f_ieqcon2, 'jac': self.fprime_ieqcon2}, options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [2, 1]) def test_minimize_bounded_constraint(self): # when the constraint makes the solver go up against a parameter # bound make sure that the numerical differentiation of the # jacobian doesn't try to exceed that bound using a finite difference. # gh11403 def c(x): assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x return x[0] ** 0.5 + x[1] def f(x): assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x return -x[0] ** 2 + x[1] ** 2 cns = [NonlinearConstraint(c, 0, 1.5)] x0 = np.asarray([0.9, 0.5]) bnd = Bounds([0., 0.], [1.0, 1.0]) minimize(f, x0, method='SLSQP', bounds=bnd, constraints=cns) def test_minimize_bound_equality_given2(self): # Minimize with method='SLSQP': bounds, eq. const., given jac. for # fun. and const. 
res = minimize(self.fun, [-1.0, 1.0], method='SLSQP', jac=self.jac, args=(-1.0, ), bounds=[(-0.8, 1.), (-1, 0.8)], constraints={'type': 'eq', 'fun': self.f_eqcon, 'args': (-1.0, ), 'jac': self.fprime_eqcon}, options=self.opts) assert_(res['success'], res['message']) assert_allclose(res.x, [0.8, 0.8], atol=1e-3) assert_(-0.8 <= res.x[0] <= 1) assert_(-1 <= res.x[1] <= 0.8) # fmin_slsqp def test_unbounded_approximated(self): # SLSQP: unbounded, approximated Jacobian. res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), iprint = 0, full_output = 1) x, fx, its, imode, smode = res assert_(imode == 0, imode) assert_array_almost_equal(x, [2, 1]) def test_unbounded_given(self): # SLSQP: unbounded, given Jacobian. res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ), fprime = self.jac, iprint = 0, full_output = 1) x, fx, its, imode, smode = res assert_(imode == 0, imode) assert_array_almost_equal(x, [2, 1]) def test_equality_approximated(self): # SLSQP: equality constraint, approximated Jacobian. res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,), eqcons = [self.f_eqcon], iprint = 0, full_output = 1) x, fx, its, imode, smode = res assert_(imode == 0, imode) assert_array_almost_equal(x, [1, 1]) def test_equality_given(self): # SLSQP: equality constraint, given Jacobian. res = fmin_slsqp(self.fun, [-1.0, 1.0], fprime=self.jac, args=(-1.0,), eqcons = [self.f_eqcon], iprint = 0, full_output = 1) x, fx, its, imode, smode = res assert_(imode == 0, imode) assert_array_almost_equal(x, [1, 1]) def test_equality_given2(self): # SLSQP: equality constraint, given Jacobian for fun and const. res = fmin_slsqp(self.fun, [-1.0, 1.0], fprime=self.jac, args=(-1.0,), f_eqcons = self.f_eqcon, fprime_eqcons = self.fprime_eqcon, iprint = 0, full_output = 1) x, fx, its, imode, smode = res assert_(imode == 0, imode) assert_array_almost_equal(x, [1, 1]) def test_inequality_given(self): # SLSQP: inequality constraint, given Jacobian. res = fmin_slsqp(self.fun, [-1.0, 1.0], fprime=self.jac, args=(-1.0, ), ieqcons = [self.f_ieqcon], iprint = 0, full_output = 1) x, fx, its, imode, smode = res assert_(imode == 0, imode) assert_array_almost_equal(x, [2, 1], decimal=3) def test_bound_equality_given2(self): # SLSQP: bounds, eq. const., given jac. for fun. and const. res = fmin_slsqp(self.fun, [-1.0, 1.0], fprime=self.jac, args=(-1.0, ), bounds = [(-0.8, 1.), (-1, 0.8)], f_eqcons = self.f_eqcon, fprime_eqcons = self.fprime_eqcon, iprint = 0, full_output = 1) x, fx, its, imode, smode = res assert_(imode == 0, imode) assert_array_almost_equal(x, [0.8, 0.8], decimal=3) assert_(-0.8 <= x[0] <= 1) assert_(-1 <= x[1] <= 0.8) def test_scalar_constraints(self): # Regression test for gh-2182 x = fmin_slsqp(lambda z: z**2, [3.], ieqcons=[lambda z: z[0] - 1], iprint=0) assert_array_almost_equal(x, [1.]) x = fmin_slsqp(lambda z: z**2, [3.], f_ieqcons=lambda z: [z[0] - 1], iprint=0) assert_array_almost_equal(x, [1.]) def test_integer_bounds(self): # This should not raise an exception fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0) def test_array_bounds(self): # NumPy used to treat n-dimensional 1-element arrays as scalars # in some cases. The handling of `bounds` by `fmin_slsqp` still # supports this behavior. 
bounds = [(-np.inf, np.inf), (np.array([2]), np.array([3]))] x = fmin_slsqp(lambda z: np.sum(z**2 - 1), [2.5, 2.5], bounds=bounds, iprint=0) assert_array_almost_equal(x, [0, 2]) def test_obj_must_return_scalar(self): # Regression test for Github Issue #5433 # If objective function does not return a scalar, raises ValueError with assert_raises(ValueError): fmin_slsqp(lambda x: [0, 1], [1, 2, 3]) def test_obj_returns_scalar_in_list(self): # Test for Github Issue #5433 and PR #6691 # Objective function should be able to return length-1 Python list # containing the scalar fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0) def test_callback(self): # Minimize, method='SLSQP': unbounded, approximated jacobian. Check for callback callback = MyCallBack() res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ), method='SLSQP', callback=callback, options=self.opts) assert_(res['success'], res['message']) assert_(callback.been_called) assert_equal(callback.ncalls, res['nit']) def test_inconsistent_linearization(self): # SLSQP must be able to solve this problem, even if the # linearized problem at the starting point is infeasible. # Linearized constraints are # # 2*x0[0]*x[0] >= 1 # # At x0 = [0, 1], the second constraint is clearly infeasible. # This triggers a call with n2==1 in the LSQ subroutine. x = [0, 1] def f1(x): return x[0] + x[1] - 2 def f2(x): return x[0] ** 2 - 1 sol = minimize( lambda x: x[0]**2 + x[1]**2, x, constraints=({'type':'eq','fun': f1}, {'type':'ineq','fun': f2}), bounds=((0,None), (0,None)), method='SLSQP') x = sol.x assert_allclose(f1(x), 0, atol=1e-8) assert_(f2(x) >= -1e-8) assert_(sol.success, sol) def test_regression_5743(self): # SLSQP must not indicate success for this problem, # which is infeasible. x = [1, 2] sol = minimize( lambda x: x[0]**2 + x[1]**2, x, constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1}, {'type':'ineq','fun': lambda x: x[0]-2}), bounds=((0,None), (0,None)), method='SLSQP') assert_(not sol.success, sol) def test_gh_6676(self): def func(x): return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2 sol = minimize(func, [0, 0, 0], method='SLSQP') assert_(sol.jac.shape == (3,)) def test_invalid_bounds(self): # Raise correct error when lower bound is greater than upper bound. # See Github issue 6875. 
bounds_list = [ ((1, 2), (2, 1)), ((2, 1), (1, 2)), ((2, 1), (2, 1)), ((np.inf, 0), (np.inf, 0)), ((1, -np.inf), (0, 1)), ] for bounds in bounds_list: with assert_raises(ValueError): minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP') def test_bounds_clipping(self): # # SLSQP returns bogus results for initial guess out of bounds, gh-6859 # def f(x): return (x[0] - 1)**2 sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)]) assert_(sol.success) assert_allclose(sol.x, 0, atol=1e-10) sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)]) assert_(sol.success) assert_allclose(sol.x, 2, atol=1e-10) sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)]) assert_(sol.success) assert_allclose(sol.x, 0, atol=1e-10) sol = minimize(f, [10], method='slsqp', bounds=[(2, None)]) assert_(sol.success) assert_allclose(sol.x, 2, atol=1e-10) sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)]) assert_(sol.success) assert_allclose(sol.x, 0, atol=1e-10) sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)]) assert_(sol.success) assert_allclose(sol.x, 0, atol=1e-10) def test_infeasible_initial(self): # Check SLSQP behavior with infeasible initial point def f(x): x, = x return x*x - 2*x + 1 cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}] cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}] cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x}, {'type': 'ineq', 'fun': lambda x: x + 1}] sol = minimize(f, [10], method='slsqp', constraints=cons_u) assert_(sol.success) assert_allclose(sol.x, 0, atol=1e-10) sol = minimize(f, [-10], method='slsqp', constraints=cons_l) assert_(sol.success) assert_allclose(sol.x, 2, atol=1e-10) sol = minimize(f, [-10], method='slsqp', constraints=cons_u) assert_(sol.success) assert_allclose(sol.x, 0, atol=1e-10) sol = minimize(f, [10], method='slsqp', constraints=cons_l) assert_(sol.success) assert_allclose(sol.x, 2, atol=1e-10) sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul) assert_(sol.success) assert_allclose(sol.x, 0, atol=1e-10) sol = minimize(f, [10], method='slsqp', constraints=cons_ul) assert_(sol.success) assert_allclose(sol.x, 0, atol=1e-10) def test_inconsistent_inequalities(self): # gh-7618 def cost(x): return -1 * x[0] + 4 * x[1] def ineqcons1(x): return x[1] - x[0] - 1 def ineqcons2(x): return x[0] - x[1] # The inequalities are inconsistent, so no solution can exist: # # x1 >= x0 + 1 # x0 >= x1 x0 = (1,5) bounds = ((-5, 5), (-5, 5)) cons = (dict(type='ineq', fun=ineqcons1), dict(type='ineq', fun=ineqcons2)) res = minimize(cost, x0, method='SLSQP', bounds=bounds, constraints=cons) assert_(not res.success) def test_new_bounds_type(self): def f(x): return x[0] ** 2 + x[1] ** 2 bounds = Bounds([1, 0], [np.inf, np.inf]) sol = minimize(f, [0, 0], method='slsqp', bounds=bounds) assert_(sol.success) assert_allclose(sol.x, [1, 0]) def test_nested_minimization(self): class NestedProblem(): def __init__(self): self.F_outer_count = 0 def F_outer(self, x): self.F_outer_count += 1 if self.F_outer_count > 1000: raise Exception("Nested minimization failed to terminate.") inner_res = minimize(self.F_inner, (3, 4), method="SLSQP") assert_(inner_res.success) assert_allclose(inner_res.x, [1, 1]) return x[0]**2 + x[1]**2 + x[2]**2 def F_inner(self, x): return (x[0] - 1)**2 + (x[1] - 1)**2 def solve(self): outer_res = minimize(self.F_outer, (5, 5, 5), method="SLSQP") assert_(outer_res.success) assert_allclose(outer_res.x, [0, 0, 0]) problem = NestedProblem() problem.solve() def test_gh1758(self): # the test suggested in gh1758 # 
https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/ # implement two equality constraints, in R^2. def fun(x): return np.sqrt(x[1]) def f_eqcon(x): """ Equality constraint """ return x[1] - (2 * x[0]) ** 3 def f_eqcon2(x): """ Equality constraint """ return x[1] - (-x[0] + 1) ** 3 c1 = {'type': 'eq', 'fun': f_eqcon} c2 = {'type': 'eq', 'fun': f_eqcon2} res = minimize(fun, [8, 0.25], method='SLSQP', constraints=[c1, c2], bounds=[(-0.5, 1), (0, 8)]) np.testing.assert_allclose(res.fun, 0.5443310539518) np.testing.assert_allclose(res.x, [0.33333333, 0.2962963]) assert res.success def test_gh9640(self): np.random.seed(10) cons = ({'type': 'ineq', 'fun': lambda x: -x[0] - x[1] - 3}, {'type': 'ineq', 'fun': lambda x: x[1] + x[2] - 2}) bnds = ((-2, 2), (-2, 2), (-2, 2)) def target(x): return 1 x0 = [-1.8869783504471584, -0.640096352696244, -0.8174212253407696] res = minimize(target, x0, method='SLSQP', bounds=bnds, constraints=cons, options={'disp':False, 'maxiter':10000}) # The problem is infeasible, so it cannot succeed assert not res.success def test_parameters_stay_within_bounds(self): # gh11403. For some problems the SLSQP Fortran code suggests a step # outside one of the lower/upper bounds. When this happens # approx_derivative complains because it's being asked to evaluate # a gradient outside its domain. np.random.seed(1) bounds = Bounds(np.array([0.1]), np.array([1.0])) n_inputs = len(bounds.lb) x0 = np.array(bounds.lb + (bounds.ub - bounds.lb) * np.random.random(n_inputs)) def f(x): assert (x >= bounds.lb).all() return np.linalg.norm(x) with pytest.warns(RuntimeWarning, match='x were outside bounds'): res = minimize(f, x0, method='SLSQP', bounds=bounds) assert res.success
23,260
37.195402
88
py
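A minimal standalone sketch of the maximize-by-negation pattern that the test_slsqp.py record above exercises; it assumes nothing beyond the scipy.optimize.minimize calls already shown there. SLSQP only minimizes, so the objective 2*x*y + 2*x - x**2 - 2*y**2 is passed with sign=-1 and the solver is expected to land near (2, 1).

import numpy as np
from scipy.optimize import minimize

def fun(d, sign=1.0):
    # objective from the test class docstring; sign=-1 turns the
    # maximization into the minimization SLSQP expects
    x, y = d
    return sign * (2*x*y + 2*x - x**2 - 2*y**2)

def jac(d, sign=1.0):
    # analytic gradient, matching the tests' jac()
    x, y = d
    return np.array([sign * (-2*x + 2*y + 2), sign * (2*x - 4*y)])

res = minimize(fun, [-1.0, 1.0], args=(-1.0,), jac=jac, method='SLSQP')
print(res.success, res.x)   # res.x is expected to be close to [2, 1]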
scipy
scipy-main/scipy/optimize/tests/test_trustregion_krylov.py
""" Unit tests for Krylov space trust-region subproblem solver. To run it in its simplest form:: nosetests test_optimize.py """ import numpy as np from scipy.optimize._trlib import (get_trlib_quadratic_subproblem) from numpy.testing import (assert_, assert_almost_equal, assert_equal, assert_array_almost_equal) KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6) KrylovQP_disp = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6, disp=True) class TestKrylovQuadraticSubproblem: def test_for_the_easy_case(self): # `H` is chosen such that `g` is not orthogonal to the # eigenvector associated with the smallest eigenvalue. H = np.array([[1.0, 0.0, 4.0], [0.0, 2.0, 0.0], [4.0, 0.0, 3.0]]) g = np.array([5.0, 0.0, 4.0]) # Trust Radius trust_radius = 1.0 # Solve Subproblem subprob = KrylovQP(x=0, fun=lambda x: 0, jac=lambda x: g, hess=lambda x: None, hessp=lambda x, y: H.dot(y)) p, hits_boundary = subprob.solve(trust_radius) assert_array_almost_equal(p, np.array([-1.0, 0.0, 0.0])) assert_equal(hits_boundary, True) # check kkt satisfaction assert_almost_equal( np.linalg.norm(H.dot(p) + subprob.lam * p + g), 0.0) # check trust region constraint assert_almost_equal(np.linalg.norm(p), trust_radius) trust_radius = 0.5 p, hits_boundary = subprob.solve(trust_radius) assert_array_almost_equal(p, np.array([-0.46125446, 0., -0.19298788])) assert_equal(hits_boundary, True) # check kkt satisfaction assert_almost_equal( np.linalg.norm(H.dot(p) + subprob.lam * p + g), 0.0) # check trust region constraint assert_almost_equal(np.linalg.norm(p), trust_radius) def test_for_the_hard_case(self): # `H` is chosen such that `g` is orthogonal to the # eigenvector associated with the smallest eigenvalue. H = np.array([[1.0, 0.0, 4.0], [0.0, 2.0, 0.0], [4.0, 0.0, 3.0]]) g = np.array([0.0, 2.0, 0.0]) # Trust Radius trust_radius = 1.0 # Solve Subproblem subprob = KrylovQP(x=0, fun=lambda x: 0, jac=lambda x: g, hess=lambda x: None, hessp=lambda x, y: H.dot(y)) p, hits_boundary = subprob.solve(trust_radius) assert_array_almost_equal(p, np.array([0.0, -1.0, 0.0])) # check kkt satisfaction assert_almost_equal( np.linalg.norm(H.dot(p) + subprob.lam * p + g), 0.0) # check trust region constraint assert_almost_equal(np.linalg.norm(p), trust_radius) trust_radius = 0.5 p, hits_boundary = subprob.solve(trust_radius) assert_array_almost_equal(p, np.array([0.0, -0.5, 0.0])) # check kkt satisfaction assert_almost_equal( np.linalg.norm(H.dot(p) + subprob.lam * p + g), 0.0) # check trust region constraint assert_almost_equal(np.linalg.norm(p), trust_radius) def test_for_interior_convergence(self): H = np.array([[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]) g = np.array([0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]) trust_radius = 1.1 # Solve Subproblem subprob = KrylovQP(x=0, fun=lambda x: 0, jac=lambda x: g, hess=lambda x: None, hessp=lambda x, y: H.dot(y)) p, hits_boundary = subprob.solve(trust_radius) # check kkt satisfaction assert_almost_equal( np.linalg.norm(H.dot(p) + subprob.lam * p + g), 0.0) assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, -0.67005053, 0.31586769]) assert_array_almost_equal(hits_boundary, False) def test_for_very_close_to_zero(self): H = np.array([[0.88547534, 2.90692271, 0.98440885, 
-0.78911503, -0.28035809], [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]) g = np.array([0, 0, 0, 0, 1e-6]) trust_radius = 1.1 # Solve Subproblem subprob = KrylovQP(x=0, fun=lambda x: 0, jac=lambda x: g, hess=lambda x: None, hessp=lambda x, y: H.dot(y)) p, hits_boundary = subprob.solve(trust_radius) # check kkt satisfaction assert_almost_equal( np.linalg.norm(H.dot(p) + subprob.lam * p + g), 0.0) # check trust region constraint assert_almost_equal(np.linalg.norm(p), trust_radius) assert_array_almost_equal(p, [0.06910534, -0.01432721, -0.65311947, -0.23815972, -0.84954934]) assert_array_almost_equal(hits_boundary, True) def test_disp(self, capsys): H = -np.eye(5) g = np.array([0, 0, 0, 0, 1e-6]) trust_radius = 1.1 subprob = KrylovQP_disp(x=0, fun=lambda x: 0, jac=lambda x: g, hess=lambda x: None, hessp=lambda x, y: H.dot(y)) p, hits_boundary = subprob.solve(trust_radius) out, err = capsys.readouterr() assert_(out.startswith(' TR Solving trust region problem'), repr(out))
6,587
37.526316
89
py
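A short sketch of the Krylov trust-region subproblem API exercised by the test_trustregion_krylov.py record above, using only calls that appear there (note that scipy.optimize._trlib is a private module, so this mirrors the tests rather than a public interface): build the solver class, solve min_p g.p + 0.5 p.Hp subject to ||p|| <= tr, and check the boundary KKT condition Hp + lam*p + g ~ 0.

import numpy as np
from scipy.optimize._trlib import get_trlib_quadratic_subproblem

KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6)

H = np.array([[1.0, 0.0, 4.0],
              [0.0, 2.0, 0.0],
              [4.0, 0.0, 3.0]])
g = np.array([5.0, 0.0, 4.0])

# only hessp (Hessian-vector products) is really used; fun/hess are dummies,
# exactly as in the tests above
subprob = KrylovQP(x=0, fun=lambda x: 0, jac=lambda x: g,
                   hess=lambda x: None, hessp=lambda x, y: H.dot(y))
p, hits_boundary = subprob.solve(1.0)   # trust radius 1.0

# KKT residual on the boundary: H p + lam * p + g should be ~0
print(hits_boundary, np.linalg.norm(H.dot(p) + subprob.lam * p + g))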
scipy
scipy-main/scipy/optimize/tests/test_nnls.py
import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises

from scipy.optimize import nnls


class TestNNLS:
    def setup_method(self):
        self.rng = np.random.default_rng(1685225766635251)

    def test_nnls(self):
        a = np.arange(25.0).reshape(-1, 5)
        x = np.arange(5.0)
        y = a @ x
        x, res = nnls(a, y)
        assert res < 1e-7
        assert np.linalg.norm((a @ x) - y) < 1e-7

    def test_nnls_tall(self):
        a = self.rng.uniform(low=-10, high=10, size=[50, 10])
        x = np.abs(self.rng.uniform(low=-2, high=2, size=[10]))
        x[::2] = 0
        b = a @ x
        xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.))
        assert_allclose(xact, x, rtol=0., atol=1e-10)
        assert rnorm < 1e-12

    def test_nnls_wide(self):
        # If too wide then the problem becomes too ill-conditioned and starts
        # emitting warnings, hence the small m, n difference.
        a = self.rng.uniform(low=-10, high=10, size=[100, 120])
        x = np.abs(self.rng.uniform(low=-2, high=2, size=[120]))
        x[::2] = 0
        b = a @ x
        xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.))
        assert_allclose(xact, x, rtol=0., atol=1e-10)
        assert rnorm < 1e-12

    def test_maxiter(self):
        # test that the maxiter argument does stop iterations
        a = self.rng.uniform(size=(5, 10))
        b = self.rng.uniform(size=5)
        with assert_raises(RuntimeError):
            nnls(a, b, maxiter=1)
1,549
33.444444
78
py
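A small sketch of the nnls call pattern from the test_nnls.py record above: nnls solves argmin_x ||A x - b|| subject to x >= 0 and returns the solution together with the residual norm. The data reuses the exactly-solvable example from test_nnls, so nothing here goes beyond the record.

import numpy as np
from scipy.optimize import nnls

A = np.arange(25.0).reshape(-1, 5)
b = A @ np.arange(5.0)        # b lies in the column space of A

x, rnorm = nnls(A, b)
# both quantities are expected to be essentially zero
# (the record's test asserts they are below 1e-7)
print(rnorm, np.linalg.norm(A @ x - b))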
scipy
scipy-main/scipy/optimize/tests/test__spectral.py
import itertools import numpy as np from numpy import exp from numpy.testing import assert_, assert_equal from scipy.optimize import root def test_performance(): # Compare performance results to those listed in # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)] # and # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)]. # and those produced by dfsane.f from M. Raydan's website. # # Where the results disagree, the largest limits are taken. e_a = 1e-5 e_r = 1e-4 table_1 = [ dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5), dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2), dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11), dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11), # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3 dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers? dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers? dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6? dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18), dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12 ] # Check also scaling invariance for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10], ['cruz', 'cheng']): for problem in table_1: n = problem['n'] def func(x, n): return yscale * problem['F'](x / xscale, n) args = (n,) x0 = problem['x0'](n) * xscale fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n)) sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale) sigma_0 = xscale/yscale with np.errstate(over='ignore'): sol = root(func, x0, args=args, options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1, sigma_0=sigma_0, sigma_eps=sigma_eps, line_search=line_search), method='DF-SANE') err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)), fatol, sol.success, sol.nit, sol.nfev]) assert_(sol.success, err_msg) assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval assert_(sol.nit <= problem['nit'], err_msg) assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg) def test_complex(): def func(z): return z**2 - 1 + 2j x0 = 2.0j ftol = 1e-4 sol = root(func, x0, tol=ftol, method='DF-SANE') assert_(sol.success) f0 = np.linalg.norm(func(x0)) fx = np.linalg.norm(func(sol.x)) assert_(fx <= ftol*f0) def test_linear_definite(): # The DF-SANE paper proves convergence for "strongly isolated" # solutions. # # For linear systems F(x) = A x - b = 0, with A positive or # negative definite, the solution is strongly isolated. def check_solvability(A, b, line_search='cruz'): def func(x): return A.dot(x) - b xp = np.linalg.solve(A, b) eps = np.linalg.norm(func(xp)) * 1e3 sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search), method='DF-SANE') assert_(sol.success) assert_(np.linalg.norm(func(sol.x)) <= eps) n = 90 # Test linear pos.def. system np.random.seed(1234) A = np.arange(n*n).reshape(n, n) A = A + n*n * np.diag(1 + np.arange(n)) assert_(np.linalg.eigvals(A).min() > 0) b = np.arange(n) * 1.0 check_solvability(A, b, 'cruz') check_solvability(A, b, 'cheng') # Test linear neg.def. 
system check_solvability(-A, b, 'cruz') check_solvability(-A, b, 'cheng') def test_shape(): def f(x, arg): return x - arg for dt in [float, complex]: x = np.zeros([2,2]) arg = np.ones([2,2], dtype=dt) sol = root(f, x, args=(arg,), method='DF-SANE') assert_(sol.success) assert_equal(sol.x.shape, x.shape) # Some of the test functions and initial guesses listed in # [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)] def F_1(x, n): g = np.zeros([n]) i = np.arange(2, n+1) g[0] = exp(x[0] - 1) - 1 g[1:] = i*(exp(x[1:] - 1) - x[1:]) return g def x0_1(n): x0 = np.empty([n]) x0.fill(n/(n-1)) return x0 def F_2(x, n): g = np.zeros([n]) i = np.arange(2, n+1) g[0] = exp(x[0]) - 1 g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1) return g def x0_2(n): x0 = np.empty([n]) x0.fill(1/n**2) return x0 def F_4(x, n): # skip name check assert_equal(n % 3, 0) g = np.zeros([n]) # Note: the first line is typoed in some of the references; # correct in original [Gasparo, Optimization Meth. 13, 79 (2000)] g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8 g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16 g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3 return g def x0_4(n): # skip name check assert_equal(n % 3, 0) x0 = np.array([-1, 1/2, -1] * (n//3)) return x0 def F_6(x, n): c = 0.9 mu = (np.arange(1, n+1) - 0.5)/n return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1)) def x0_6(n): return np.ones([n]) def F_7(x, n): assert_equal(n % 3, 0) def phi(t): v = 0.5*t - 2 v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1] v[t >= 2] = (0.5*t + 2)[t >= 2] return v g = np.zeros([n]) g[::3] = 1e4 * x[1::3]**2 - 1 g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001 g[2::3] = phi(x[2::3]) return g def x0_7(n): assert_equal(n % 3, 0) return np.array([1e-3, 18, 1] * (n//3)) def F_9(x, n): g = np.zeros([n]) i = np.arange(2, n) g[0] = x[0]**3/3 + x[1]**2/2 g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2 g[-1] = -x[-1]**2/2 + n*x[-1]**3/3 return g def x0_9(n): return np.ones([n]) def F_10(x, n): return np.log(1 + x) - x/n def x0_10(n): return np.ones([n])
6,597
29.976526
120
py
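A compact sketch of the DF-SANE usage covered by the test__spectral.py record above, restricted to options that appear there (ftol, fatol, line_search): solve a small positive-definite linear system F(x) = A x - b = 0 through scipy.optimize.root, for which the record argues the root is strongly isolated and the solver should converge.

import numpy as np
from scipy.optimize import root

n = 5
A = np.diag(np.arange(1.0, n + 1))   # positive definite diagonal matrix
b = np.arange(n) * 1.0

def func(x):
    return A.dot(x) - b

sol = root(func, np.zeros(n), method='DF-SANE',
           options=dict(ftol=0, fatol=1e-9, line_search='cheng'))
# sol.success should be True and the residual norm small (near fatol)
print(sol.success, np.linalg.norm(func(sol.x)))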
scipy
scipy-main/scipy/optimize/tests/test_nonlin.py
""" Unit tests for nonlinear solvers Author: Ondrej Certik May 2007 """ from numpy.testing import assert_ import pytest from scipy.optimize import _nonlin as nonlin, root from numpy import diag, dot from numpy.linalg import inv import numpy as np from .test_minpack import pressure_network SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden, 'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing, 'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov} MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov} # ---------------------------------------------------------------------------- # Test problems # ---------------------------------------------------------------------------- def F(x): x = np.asarray(x).T d = diag([3, 2, 1.5, 1, 0.5]) c = 0.01 f = -d @ x - c * float(x.T @ x) * x return f F.xin = [1, 1, 1, 1, 1] F.KNOWN_BAD = {} F.JAC_KSP_BAD = {} F.ROOT_JAC_KSP_BAD = {} def F2(x): return x F2.xin = [1, 2, 3, 4, 5, 6] F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing} F2.JAC_KSP_BAD = {} F2.ROOT_JAC_KSP_BAD = {} def F2_lucky(x): return x F2_lucky.xin = [0, 0, 0, 0, 0, 0] F2_lucky.KNOWN_BAD = {} F2_lucky.JAC_KSP_BAD = {} F2_lucky.ROOT_JAC_KSP_BAD = {} def F3(x): A = np.array([[-2, 1, 0.], [1, -2, 1], [0, 1, -2]]) b = np.array([1, 2, 3.]) return A @ x - b F3.xin = [1, 2, 3] F3.KNOWN_BAD = {} F3.JAC_KSP_BAD = {} F3.ROOT_JAC_KSP_BAD = {} def F4_powell(x): A = 1e4 return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)] F4_powell.xin = [-1, -2] F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing, 'diagbroyden': nonlin.diagbroyden} # In the extreme case, it does not converge for nolinear problem solved by # MINRES and root problem solved by GMRES/BiCGStab/CGS/MINRES/TFQMR when using # Krylov method to approximate Jacobian F4_powell.JAC_KSP_BAD = {'minres'} F4_powell.ROOT_JAC_KSP_BAD = {'gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr'} def F5(x): return pressure_network(x, 4, np.array([.5, .5, .5, .5])) F5.xin = [2., 0, 2, 0] F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing, 'linearmixing': nonlin.linearmixing, 'diagbroyden': nonlin.diagbroyden} # In the extreme case, the Jacobian inversion yielded zero vector for nonlinear # problem solved by CGS/MINRES and it does not converge for root problem solved # by MINRES and when using Krylov method to approximate Jacobian F5.JAC_KSP_BAD = {'cgs', 'minres'} F5.ROOT_JAC_KSP_BAD = {'minres'} def F6(x): x1, x2 = x J0 = np.array([[-4.256, 14.7], [0.8394989, 0.59964207]]) v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6, np.sin(x2 * np.exp(x1) - 1)]) return -np.linalg.solve(J0, v) F6.xin = [-0.5, 1.4] F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing, 'linearmixing': nonlin.linearmixing, 'diagbroyden': nonlin.diagbroyden} F6.JAC_KSP_BAD = {} F6.ROOT_JAC_KSP_BAD = {} # ---------------------------------------------------------------------------- # Tests # ---------------------------------------------------------------------------- class TestNonlin: """ Check the Broyden methods for a few test problems. broyden1, broyden2, and newton_krylov must succeed for all functions. Some of the others don't -- tests in KNOWN_BAD are skipped. 
""" def _check_nonlin_func(self, f, func, f_tol=1e-2): # Test all methods mentioned in the class `KrylovJacobian` if func == SOLVERS['krylov']: for method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']: if method in f.JAC_KSP_BAD: continue x = func(f, f.xin, method=method, line_search=None, f_tol=f_tol, maxiter=200, verbose=0) assert_(np.absolute(f(x)).max() < f_tol) x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0) assert_(np.absolute(f(x)).max() < f_tol) def _check_root(self, f, method, f_tol=1e-2): # Test Krylov methods if method == 'krylov': for jac_method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']: if jac_method in f.ROOT_JAC_KSP_BAD: continue res = root(f, f.xin, method=method, options={'ftol': f_tol, 'maxiter': 200, 'disp': 0, 'jac_options': {'method': jac_method}}) assert_(np.absolute(res.fun).max() < f_tol) res = root(f, f.xin, method=method, options={'ftol': f_tol, 'maxiter': 200, 'disp': 0}) assert_(np.absolute(res.fun).max() < f_tol) @pytest.mark.xfail def _check_func_fail(self, *a, **kw): pass @pytest.mark.filterwarnings('ignore::DeprecationWarning') def test_problem_nonlin(self): for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]: for func in SOLVERS.values(): if func in f.KNOWN_BAD.values(): if func in MUST_WORK.values(): self._check_func_fail(f, func) continue self._check_nonlin_func(f, func) @pytest.mark.filterwarnings('ignore::DeprecationWarning') @pytest.mark.parametrize("method", ['lgmres', 'gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']) def test_tol_norm_called(self, method): # Check that supplying tol_norm keyword to nonlin_solve works self._tol_norm_used = False def local_norm_func(x): self._tol_norm_used = True return np.absolute(x).max() nonlin.newton_krylov(F, F.xin, method=method, f_tol=1e-2, maxiter=200, verbose=0, tol_norm=local_norm_func) assert_(self._tol_norm_used) @pytest.mark.filterwarnings('ignore::DeprecationWarning') def test_problem_root(self): for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]: for meth in SOLVERS: if meth in f.KNOWN_BAD: if meth in MUST_WORK: self._check_func_fail(f, meth) continue self._check_root(f, meth) class TestSecant: """Check that some Jacobian approximations satisfy the secant condition""" xs = [np.array([1., 2., 3., 4., 5.]), np.array([2., 3., 4., 5., 1.]), np.array([3., 4., 5., 1., 2.]), np.array([4., 5., 1., 2., 3.]), np.array([9., 1., 9., 1., 3.]), np.array([0., 1., 9., 1., 3.]), np.array([5., 5., 7., 1., 1.]), np.array([1., 2., 7., 5., 1.]),] fs = [x**2 - 1 for x in xs] def _check_secant(self, jac_cls, npoints=1, **kw): """ Check that the given Jacobian approximation satisfies secant conditions for last `npoints` points. 
""" jac = jac_cls(**kw) jac.setup(self.xs[0], self.fs[0], None) for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): jac.update(x, f) for k in range(min(npoints, j+1)): dx = self.xs[j-k+1] - self.xs[j-k] df = self.fs[j-k+1] - self.fs[j-k] assert_(np.allclose(dx, jac.solve(df))) # Check that the `npoints` secant bound is strict if j >= npoints: dx = self.xs[j-npoints+1] - self.xs[j-npoints] df = self.fs[j-npoints+1] - self.fs[j-npoints] assert_(not np.allclose(dx, jac.solve(df))) def test_broyden1(self): self._check_secant(nonlin.BroydenFirst) def test_broyden2(self): self._check_secant(nonlin.BroydenSecond) def test_broyden1_update(self): # Check that BroydenFirst update works as for a dense matrix jac = nonlin.BroydenFirst(alpha=0.1) jac.setup(self.xs[0], self.fs[0], None) B = np.identity(5) * (-1/0.1) for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): df = f - self.fs[last_j] dx = x - self.xs[last_j] B += (df - dot(B, dx))[:, None] * dx[None, :] / dot(dx, dx) jac.update(x, f) assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13)) def test_broyden2_update(self): # Check that BroydenSecond update works as for a dense matrix jac = nonlin.BroydenSecond(alpha=0.1) jac.setup(self.xs[0], self.fs[0], None) H = np.identity(5) * (-0.1) for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): df = f - self.fs[last_j] dx = x - self.xs[last_j] H += (dx - dot(H, df))[:, None] * df[None, :] / dot(df, df) jac.update(x, f) assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13)) def test_anderson(self): # Anderson mixing (with w0=0) satisfies secant conditions # for the last M iterates, see [Ey]_ # # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3) class TestLinear: """Solve a linear equation; some methods find the exact solution in a finite number of steps""" def _check(self, jac, N, maxiter, complex=False, **kw): np.random.seed(123) A = np.random.randn(N, N) if complex: A = A + 1j*np.random.randn(N, N) b = np.random.randn(N) if complex: b = b + 1j*np.random.randn(N) def func(x): return dot(A, x) - b sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter, f_tol=1e-6, line_search=None, verbose=0) assert_(np.allclose(dot(A, sol), b, atol=1e-6)) def test_broyden1(self): # Broyden methods solve linear systems exactly in 2*N steps self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False) self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True) def test_broyden2(self): # Broyden methods solve linear systems exactly in 2*N steps self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False) self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True) def test_anderson(self): # Anderson is rather similar to Broyden, if given enough storage space self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False) self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True) def test_krylov(self): # Krylov methods solve linear systems exactly in N inner steps self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10) self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10) class TestJacobianDotSolve: """ Check that solve/dot methods in Jacobian approximations are consistent """ def _func(self, x): return x**2 - 1 + np.dot(self.A, x) def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw): np.random.seed(123) N = 7 def rand(*a): q = np.random.rand(*a) if complex: q = q + 1j*np.random.rand(*a) return q def assert_close(a, b, msg): d = abs(a - b).max() f = tol + abs(b).max()*tol if d > f: raise 
AssertionError(f'{msg}: err {d:g}') self.A = rand(N, N) # initialize x0 = np.random.rand(N) jac = jac_cls(**kw) jac.setup(x0, self._func(x0), self._func) # check consistency for k in range(2*N): v = rand(N) if hasattr(jac, '__array__'): Jd = np.array(jac) if hasattr(jac, 'solve'): Gv = jac.solve(v) Gv2 = np.linalg.solve(Jd, v) assert_close(Gv, Gv2, 'solve vs array') if hasattr(jac, 'rsolve'): Gv = jac.rsolve(v) Gv2 = np.linalg.solve(Jd.T.conj(), v) assert_close(Gv, Gv2, 'rsolve vs array') if hasattr(jac, 'matvec'): Jv = jac.matvec(v) Jv2 = np.dot(Jd, v) assert_close(Jv, Jv2, 'dot vs array') if hasattr(jac, 'rmatvec'): Jv = jac.rmatvec(v) Jv2 = np.dot(Jd.T.conj(), v) assert_close(Jv, Jv2, 'rmatvec vs array') if hasattr(jac, 'matvec') and hasattr(jac, 'solve'): Jv = jac.matvec(v) Jv2 = jac.solve(jac.matvec(Jv)) assert_close(Jv, Jv2, 'dot vs solve') if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'): Jv = jac.rmatvec(v) Jv2 = jac.rmatvec(jac.rsolve(Jv)) assert_close(Jv, Jv2, 'rmatvec vs rsolve') x = rand(N) jac.update(x, self._func(x)) def test_broyden1(self): self._check_dot(nonlin.BroydenFirst, complex=False) self._check_dot(nonlin.BroydenFirst, complex=True) def test_broyden2(self): self._check_dot(nonlin.BroydenSecond, complex=False) self._check_dot(nonlin.BroydenSecond, complex=True) def test_anderson(self): self._check_dot(nonlin.Anderson, complex=False) self._check_dot(nonlin.Anderson, complex=True) def test_diagbroyden(self): self._check_dot(nonlin.DiagBroyden, complex=False) self._check_dot(nonlin.DiagBroyden, complex=True) def test_linearmixing(self): self._check_dot(nonlin.LinearMixing, complex=False) self._check_dot(nonlin.LinearMixing, complex=True) def test_excitingmixing(self): self._check_dot(nonlin.ExcitingMixing, complex=False) self._check_dot(nonlin.ExcitingMixing, complex=True) def test_krylov(self): self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3) self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3) class TestNonlinOldTests: """ Test case for a simple constrained entropy maximization problem (the machine translation example of Berger et al in Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) 
""" def test_broyden1(self): x = nonlin.broyden1(F, F.xin, iter=12, alpha=1) assert_(nonlin.norm(x) < 1e-9) assert_(nonlin.norm(F(x)) < 1e-9) def test_broyden2(self): x = nonlin.broyden2(F, F.xin, iter=12, alpha=1) assert_(nonlin.norm(x) < 1e-9) assert_(nonlin.norm(F(x)) < 1e-9) def test_anderson(self): x = nonlin.anderson(F, F.xin, iter=12, alpha=0.03, M=5) assert_(nonlin.norm(x) < 0.33) def test_linearmixing(self): x = nonlin.linearmixing(F, F.xin, iter=60, alpha=0.5) assert_(nonlin.norm(x) < 1e-7) assert_(nonlin.norm(F(x)) < 1e-7) def test_exciting(self): x = nonlin.excitingmixing(F, F.xin, iter=20, alpha=0.5) assert_(nonlin.norm(x) < 1e-5) assert_(nonlin.norm(F(x)) < 1e-5) def test_diagbroyden(self): x = nonlin.diagbroyden(F, F.xin, iter=11, alpha=1) assert_(nonlin.norm(x) < 1e-8) assert_(nonlin.norm(F(x)) < 1e-8) def test_root_broyden1(self): res = root(F, F.xin, method='broyden1', options={'nit': 12, 'jac_options': {'alpha': 1}}) assert_(nonlin.norm(res.x) < 1e-9) assert_(nonlin.norm(res.fun) < 1e-9) def test_root_broyden2(self): res = root(F, F.xin, method='broyden2', options={'nit': 12, 'jac_options': {'alpha': 1}}) assert_(nonlin.norm(res.x) < 1e-9) assert_(nonlin.norm(res.fun) < 1e-9) def test_root_anderson(self): res = root(F, F.xin, method='anderson', options={'nit': 12, 'jac_options': {'alpha': 0.03, 'M': 5}}) assert_(nonlin.norm(res.x) < 0.33) def test_root_linearmixing(self): res = root(F, F.xin, method='linearmixing', options={'nit': 60, 'jac_options': {'alpha': 0.5}}) assert_(nonlin.norm(res.x) < 1e-7) assert_(nonlin.norm(res.fun) < 1e-7) def test_root_excitingmixing(self): res = root(F, F.xin, method='excitingmixing', options={'nit': 20, 'jac_options': {'alpha': 0.5}}) assert_(nonlin.norm(res.x) < 1e-5) assert_(nonlin.norm(res.fun) < 1e-5) def test_root_diagbroyden(self): res = root(F, F.xin, method='diagbroyden', options={'nit': 11, 'jac_options': {'alpha': 1}}) assert_(nonlin.norm(res.x) < 1e-8) assert_(nonlin.norm(res.fun) < 1e-8)
17,228
33.527054
79
py
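A compact sketch of the root() interface to the Broyden solvers from the test_nonlin.py record above, reusing that record's test function F (a mildly nonlinear 5-d system whose root is the origin) and the same options as TestNonlinOldTests.test_root_broyden1.

import numpy as np
from scipy.optimize import root

def F(x):
    # same system as in the record: -D x - c (x.x) x with D diagonal
    x = np.asarray(x, dtype=float)
    d = np.diag([3, 2, 1.5, 1, 0.5])
    c = 0.01
    return -d @ x - c * float(x @ x) * x

res = root(F, [1, 1, 1, 1, 1], method='broyden1',
           options={'nit': 12, 'jac_options': {'alpha': 1}})
# both norms are expected to be tiny (the record's test asserts < 1e-9)
print(np.linalg.norm(res.x), np.linalg.norm(res.fun))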
scipy
scipy-main/scipy/optimize/tests/test_minpack.py
""" Unit tests for optimization routines from minpack.py. """ import warnings import pytest from numpy.testing import (assert_, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_warns, suppress_warnings) from pytest import raises as assert_raises import numpy as np from numpy import array, float64 from multiprocessing.pool import ThreadPool from scipy import optimize, linalg from scipy.special import lambertw from scipy.optimize._minpack_py import leastsq, curve_fit, fixed_point from scipy.optimize import OptimizeWarning from scipy.optimize._minimize import Bounds class ReturnShape: """This class exists to create a callable that does not have a '__name__' attribute. __init__ takes the argument 'shape', which should be a tuple of ints. When an instance is called with a single argument 'x', it returns numpy.ones(shape). """ def __init__(self, shape): self.shape = shape def __call__(self, x): return np.ones(self.shape) def dummy_func(x, shape): """A function that returns an array of ones of the given shape. `x` is ignored. """ return np.ones(shape) def sequence_parallel(fs): with ThreadPool(len(fs)) as pool: return pool.map(lambda f: f(), fs) # Function and Jacobian for tests of solvers for systems of nonlinear # equations def pressure_network(flow_rates, Qtot, k): """Evaluate non-linear equation system representing the pressures and flows in a system of n parallel pipes:: f_i = P_i - P_0, for i = 1..n f_0 = sum(Q_i) - Qtot where Q_i is the flow rate in pipe i and P_i the pressure in that pipe. Pressure is modeled as a P=kQ**2 where k is a valve coefficient and Q is the flow rate. Parameters ---------- flow_rates : float A 1-D array of n flow rates [kg/s]. k : float A 1-D array of n valve coefficients [1/kg m]. Qtot : float A scalar, the total input flow rate [kg/s]. Returns ------- F : float A 1-D array, F[i] == f_i. """ P = k * flow_rates**2 F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot)) return F def pressure_network_jacobian(flow_rates, Qtot, k): """Return the jacobian of the equation system F(flow_rates) computed by `pressure_network` with respect to *flow_rates*. See `pressure_network` for the detailed description of parrameters. Returns ------- jac : float *n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)`` and *f_i* and *Q_i* are described in the doc for `pressure_network` """ n = len(flow_rates) pdiff = np.diag(flow_rates[1:] * 2 * k[1:] - 2 * flow_rates[0] * k[0]) jac = np.empty((n, n)) jac[:n-1, :n-1] = pdiff * 0 jac[:n-1, n-1] = 0 jac[n-1, :] = np.ones(n) return jac def pressure_network_fun_and_grad(flow_rates, Qtot, k): return (pressure_network(flow_rates, Qtot, k), pressure_network_jacobian(flow_rates, Qtot, k)) class TestFSolve: def test_pressure_network_no_gradient(self): # fsolve without gradient, equal pipes -> equal flows. 
k = np.full(4, 0.5) Qtot = 4 initial_guess = array([2., 0., 2., 0.]) final_flows, info, ier, mesg = optimize.fsolve( pressure_network, initial_guess, args=(Qtot, k), full_output=True) assert_array_almost_equal(final_flows, np.ones(4)) assert_(ier == 1, mesg) def test_pressure_network_with_gradient(self): # fsolve with gradient, equal pipes -> equal flows k = np.full(4, 0.5) Qtot = 4 initial_guess = array([2., 0., 2., 0.]) final_flows = optimize.fsolve( pressure_network, initial_guess, args=(Qtot, k), fprime=pressure_network_jacobian) assert_array_almost_equal(final_flows, np.ones(4)) def test_wrong_shape_func_callable(self): func = ReturnShape(1) # x0 is a list of two elements, but func will return an array with # length 1, so this should result in a TypeError. x0 = [1.5, 2.0] assert_raises(TypeError, optimize.fsolve, func, x0) def test_wrong_shape_func_function(self): # x0 is a list of two elements, but func will return an array with # length 1, so this should result in a TypeError. x0 = [1.5, 2.0] assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),)) def test_wrong_shape_fprime_callable(self): func = ReturnShape(1) deriv_func = ReturnShape((2,2)) assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func) def test_wrong_shape_fprime_function(self): def func(x): return dummy_func(x, (2,)) def deriv_func(x): return dummy_func(x, (3, 3)) assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func) def test_func_can_raise(self): def func(*args): raise ValueError('I raised') with assert_raises(ValueError, match='I raised'): optimize.fsolve(func, x0=[0]) def test_Dfun_can_raise(self): def func(x): return x - np.array([10]) def deriv_func(*args): raise ValueError('I raised') with assert_raises(ValueError, match='I raised'): optimize.fsolve(func, x0=[0], fprime=deriv_func) def test_float32(self): def func(x): return np.array([x[0] - 100, x[1] - 1000], dtype=np.float32) ** 2 p = optimize.fsolve(func, np.array([1, 1], np.float32)) assert_allclose(func(p), [0, 0], atol=1e-3) def test_reentrant_func(self): def func(*args): self.test_pressure_network_no_gradient() return pressure_network(*args) # fsolve without gradient, equal pipes -> equal flows. 
k = np.full(4, 0.5) Qtot = 4 initial_guess = array([2., 0., 2., 0.]) final_flows, info, ier, mesg = optimize.fsolve( func, initial_guess, args=(Qtot, k), full_output=True) assert_array_almost_equal(final_flows, np.ones(4)) assert_(ier == 1, mesg) def test_reentrant_Dfunc(self): def deriv_func(*args): self.test_pressure_network_with_gradient() return pressure_network_jacobian(*args) # fsolve with gradient, equal pipes -> equal flows k = np.full(4, 0.5) Qtot = 4 initial_guess = array([2., 0., 2., 0.]) final_flows = optimize.fsolve( pressure_network, initial_guess, args=(Qtot, k), fprime=deriv_func) assert_array_almost_equal(final_flows, np.ones(4)) def test_concurrent_no_gradient(self): v = sequence_parallel([self.test_pressure_network_no_gradient] * 10) assert all([result is None for result in v]) def test_concurrent_with_gradient(self): v = sequence_parallel([self.test_pressure_network_with_gradient] * 10) assert all([result is None for result in v]) class TestRootHybr: def test_pressure_network_no_gradient(self): # root/hybr without gradient, equal pipes -> equal flows k = np.full(4, 0.5) Qtot = 4 initial_guess = array([2., 0., 2., 0.]) final_flows = optimize.root(pressure_network, initial_guess, method='hybr', args=(Qtot, k)).x assert_array_almost_equal(final_flows, np.ones(4)) def test_pressure_network_with_gradient(self): # root/hybr with gradient, equal pipes -> equal flows k = np.full(4, 0.5) Qtot = 4 initial_guess = array([[2., 0., 2., 0.]]) final_flows = optimize.root(pressure_network, initial_guess, args=(Qtot, k), method='hybr', jac=pressure_network_jacobian).x assert_array_almost_equal(final_flows, np.ones(4)) def test_pressure_network_with_gradient_combined(self): # root/hybr with gradient and function combined, equal pipes -> equal # flows k = np.full(4, 0.5) Qtot = 4 initial_guess = array([2., 0., 2., 0.]) final_flows = optimize.root(pressure_network_fun_and_grad, initial_guess, args=(Qtot, k), method='hybr', jac=True).x assert_array_almost_equal(final_flows, np.ones(4)) class TestRootLM: def test_pressure_network_no_gradient(self): # root/lm without gradient, equal pipes -> equal flows k = np.full(4, 0.5) Qtot = 4 initial_guess = array([2., 0., 2., 0.]) final_flows = optimize.root(pressure_network, initial_guess, method='lm', args=(Qtot, k)).x assert_array_almost_equal(final_flows, np.ones(4)) class TestLeastSq: def setup_method(self): x = np.linspace(0, 10, 40) a,b,c = 3.1, 42, -304.2 self.x = x self.abc = a,b,c y_true = a*x**2 + b*x + c np.random.seed(0) self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape) def residuals(self, p, y, x): a,b,c = p err = y-(a*x**2 + b*x + c) return err def residuals_jacobian(self, _p, _y, x): return -np.vstack([x**2, x, np.ones_like(x)]).T def test_basic(self): p0 = array([0,0,0]) params_fit, ier = leastsq(self.residuals, p0, args=(self.y_meas, self.x)) assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) # low precision due to random assert_array_almost_equal(params_fit, self.abc, decimal=2) def test_basic_with_gradient(self): p0 = array([0,0,0]) params_fit, ier = leastsq(self.residuals, p0, args=(self.y_meas, self.x), Dfun=self.residuals_jacobian) assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) # low precision due to random assert_array_almost_equal(params_fit, self.abc, decimal=2) def test_full_output(self): p0 = array([[0,0,0]]) full_output = leastsq(self.residuals, p0, args=(self.y_meas, self.x), full_output=True) params_fit, cov_x, infodict, mesg, ier = full_output assert_(ier in (1,2,3,4), 
'solution not found: %s' % mesg) def test_input_untouched(self): p0 = array([0,0,0],dtype=float64) p0_copy = array(p0, copy=True) full_output = leastsq(self.residuals, p0, args=(self.y_meas, self.x), full_output=True) params_fit, cov_x, infodict, mesg, ier = full_output assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg) assert_array_equal(p0, p0_copy) def test_wrong_shape_func_callable(self): func = ReturnShape(1) # x0 is a list of two elements, but func will return an array with # length 1, so this should result in a TypeError. x0 = [1.5, 2.0] assert_raises(TypeError, optimize.leastsq, func, x0) def test_wrong_shape_func_function(self): # x0 is a list of two elements, but func will return an array with # length 1, so this should result in a TypeError. x0 = [1.5, 2.0] assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),)) def test_wrong_shape_Dfun_callable(self): func = ReturnShape(1) deriv_func = ReturnShape((2,2)) assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) def test_wrong_shape_Dfun_function(self): def func(x): return dummy_func(x, (2,)) def deriv_func(x): return dummy_func(x, (3, 3)) assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func) def test_float32(self): # Regression test for gh-1447 def func(p,x,y): q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3] return q - y x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286, 1.231], dtype=np.float32) y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258, 0.034,0.0396], dtype=np.float32) p0 = np.array([1.0,1.0,1.0,1.0]) p1, success = optimize.leastsq(func, p0, args=(x,y)) assert_(success in [1,2,3,4]) assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum()) def test_func_can_raise(self): def func(*args): raise ValueError('I raised') with assert_raises(ValueError, match='I raised'): optimize.leastsq(func, x0=[0]) def test_Dfun_can_raise(self): def func(x): return x - np.array([10]) def deriv_func(*args): raise ValueError('I raised') with assert_raises(ValueError, match='I raised'): optimize.leastsq(func, x0=[0], Dfun=deriv_func) def test_reentrant_func(self): def func(*args): self.test_basic() return self.residuals(*args) p0 = array([0,0,0]) params_fit, ier = leastsq(func, p0, args=(self.y_meas, self.x)) assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) # low precision due to random assert_array_almost_equal(params_fit, self.abc, decimal=2) def test_reentrant_Dfun(self): def deriv_func(*args): self.test_basic() return self.residuals_jacobian(*args) p0 = array([0,0,0]) params_fit, ier = leastsq(self.residuals, p0, args=(self.y_meas, self.x), Dfun=deriv_func) assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier) # low precision due to random assert_array_almost_equal(params_fit, self.abc, decimal=2) def test_concurrent_no_gradient(self): v = sequence_parallel([self.test_basic] * 10) assert all([result is None for result in v]) def test_concurrent_with_gradient(self): v = sequence_parallel([self.test_basic_with_gradient] * 10) assert all([result is None for result in v]) def test_func_input_output_length_check(self): def func(x): return 2 * (x[0] - 3) ** 2 + 1 with assert_raises(TypeError, match='Improper input: func input vector length N='): optimize.leastsq(func, x0=[0, 1]) class TestCurveFit: def setup_method(self): self.y = array([1.0, 3.2, 9.5, 13.7]) self.x = array([1.0, 2.0, 3.0, 4.0]) def test_one_argument(self): def func(x,a): return x**a popt, pcov = curve_fit(func, self.x, self.y) 
assert_(len(popt) == 1) assert_(pcov.shape == (1,1)) assert_almost_equal(popt[0], 1.9149, decimal=4) assert_almost_equal(pcov[0,0], 0.0016, decimal=4) # Test if we get the same with full_output. Regression test for #1415. # Also test if check_finite can be turned off. res = curve_fit(func, self.x, self.y, full_output=1, check_finite=False) (popt2, pcov2, infodict, errmsg, ier) = res assert_array_almost_equal(popt, popt2) def test_two_argument(self): def func(x, a, b): return b*x**a popt, pcov = curve_fit(func, self.x, self.y) assert_(len(popt) == 2) assert_(pcov.shape == (2,2)) assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], decimal=4) def test_func_is_classmethod(self): class test_self: """This class tests if curve_fit passes the correct number of arguments when the model function is a class instance method. """ def func(self, x, a, b): return b * x**a test_self_inst = test_self() popt, pcov = curve_fit(test_self_inst.func, self.x, self.y) assert_(pcov.shape == (2,2)) assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4) assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]], decimal=4) def test_regression_2639(self): # This test fails if epsfcn in leastsq is too large. x = [574.14200000000005, 574.154, 574.16499999999996, 574.17700000000002, 574.18799999999999, 574.19899999999996, 574.21100000000001, 574.22199999999998, 574.23400000000004, 574.245] y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0, 1550.0, 949.0, 841.0] guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0, 0.0035019999999983615, 859.0] good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03, 1.0068462e-02, 8.57450661e+02] def f_double_gauss(x, x0, x1, A0, A1, sigma, c): return (A0*np.exp(-(x-x0)**2/(2.*sigma**2)) + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c) popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000) assert_allclose(popt, good, rtol=1e-5) def test_pcov(self): xdata = np.array([0, 1, 2, 3, 4, 5]) ydata = np.array([1, 1, 5, 7, 8, 12]) sigma = np.array([1, 2, 1, 2, 1, 2]) def f(x, a, b): return a*x + b for method in ['lm', 'trf', 'dogbox']: popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, method=method) perr_scaled = np.sqrt(np.diag(pcov)) assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, method=method) perr_scaled = np.sqrt(np.diag(pcov)) assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3) popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma, absolute_sigma=True, method=method) perr = np.sqrt(np.diag(pcov)) assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3) popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma, absolute_sigma=True, method=method) perr = np.sqrt(np.diag(pcov)) assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3) # infinite variances def f_flat(x, a, b): return a*x pcov_expected = np.array([np.inf]*4).reshape(2, 2) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Covariance of the parameters could not be estimated") popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma) popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0]) assert_(pcov.shape == (2, 2)) assert_array_equal(pcov, pcov_expected) assert_(pcov1.shape == (2, 2)) assert_array_equal(pcov1, pcov_expected) def test_array_like(self): # Test sequence input. Regression test for gh-3037. 
def f_linear(x, a, b): return a*x + b x = [1, 2, 3, 4] y = [3, 5, 7, 9] assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10) def test_indeterminate_covariance(self): # Test that a warning is returned when pcov is indeterminate xdata = np.array([1, 2, 3, 4, 5, 6]) ydata = np.array([1, 2, 3, 4, 5.5, 6]) assert_warns(OptimizeWarning, curve_fit, lambda x, a, b: a*x, xdata, ydata) def test_NaN_handling(self): # Test for correct handling of NaNs in input data: gh-3422 # create input with NaNs xdata = np.array([1, np.nan, 3]) ydata = np.array([1, 2, 3]) assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b, xdata, ydata) assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b, ydata, xdata) assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b, xdata, ydata, **{"check_finite": True}) @staticmethod def _check_nan_policy(f, xdata_with_nan, xdata_without_nan, ydata_with_nan, ydata_without_nan, method): kwargs = {'f': f, 'xdata': xdata_with_nan, 'ydata': ydata_with_nan, 'method': method, 'check_finite': False} # propagate test error_msg = ("`nan_policy='propagate'` is not supported " "by this function.") with assert_raises(ValueError, match=error_msg): curve_fit(**kwargs, nan_policy="propagate", maxfev=2000) # raise test with assert_raises(ValueError, match="The input contains nan"): curve_fit(**kwargs, nan_policy="raise") # omit test result_with_nan, _ = curve_fit(**kwargs, nan_policy="omit") kwargs['xdata'] = xdata_without_nan kwargs['ydata'] = ydata_without_nan result_without_nan, _ = curve_fit(**kwargs) assert_allclose(result_with_nan, result_without_nan) # not valid policy test error_msg = ("nan_policy must be one of " "{'None', 'raise', 'omit'}") with assert_raises(ValueError, match=error_msg): curve_fit(**kwargs, nan_policy="hi") @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) def test_nan_policy_1d(self, method): def f(x, a, b): return a*x + b xdata_with_nan = np.array([2, 3, np.nan, 4, 4, np.nan]) ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7]) xdata_without_nan = np.array([2, 3, 4]) ydata_without_nan = np.array([1, 2, 3]) self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, ydata_with_nan, ydata_without_nan, method) @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) def test_nan_policy_2d(self, method): def f(x, a, b): x1 = x[0, :] x2 = x[1, :] return a*x1 + b + x2 xdata_with_nan = np.array([[2, 3, np.nan, 4, 4, np.nan, 5], [2, 3, np.nan, np.nan, 4, np.nan, 7]]) ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10]) xdata_without_nan = np.array([[2, 3, 5], [2, 3, 7]]) ydata_without_nan = np.array([1, 2, 10]) self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, ydata_with_nan, ydata_without_nan, method) @pytest.mark.parametrize('n', [2, 3]) @pytest.mark.parametrize('method', ["lm", "trf", "dogbox"]) def test_nan_policy_2_3d(self, n, method): def f(x, a, b): x1 = x[..., 0, :].squeeze() x2 = x[..., 1, :].squeeze() return a*x1 + b + x2 xdata_with_nan = np.array([[[2, 3, np.nan, 4, 4, np.nan, 5], [2, 3, np.nan, np.nan, 4, np.nan, 7]]]) xdata_with_nan = xdata_with_nan.squeeze() if n == 2 else xdata_with_nan ydata_with_nan = np.array([1, 2, 5, 3, np.nan, 7, 10]) xdata_without_nan = np.array([[[2, 3, 5], [2, 3, 7]]]) ydata_without_nan = np.array([1, 2, 10]) self._check_nan_policy(f, xdata_with_nan, xdata_without_nan, ydata_with_nan, ydata_without_nan, method) def test_empty_inputs(self): # Test both with and without bounds (regression test for gh-9864) assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], []) 
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [], bounds=(1, 2)) assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], []) assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [], bounds=(1, 2)) def test_function_zero_params(self): # Fit args is zero, so "Unable to determine number of fit parameters." assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4]) def test_None_x(self): # Added in GH10196 popt, pcov = curve_fit(lambda _, a: a * np.arange(10), None, 2 * np.arange(10)) assert_allclose(popt, [2.]) def test_method_argument(self): def f(x, a, b): return a * np.exp(-b*x) xdata = np.linspace(0, 1, 11) ydata = f(xdata, 2., 2.) for method in ['trf', 'dogbox', 'lm', None]: popt, pcov = curve_fit(f, xdata, ydata, method=method) assert_allclose(popt, [2., 2.]) assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown') def test_full_output(self): def f(x, a, b): return a * np.exp(-b * x) xdata = np.linspace(0, 1, 11) ydata = f(xdata, 2., 2.) for method in ['trf', 'dogbox', 'lm', None]: popt, pcov, infodict, errmsg, ier = curve_fit( f, xdata, ydata, method=method, full_output=True) assert_allclose(popt, [2., 2.]) assert "nfev" in infodict assert "fvec" in infodict if method == 'lm' or method is None: assert "fjac" in infodict assert "ipvt" in infodict assert "qtf" in infodict assert isinstance(errmsg, str) assert ier in (1, 2, 3, 4) def test_bounds(self): def f(x, a, b): return a * np.exp(-b*x) xdata = np.linspace(0, 1, 11) ydata = f(xdata, 2., 2.) # The minimum w/out bounds is at [2., 2.], # and with bounds it's at [1.5, smth]. lb = [1., 0] ub = [1.5, 3.] # Test that both variants of the bounds yield the same result bounds = (lb, ub) bounds_class = Bounds(lb, ub) for method in [None, 'trf', 'dogbox']: popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds, method=method) assert_allclose(popt[0], 1.5) popt_class, pcov_class = curve_fit(f, xdata, ydata, bounds=bounds_class, method=method) assert_allclose(popt_class, popt) # With bounds, the starting estimate is feasible. popt, pcov = curve_fit(f, xdata, ydata, method='trf', bounds=([0., 0], [0.6, np.inf])) assert_allclose(popt[0], 0.6) # method='lm' doesn't support bounds. assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds, method='lm') def test_bounds_p0(self): # This test is for issue #5719. The problem was that an initial guess # was ignored when 'trf' or 'dogbox' methods were invoked. def f(x, a): return np.sin(x + a) xdata = np.linspace(-2*np.pi, 2*np.pi, 40) ydata = np.sin(xdata) bounds = (-3 * np.pi, 3 * np.pi) for method in ['trf', 'dogbox']: popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi) popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi, bounds=bounds, method=method) # If the initial guess is ignored, then popt_2 would be close 0. assert_allclose(popt_1, popt_2) def test_jac(self): # Test that Jacobian callable is handled correctly and # weighted if sigma is provided. def f(x, a, b): return a * np.exp(-b*x) def jac(x, a, b): e = np.exp(-b*x) return np.vstack((e, -a * x * e)).T xdata = np.linspace(0, 1, 11) ydata = f(xdata, 2., 2.) # Test numerical options for least_squares backend. for method in ['trf', 'dogbox']: for scheme in ['2-point', '3-point', 'cs']: popt, pcov = curve_fit(f, xdata, ydata, jac=scheme, method=method) assert_allclose(popt, [2, 2]) # Test the analytic option. for method in ['lm', 'trf', 'dogbox']: popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac) assert_allclose(popt, [2, 2]) # Now add an outlier and provide sigma. 
ydata[5] = 100 sigma = np.ones(xdata.shape[0]) sigma[5] = 200 for method in ['lm', 'trf', 'dogbox']: popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method, jac=jac) # Still the optimization process is influenced somehow, # have to set rtol=1e-3. assert_allclose(popt, [2, 2], rtol=1e-3) def test_maxfev_and_bounds(self): # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq) # but with bounds, the parameter is `max_nfev` (via least_squares) x = np.arange(0, 10) y = 2*x popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100) popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100) assert_allclose(popt1, 2, atol=1e-14) assert_allclose(popt2, 2, atol=1e-14) def test_curvefit_simplecovariance(self): def func(x, a, b): return a * np.exp(-b*x) def jac(x, a, b): e = np.exp(-b*x) return np.vstack((e, -a * x * e)).T np.random.seed(0) xdata = np.linspace(0, 4, 50) y = func(xdata, 2.5, 1.3) ydata = y + 0.2 * np.random.normal(size=len(xdata)) sigma = np.zeros(len(xdata)) + 0.2 covar = np.diag(sigma**2) for jac1, jac2 in [(jac, jac), (None, None)]: for absolute_sigma in [False, True]: popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, jac=jac1, absolute_sigma=absolute_sigma) popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar, jac=jac2, absolute_sigma=absolute_sigma) assert_allclose(popt1, popt2, atol=1e-14) assert_allclose(pcov1, pcov2, atol=1e-14) def test_curvefit_covariance(self): def funcp(x, a, b): rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]]) return rotn.dot(a * np.exp(-b*x)) def jacp(x, a, b): rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]]) e = np.exp(-b*x) return rotn.dot(np.vstack((e, -a * x * e)).T) def func(x, a, b): return a * np.exp(-b*x) def jac(x, a, b): e = np.exp(-b*x) return np.vstack((e, -a * x * e)).T np.random.seed(0) xdata = np.arange(1, 4) y = func(xdata, 2.5, 1.0) ydata = y + 0.2 * np.random.normal(size=len(xdata)) sigma = np.zeros(len(xdata)) + 0.2 covar = np.diag(sigma**2) # Get a rotation matrix, and obtain ydatap = R ydata # Chisq = ydata^T C^{-1} ydata # = ydata^T R^T R C^{-1} R^T R ydata # = ydatap^T Cp^{-1} ydatap # Cp^{-1} = R C^{-1} R^T # Cp = R C R^T, since R^-1 = R^T rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]]) ydatap = rotn.dot(ydata) covarp = rotn.dot(covar).dot(rotn.T) for jac1, jac2 in [(jac, jacp), (None, None)]: for absolute_sigma in [False, True]: popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma, jac=jac1, absolute_sigma=absolute_sigma) popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp, jac=jac2, absolute_sigma=absolute_sigma) assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14) assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14) def test_dtypes(self): # regression test for gh-9581: curve_fit fails if x and y dtypes differ x = np.arange(-3, 5) y = 1.5*x + 3.0 + 0.5*np.sin(x) def func(x, a, b): return a*x + b for method in ['lm', 'trf', 'dogbox']: for dtx in [np.float32, np.float64]: for dty in [np.float32, np.float64]: x = x.astype(dtx) y = y.astype(dty) with warnings.catch_warnings(): warnings.simplefilter("error", OptimizeWarning) p, cov = curve_fit(func, x, y, method=method) assert np.isfinite(cov).all() assert not np.allclose(p, 1) # curve_fit's initial value def test_dtypes2(self): # regression test for gh-7117: curve_fit fails if # both inputs are float32 def hyperbola(x, s_1, s_2, o_x, o_y, 
c): b_2 = (s_1 + s_2) / 2 b_1 = (s_2 - s_1) / 2 return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4) min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0]) max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0]) guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5]) params = [-2, .4, -1, -5, 9.5] xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32]) ydata = hyperbola(xdata, *params) # run optimization twice, with xdata being float32 and float64 popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess, bounds=(min_fit, max_fit)) xdata = xdata.astype(np.float32) ydata = hyperbola(xdata, *params) popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess, bounds=(min_fit, max_fit)) assert_allclose(popt_32, popt_64, atol=2e-5) def test_broadcast_y(self): xdata = np.arange(10) target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata)) def fit_func(x, a, b): return a * x ** 2 + b * x - target for method in ['lm', 'trf', 'dogbox']: popt0, pcov0 = curve_fit(fit_func, xdata=xdata, ydata=np.zeros_like(xdata), method=method) popt1, pcov1 = curve_fit(fit_func, xdata=xdata, ydata=0, method=method) assert_allclose(pcov0, pcov1) def test_args_in_kwargs(self): # Ensure that `args` cannot be passed as a keyword argument to `curve_fit` def func(x, a, b): return a * x + b with assert_raises(ValueError): curve_fit(func, xdata=[1, 2, 3, 4], ydata=[5, 9, 13, 17], p0=[1], args=(1,)) def test_data_point_number_validation(self): def func(x, a, b, c, d, e): return a * np.exp(-b * x) + c + d + e with assert_raises(TypeError, match="The number of func parameters="): curve_fit(func, xdata=[1, 2, 3, 4], ydata=[5, 9, 13, 17]) @pytest.mark.filterwarnings('ignore::RuntimeWarning') def test_gh4555(self): # gh-4555 reported that covariance matrices returned by `leastsq` # can have negative diagonal elements and eigenvalues. (In fact, # they can also be asymmetric.) This shows up in the output of # `scipy.optimize.curve_fit`. Check that it has been resolved. def f(x, a, b, c, d, e): return a*np.log(x + 1 + b) + c*np.log(x + 1 + d) + e rng = np.random.default_rng(408113519974467917) n = 100 x = np.arange(n) y = np.linspace(2, 7, n) + rng.random(n) p, cov = optimize.curve_fit(f, x, y, maxfev=100000) assert np.all(np.diag(cov) > 0) eigs = linalg.eigh(cov)[0] # separate line for debugging # some platforms see a small negative eigenvalue assert np.all(eigs > -1e-2) assert_allclose(cov, cov.T) def test_gh4555b(self): # check that PR gh-17247 did not significantly change covariance matrix # for simple cases rng = np.random.default_rng(408113519974467917) def func(x, a, b, c): return a * np.exp(-b * x) + c xdata = np.linspace(0, 4, 50) y = func(xdata, 2.5, 1.3, 0.5) y_noise = 0.2 * rng.normal(size=xdata.size) ydata = y + y_noise _, res = curve_fit(func, xdata, ydata) # reference from commit 1d80a2f254380d2b45733258ca42eb6b55c8755b ref = [[+0.0158972536486215, 0.0069207183284242, -0.0007474400714749], [+0.0069207183284242, 0.0205057958128679, +0.0053997711275403], [-0.0007474400714749, 0.0053997711275403, +0.0027833930320877]] # Linux_Python_38_32bit_full fails with default tolerance assert_allclose(res, ref, 2e-7) def test_gh13670(self): # gh-13670 reported that `curve_fit` executes callables # with the same values of the parameters at the beginning of # optimization. Check that this has been resolved. 
rng = np.random.default_rng(8250058582555444926) x = np.linspace(0, 3, 101) y = 2 * x + 1 + rng.normal(size=101) * 0.5 def line(x, *p): assert not np.all(line.last_p == p) line.last_p = p return x * p[0] + p[1] def jac(x, *p): assert not np.all(jac.last_p == p) jac.last_p = p return np.array([x, np.ones_like(x)]).T line.last_p = None jac.last_p = None p0 = np.array([1.0, 5.0]) curve_fit(line, x, y, p0, method='lm', jac=jac) class TestFixedPoint: def test_scalar_trivial(self): # f(x) = 2x; fixed point should be x=0 def func(x): return 2.0*x x0 = 1.0 x = fixed_point(func, x0) assert_almost_equal(x, 0.0) def test_scalar_basic1(self): # f(x) = x**2; x0=1.05; fixed point should be x=1 def func(x): return x**2 x0 = 1.05 x = fixed_point(func, x0) assert_almost_equal(x, 1.0) def test_scalar_basic2(self): # f(x) = x**0.5; x0=1.05; fixed point should be x=1 def func(x): return x**0.5 x0 = 1.05 x = fixed_point(func, x0) assert_almost_equal(x, 1.0) def test_array_trivial(self): def func(x): return 2.0*x x0 = [0.3, 0.15] with np.errstate(all='ignore'): x = fixed_point(func, x0) assert_almost_equal(x, [0.0, 0.0]) def test_array_basic1(self): # f(x) = c * x**2; fixed point should be x=1/c def func(x, c): return c * x**2 c = array([0.75, 1.0, 1.25]) x0 = [1.1, 1.15, 0.9] with np.errstate(all='ignore'): x = fixed_point(func, x0, args=(c,)) assert_almost_equal(x, 1.0/c) def test_array_basic2(self): # f(x) = c * x**0.5; fixed point should be x=c**2 def func(x, c): return c * x**0.5 c = array([0.75, 1.0, 1.25]) x0 = [0.8, 1.1, 1.1] x = fixed_point(func, x0, args=(c,)) assert_almost_equal(x, c**2) def test_lambertw(self): # python-list/2010-December/594592.html xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0, args=(), xtol=1e-12, maxiter=500) assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0) assert_allclose(xxroot, lambertw(1)/2) def test_no_acceleration(self): # github issue 5460 ks = 2 kl = 6 m = 1.3 n0 = 1.001 i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1)) def func(n): return np.log(kl/ks/n) / np.log(i0*n/(n - 1)) + 1 n = fixed_point(func, n0, method='iteration') assert_allclose(n, m)
40,676
36.629047
113
py
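Illustrative sketch (not part of the dataset row above): the core scipy.optimize.curve_fit call pattern exercised by the test_minpack.py tests, fitting an exponential decay to noisy data. The model, seed, and noise level are arbitrary illustration values, not taken from the tests.

import numpy as np
from scipy.optimize import curve_fit

def model(x, a, b):
    # simple exponential decay, y = a * exp(-b * x)
    return a * np.exp(-b * x)

rng = np.random.default_rng(0)
xdata = np.linspace(0, 4, 50)
ydata = model(xdata, 2.5, 1.3) + 0.05 * rng.normal(size=xdata.size)

# popt holds the fitted [a, b]; pcov is the estimated covariance of popt.
popt, pcov = curve_fit(model, xdata, ydata, p0=[1.0, 1.0])
perr = np.sqrt(np.diag(pcov))  # one-sigma parameter uncertainties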
scipy
scipy-main/scipy/optimize/tests/test_hessian_update_strategy.py
import numpy as np from copy import deepcopy from numpy.linalg import norm from numpy.testing import (TestCase, assert_array_almost_equal, assert_array_equal, assert_array_less) from scipy.optimize import (BFGS, SR1) class Rosenbrock: """Rosenbrock function. The following optimization problem: minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) """ def __init__(self, n=2, random_state=0): rng = np.random.RandomState(random_state) self.x0 = rng.uniform(-1, 1, n) self.x_opt = np.ones(n) def fun(self, x): x = np.asarray(x) r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, axis=0) return r def grad(self, x): x = np.asarray(x) xm = x[1:-1] xm_m1 = x[:-2] xm_p1 = x[2:] der = np.zeros_like(x) der[1:-1] = (200 * (xm - xm_m1**2) - 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) der[-1] = 200 * (x[-1] - x[-2]**2) return der def hess(self, x): x = np.atleast_1d(x) H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) diagonal = np.zeros(len(x), dtype=x.dtype) diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 diagonal[-1] = 200 diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] H = H + np.diag(diagonal) return H class TestHessianUpdateStrategy(TestCase): def test_hessian_initialization(self): quasi_newton = (BFGS(), SR1()) for qn in quasi_newton: qn.initialize(5, 'hess') B = qn.get_matrix() assert_array_equal(B, np.eye(5)) # For this list of points, it is known # that no exception occur during the # Hessian update. Hence no update is # skiped or damped. def test_rosenbrock_with_no_exception(self): # Define auxiliar problem prob = Rosenbrock(n=5) # Define iteration points x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338], [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691], [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041], [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744], [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623], [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448], [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437], [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581], [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553], [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149], [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663], [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288], [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356], [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912], 
[0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305], [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047], [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297], [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032], [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786], [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]] # Get iteration points grad_list = [prob.grad(x) for x in x_list] delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) for i in range(len(x_list)-1)] delta_grad = [grad_list[i+1]-grad_list[i] for i in range(len(grad_list)-1)] # Check curvature condition for s, y in zip(delta_x, delta_grad): if np.dot(s, y) <= 0: raise ArithmeticError() # Define QuasiNewton update for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4), SR1(init_scale=1)): hess = deepcopy(quasi_newton) inv_hess = deepcopy(quasi_newton) hess.initialize(len(x_list[0]), 'hess') inv_hess.initialize(len(x_list[0]), 'inv_hess') # Compare the hessian and its inverse for s, y in zip(delta_x, delta_grad): hess.update(s, y) inv_hess.update(s, y) B = hess.get_matrix() H = inv_hess.get_matrix() assert_array_almost_equal(np.linalg.inv(B), H, decimal=10) B_true = prob.hess(x_list[len(delta_x)]) assert_array_less(norm(B - B_true)/norm(B_true), 0.1) def test_SR1_skip_update(self): # Define auxiliary problem prob = Rosenbrock(n=5) # Define iteration points x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610], [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184], [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563], [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537], [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809], [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541], [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401], [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230], [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960], [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702], [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661], [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276], [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185], [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]] # Get iteration points grad_list = [prob.grad(x) for x in x_list] delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) for i in range(len(x_list)-1)] delta_grad = [grad_list[i+1]-grad_list[i] for i in range(len(grad_list)-1)] hess = SR1(init_scale=1, min_denominator=1e-2) hess.initialize(len(x_list[0]), 'hess') # Compare the Hessian and its inverse for i in range(len(delta_x)-1): s = delta_x[i] y = delta_grad[i] hess.update(s, y) # Test skip update B = np.copy(hess.get_matrix()) s = delta_x[17] y = delta_grad[17] hess.update(s, y) B_updated = np.copy(hess.get_matrix()) assert_array_equal(B, B_updated) def test_BFGS_skip_update(self): # Define auxiliar problem prob = Rosenbrock(n=5) # Define iteration points x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040], [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286], [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606], [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750], [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699], [0.1382378, -0.0276671, 
0.0266161, -0.0074060, 0.02801610], [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]] # Get iteration points grad_list = [prob.grad(x) for x in x_list] delta_x = [np.array(x_list[i+1])-np.array(x_list[i]) for i in range(len(x_list)-1)] delta_grad = [grad_list[i+1]-grad_list[i] for i in range(len(grad_list)-1)] hess = BFGS(init_scale=1, min_curvature=10) hess.initialize(len(x_list[0]), 'hess') # Compare the Hessian and its inverse for i in range(len(delta_x)-1): s = delta_x[i] y = delta_grad[i] hess.update(s, y) # Test skip update B = np.copy(hess.get_matrix()) s = delta_x[5] y = delta_grad[5] hess.update(s, y) B_updated = np.copy(hess.get_matrix()) assert_array_equal(B, B_updated)
10,112
47.38756
77
py
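Illustrative sketch (not part of the dataset row above): the HessianUpdateStrategy API exercised by test_hessian_update_strategy.py — initialize, feed (step, gradient-difference) pairs to update(), and read the approximation back with get_matrix(). The simple quadratic objective is an arbitrary stand-in for the Rosenbrock problem used in the tests.

import numpy as np
from scipy.optimize import BFGS

def grad(x):
    # gradient of f(x) = x0**2 + 10*x1**2
    return np.array([2.0 * x[0], 20.0 * x[1]])

hess = BFGS(init_scale=1.0)
hess.initialize(2, 'hess')              # approximate the Hessian itself
x = np.array([1.0, 1.0])
for s in [np.array([0.1, 0.0]), np.array([0.0, 0.1])]:
    y = grad(x + s) - grad(x)           # gradient difference along the step
    hess.update(s, y)                   # quasi-Newton (BFGS) update
    x = x + s
B = hess.get_matrix()                   # current Hessian approximation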
scipy
scipy-main/scipy/optimize/tests/test_trustregion.py
""" Unit tests for trust-region optimization routines. To run it in its simplest form:: nosetests test_optimize.py """ import pytest import numpy as np from numpy.testing import assert_, assert_equal, assert_allclose from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess, rosen_hess_prod) class Accumulator: """ This is for testing callbacks.""" def __init__(self): self.count = 0 self.accum = None def __call__(self, x): self.count += 1 if self.accum is None: self.accum = np.array(x) else: self.accum += x class TestTrustRegionSolvers: def setup_method(self): self.x_opt = [1.0, 1.0] self.easy_guess = [2.0, 2.0] self.hard_guess = [-1.2, 1.0] def test_dogleg_accuracy(self): # test the accuracy and the return_all option x0 = self.hard_guess r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8, method='dogleg', options={'return_all': True},) assert_allclose(x0, r['allvecs'][0]) assert_allclose(r['x'], r['allvecs'][-1]) assert_allclose(r['x'], self.x_opt) def test_dogleg_callback(self): # test the callback mechanism and the maxiter and return_all options accumulator = Accumulator() maxiter = 5 r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess, callback=accumulator, method='dogleg', options={'return_all': True, 'maxiter': maxiter},) assert_equal(accumulator.count, maxiter) assert_equal(len(r['allvecs']), maxiter+1) assert_allclose(r['x'], r['allvecs'][-1]) assert_allclose(sum(r['allvecs'][1:]), accumulator.accum) def test_dogleg_user_warning(self): with pytest.warns(RuntimeWarning, match=r'Maximum number of iterations'): minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess, method='dogleg', options={'disp': True, 'maxiter': 1}, ) def test_solver_concordance(self): # Assert that dogleg uses fewer iterations than ncg on the Rosenbrock # test function, although this does not necessarily mean # that dogleg is faster or better than ncg even for this function # and especially not for other test functions. 
f = rosen g = rosen_der h = rosen_hess for x0 in (self.easy_guess, self.hard_guess): r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8, method='dogleg', options={'return_all': True}) r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8, method='trust-ncg', options={'return_all': True}) r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8, method='trust-krylov', options={'return_all': True}) r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8, method='newton-cg', options={'return_all': True}) r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8, method='trust-exact', options={'return_all': True}) assert_allclose(self.x_opt, r_dogleg['x']) assert_allclose(self.x_opt, r_trust_ncg['x']) assert_allclose(self.x_opt, r_trust_krylov['x']) assert_allclose(self.x_opt, r_ncg['x']) assert_allclose(self.x_opt, r_iterative['x']) assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs'])) def test_trust_ncg_hessp(self): for x0 in (self.easy_guess, self.hard_guess, self.x_opt): r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod, tol=1e-8, method='trust-ncg') assert_allclose(self.x_opt, r['x']) def test_trust_ncg_start_in_optimum(self): r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, tol=1e-8, method='trust-ncg') assert_allclose(self.x_opt, r['x']) def test_trust_krylov_start_in_optimum(self): r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, tol=1e-8, method='trust-krylov') assert_allclose(self.x_opt, r['x']) def test_trust_exact_start_in_optimum(self): r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess, tol=1e-8, method='trust-exact') assert_allclose(self.x_opt, r['x'])
4,701
40.610619
78
py
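Illustrative sketch (not part of the dataset row above): the minimize() call pattern for the trust-region solvers compared in test_trustregion.py. The starting point mirrors the "hard" Rosenbrock guess from the tests; picking 'dogleg' rather than 'trust-ncg' or 'trust-exact' is an arbitrary choice here.

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

x0 = np.array([-1.2, 1.0])
# dogleg needs both the gradient and the full Hessian of the objective.
res = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess,
               method='dogleg', tol=1e-8)
# res.x should be close to [1, 1], the minimizer of the Rosenbrock function.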
scipy
scipy-main/scipy/optimize/tests/test_constraint_conversion.py
""" Unit test for constraint conversion """ import numpy as np from numpy.testing import (assert_array_almost_equal, assert_allclose, assert_warns, suppress_warnings) import pytest from scipy.optimize import (NonlinearConstraint, LinearConstraint, OptimizeWarning, minimize, BFGS) from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock, IneqRosenbrock, EqIneqRosenbrock, BoundedRosenbrock, Elec) class TestOldToNew: x0 = (2, 0) bnds = ((0, None), (0, None)) method = "trust-constr" def test_constraint_dictionary_1(self): def fun(x): return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) with suppress_warnings() as sup: sup.filter(UserWarning, "delta_grad == 0.0") res = minimize(fun, self.x0, method=self.method, bounds=self.bnds, constraints=cons) assert_allclose(res.x, [1.4, 1.7], rtol=1e-4) assert_allclose(res.fun, 0.8, rtol=1e-4) def test_constraint_dictionary_2(self): def fun(x): return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 cons = {'type': 'eq', 'fun': lambda x, p1, p2: p1*x[0] - p2*x[1], 'args': (1, 1.1), 'jac': lambda x, p1, p2: np.array([[p1, -p2]])} with suppress_warnings() as sup: sup.filter(UserWarning, "delta_grad == 0.0") res = minimize(fun, self.x0, method=self.method, bounds=self.bnds, constraints=cons) assert_allclose(res.x, [1.7918552, 1.62895927]) assert_allclose(res.fun, 1.3857466063348418) def test_constraint_dictionary_3(self): def fun(x): return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)] with suppress_warnings() as sup: sup.filter(UserWarning, "delta_grad == 0.0") res = minimize(fun, self.x0, method=self.method, bounds=self.bnds, constraints=cons) assert_allclose(res.x, [1.75, 1.75], rtol=1e-4) assert_allclose(res.fun, 1.125, rtol=1e-4) class TestNewToOld: def test_multiple_constraint_objects(self): def fun(x): return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 x0 = [2, 0, 1] coni = [] # only inequality constraints (can use cobyla) methods = ["slsqp", "cobyla", "trust-constr"] # mixed old and new coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) coni.append([LinearConstraint([1, -2, 0], -2, np.inf), NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf), NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]) for con in coni: funs = {} for method in methods: with suppress_warnings() as sup: sup.filter(UserWarning) result = minimize(fun, x0, method=method, constraints=con) funs[method] = result.fun assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4) assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4) def test_individual_constraint_objects(self): def fun(x): return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 x0 = [2, 0, 1] cone = [] # with equality constraints (can't use cobyla) coni = [] # only inequality constraints (can use cobyla) methods = ["slsqp", "cobyla", "trust-constr"] # nonstandard data types for constraint equality bounds cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1)) cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21])) cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.array([1.21]))) # multiple equalities cone.append(NonlinearConstraint( 
lambda x: [x[0] - x[1], x[1] - x[2]], 1.21, 1.21)) # two same equalities cone.append(NonlinearConstraint( lambda x: [x[0] - x[1], x[1] - x[2]], [1.21, 1.4], [1.21, 1.4])) # two different equalities cone.append(NonlinearConstraint( lambda x: [x[0] - x[1], x[1] - x[2]], [1.21, 1.21], 1.21)) # equality specified two ways cone.append(NonlinearConstraint( lambda x: [x[0] - x[1], x[1] - x[2]], [1.21, -np.inf], [1.21, np.inf])) # equality + unbounded # nonstandard data types for constraint inequality bounds coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf)) coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf)) coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.array([np.inf]))) coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3)) coni.append(NonlinearConstraint(lambda x: x[0] - x[1], np.array(-np.inf), -3)) # multiple inequalities/equalities coni.append(NonlinearConstraint( lambda x: [x[0] - x[1], x[1] - x[2]], 1.21, np.inf)) # two same inequalities cone.append(NonlinearConstraint( lambda x: [x[0] - x[1], x[1] - x[2]], [1.21, -np.inf], [1.21, 1.4])) # mixed equality/inequality coni.append(NonlinearConstraint( lambda x: [x[0] - x[1], x[1] - x[2]], [1.1, .8], [1.2, 1.4])) # bounded above and below coni.append(NonlinearConstraint( lambda x: [x[0] - x[1], x[1] - x[2]], [-1.2, -1.4], [-1.1, -.8])) # - bounded above and below # quick check of LinearConstraint class (very little new code to test) cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21)) cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21)) cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], [1.21, -np.inf], [1.21, 1.4])) for con in coni: funs = {} for method in methods: with suppress_warnings() as sup: sup.filter(UserWarning) result = minimize(fun, x0, method=method, constraints=con) funs[method] = result.fun assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3) for con in cone: funs = {} for method in methods[::2]: # skip cobyla with suppress_warnings() as sup: sup.filter(UserWarning) result = minimize(fun, x0, method=method, constraints=con) funs[method] = result.fun assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3) class TestNewToOldSLSQP: method = 'slsqp' elec = Elec(n_electrons=2) elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047, -0.73597044, 0.34180668, -0.34180667]) brock = BoundedRosenbrock() brock.x_opt = [0, 0] list_of_problems = [Maratos(), HyperbolicIneq(), Rosenbrock(), IneqRosenbrock(), EqIneqRosenbrock(), elec, brock ] def test_list_of_problems(self): for prob in self.list_of_problems: with suppress_warnings() as sup: sup.filter(UserWarning) result = minimize(prob.fun, prob.x0, method=self.method, bounds=prob.bounds, constraints=prob.constr) assert_array_almost_equal(result.x, prob.x_opt, decimal=3) def test_warn_mixed_constraints(self): # warns about inefficiency of mixed equality/inequality constraints def fun(x): return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]], [1.1, .8], [1.1, 1.4]) bnds = ((0, None), (0, None), (0, None)) with suppress_warnings() as sup: sup.filter(UserWarning, "delta_grad == 0.0") assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1), method=self.method, bounds=bnds, constraints=cons) def test_warn_ignored_options(self): # warns about constraint options being ignored def fun(x): return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2 + (x[2] - 0.75) ** 2 
x0 = (2, 0, 1) if self.method == "slsqp": bnds = ((0, None), (0, None), (0, None)) else: bnds = None cons = NonlinearConstraint(lambda x: x[0], 2, np.inf) res = minimize(fun, x0, method=self.method, bounds=bnds, constraints=cons) # no warnings without constraint options assert_allclose(res.fun, 1) cons = LinearConstraint([1, 0, 0], 2, np.inf) res = minimize(fun, x0, method=self.method, bounds=bnds, constraints=cons) # no warnings without constraint options assert_allclose(res.fun, 1) cons = [] cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, keep_feasible=True)) cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, hess=BFGS())) cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, finite_diff_jac_sparsity=42)) cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf, finite_diff_rel_step=42)) cons.append(LinearConstraint([1, 0, 0], 2, np.inf, keep_feasible=True)) for con in cons: assert_warns(OptimizeWarning, minimize, fun, x0, method=self.method, bounds=bnds, constraints=cons) class TestNewToOldCobyla: method = 'cobyla' list_of_problems = [ Elec(n_electrons=2), Elec(n_electrons=4), ] @pytest.mark.slow def test_list_of_problems(self): for prob in self.list_of_problems: with suppress_warnings() as sup: sup.filter(UserWarning) truth = minimize(prob.fun, prob.x0, method='trust-constr', bounds=prob.bounds, constraints=prob.constr) result = minimize(prob.fun, prob.x0, method=self.method, bounds=prob.bounds, constraints=prob.constr) assert_allclose(result.fun, truth.fun, rtol=1e-3)
11,887
42.229091
83
py
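Illustrative sketch (not part of the dataset row above): the constraint-conversion behaviour covered by test_constraint_conversion.py — the same feasible region written with the new LinearConstraint/NonlinearConstraint classes and with the old dict format, both accepted by minimize(). The objective and the constraint bounds are arbitrary illustration values.

import numpy as np
from scipy.optimize import minimize, LinearConstraint, NonlinearConstraint

def fun(x):
    return (x[0] - 1)**2 + (x[1] - 2.5)**2

# New-style constraints: x0 - 2*x1 >= -2 and -1 <= x0 - x1 <= 1.
new_style = [LinearConstraint([1, -2], -2, np.inf),
             NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)]
# The same region in the old dict form (each 'fun' must be >= 0).
old_style = [{'type': 'ineq', 'fun': lambda x: x[0] - 2*x[1] + 2},
             {'type': 'ineq', 'fun': lambda x: x[0] - x[1] + 1},
             {'type': 'ineq', 'fun': lambda x: x[1] - x[0] + 1}]

res_new = minimize(fun, [2, 0], method='SLSQP', constraints=new_style)
res_old = minimize(fun, [2, 0], method='SLSQP', constraints=old_style)
# Both runs should reach approximately the same objective value.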
scipy
scipy-main/scipy/optimize/tests/test_optimize.py
""" Unit tests for optimization routines from optimize.py Authors: Ed Schofield, Nov 2005 Andrew Straw, April 2008 To run it in its simplest form:: nosetests test_optimize.py """ import itertools import platform import numpy as np from numpy.testing import (assert_allclose, assert_equal, assert_almost_equal, assert_no_warnings, assert_warns, assert_array_less, suppress_warnings) import pytest from pytest import raises as assert_raises from scipy import optimize from scipy.optimize._minimize import Bounds, NonlinearConstraint from scipy.optimize._minimize import (MINIMIZE_METHODS, MINIMIZE_METHODS_NEW_CB, MINIMIZE_SCALAR_METHODS) from scipy.optimize._linprog import LINPROG_METHODS from scipy.optimize._root import ROOT_METHODS from scipy.optimize._root_scalar import ROOT_SCALAR_METHODS from scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS from scipy.optimize._optimize import MemoizeJac, show_options, OptimizeResult def test_check_grad(): # Verify if check_grad is able to estimate the derivative of the # expit (logistic sigmoid) function. def expit(x): return 1 / (1 + np.exp(-x)) def der_expit(x): return np.exp(-x) / (1 + np.exp(-x))**2 x0 = np.array([1.5]) r = optimize.check_grad(expit, der_expit, x0) assert_almost_equal(r, 0) r = optimize.check_grad(expit, der_expit, x0, direction='random', seed=1234) assert_almost_equal(r, 0) r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6) assert_almost_equal(r, 0) r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6, direction='random', seed=1234) assert_almost_equal(r, 0) # Check if the epsilon parameter is being considered. r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1) - 0) assert r > 1e-7 r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1, direction='random', seed=1234) - 0) assert r > 1e-7 def x_sinx(x): return (x*np.sin(x)).sum() def der_x_sinx(x): return np.sin(x) + x*np.cos(x) x0 = np.arange(0, 2, 0.2) r = optimize.check_grad(x_sinx, der_x_sinx, x0, direction='random', seed=1234) assert_almost_equal(r, 0) assert_raises(ValueError, optimize.check_grad, x_sinx, der_x_sinx, x0, direction='random_projection', seed=1234) # checking can be done for derivatives of vector valued functions r = optimize.check_grad(himmelblau_grad, himmelblau_hess, himmelblau_x0, direction='all', seed=1234) assert r < 5e-7 class CheckOptimize: """ Base test case for a simple constrained entropy maximization problem (the machine translation example of Berger et al in Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) 
""" def setup_method(self): self.F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]]) self.K = np.array([1., 0.3, 0.5]) self.startparams = np.zeros(3, np.float64) self.solution = np.array([0., -0.524869316, 0.487525860]) self.maxiter = 1000 self.funccalls = 0 self.gradcalls = 0 self.trace = [] def func(self, x): self.funccalls += 1 if self.funccalls > 6000: raise RuntimeError("too many iterations in optimization routine") log_pdot = np.dot(self.F, x) logZ = np.log(sum(np.exp(log_pdot))) f = logZ - np.dot(self.K, x) self.trace.append(np.copy(x)) return f def grad(self, x): self.gradcalls += 1 log_pdot = np.dot(self.F, x) logZ = np.log(sum(np.exp(log_pdot))) p = np.exp(log_pdot - logZ) return np.dot(self.F.transpose(), p) - self.K def hess(self, x): log_pdot = np.dot(self.F, x) logZ = np.log(sum(np.exp(log_pdot))) p = np.exp(log_pdot - logZ) return np.dot(self.F.T, np.dot(np.diag(p), self.F - np.dot(self.F.T, p))) def hessp(self, x, p): return np.dot(self.hess(x), p) class CheckOptimizeParameterized(CheckOptimize): def test_cg(self): # conjugate gradient optimization routine if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} res = optimize.minimize(self.func, self.startparams, args=(), method='CG', jac=self.grad, options=opts) params, fopt, func_calls, grad_calls, warnflag = \ res['x'], res['fun'], res['nfev'], res['njev'], res['status'] else: retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (), maxiter=self.maxiter, full_output=True, disp=self.disp, retall=False) (params, fopt, func_calls, grad_calls, warnflag) = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.7.0. Don't allow them to increase. assert self.funccalls == 9, self.funccalls assert self.gradcalls == 7, self.gradcalls # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[2:4], [[0, -0.5, 0.5], [0, -5.05700028e-01, 4.95985862e-01]], atol=1e-14, rtol=1e-7) def test_cg_cornercase(self): def f(r): return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2 # Check several initial guesses. (Too far away from the # minimum, the function ends up in the flat region of exp.) for x0 in np.linspace(-0.75, 3, 71): sol = optimize.minimize(f, [x0], method='CG') assert sol.success assert_allclose(sol.x, [0.5], rtol=1e-5) def test_bfgs(self): # Broyden-Fletcher-Goldfarb-Shanno optimization routine if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} res = optimize.minimize(self.func, self.startparams, jac=self.grad, method='BFGS', args=(), options=opts) params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = ( res['x'], res['fun'], res['jac'], res['hess_inv'], res['nfev'], res['njev'], res['status']) else: retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad, args=(), maxiter=self.maxiter, full_output=True, disp=self.disp, retall=False) (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.7.0. Don't allow them to increase. 
assert self.funccalls == 10, self.funccalls assert self.gradcalls == 8, self.gradcalls # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[6:8], [[0, -5.25060743e-01, 4.87748473e-01], [0, -5.24885582e-01, 4.87530347e-01]], atol=1e-14, rtol=1e-7) @pytest.mark.filterwarnings('ignore::UserWarning') def test_bfgs_infinite(self): # Test corner case where -Inf is the minimum. See gh-2019. def func(x): return -np.e ** (-x) def fprime(x): return -func(x) x0 = [0] with np.errstate(over='ignore'): if self.use_wrapper: opts = {'disp': self.disp} x = optimize.minimize(func, x0, jac=fprime, method='BFGS', args=(), options=opts)['x'] else: x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp) assert not np.isfinite(func(x)) def test_bfgs_xrtol(self): # test for #17345 to test xrtol parameter x0 = [1.3, 0.7, 0.8, 1.9, 1.2] res = optimize.minimize(optimize.rosen, x0, method='bfgs', options={'xrtol': 1e-3}) ref = optimize.minimize(optimize.rosen, x0, method='bfgs', options={'gtol': 1e-3}) assert res.nit != ref.nit def test_powell(self): # Powell (direction set) optimization routine if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} res = optimize.minimize(self.func, self.startparams, args=(), method='Powell', options=opts) params, fopt, direc, numiter, func_calls, warnflag = ( res['x'], res['fun'], res['direc'], res['nit'], res['nfev'], res['status']) else: retval = optimize.fmin_powell(self.func, self.startparams, args=(), maxiter=self.maxiter, full_output=True, disp=self.disp, retall=False) (params, fopt, direc, numiter, func_calls, warnflag) = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # params[0] does not affect the objective function assert_allclose(params[1:], self.solution[1:], atol=5e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.7.0. Don't allow them to increase. # # However, some leeway must be added: the exact evaluation # count is sensitive to numerical error, and floating-point # computations are not bit-for-bit reproducible across # machines, and when using e.g., MKL, data alignment # etc., affect the rounding error. # assert self.funccalls <= 116 + 20, self.funccalls assert self.gradcalls == 0, self.gradcalls @pytest.mark.xfail(reason="This part of test_powell fails on some " "platforms, but the solution returned by powell is " "still valid.") def test_powell_gh14014(self): # This part of test_powell started failing on some CI platforms; # see gh-14014. Since the solution is still correct and the comments # in test_powell suggest that small differences in the bits are known # to change the "trace" of the solution, seems safe to xfail to get CI # green now and investigate later. 
# Powell (direction set) optimization routine if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} res = optimize.minimize(self.func, self.startparams, args=(), method='Powell', options=opts) params, fopt, direc, numiter, func_calls, warnflag = ( res['x'], res['fun'], res['direc'], res['nit'], res['nfev'], res['status']) else: retval = optimize.fmin_powell(self.func, self.startparams, args=(), maxiter=self.maxiter, full_output=True, disp=self.disp, retall=False) (params, fopt, direc, numiter, func_calls, warnflag) = retval # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[34:39], [[0.72949016, -0.44156936, 0.47100962], [0.72949016, -0.44156936, 0.48052496], [1.45898031, -0.88313872, 0.95153458], [0.72949016, -0.44156936, 0.47576729], [1.72949016, -0.44156936, 0.47576729]], atol=1e-14, rtol=1e-7) def test_powell_bounded(self): # Powell (direction set) optimization routine # same as test_powell above, but with bounds bounds = [(-np.pi, np.pi) for _ in self.startparams] if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} res = optimize.minimize(self.func, self.startparams, args=(), bounds=bounds, method='Powell', options=opts) params, func_calls = (res['x'], res['nfev']) assert func_calls == self.funccalls assert_allclose(self.func(params), self.func(self.solution), atol=1e-6, rtol=1e-5) # The exact evaluation count is sensitive to numerical error, and # floating-point computations are not bit-for-bit reproducible # across machines, and when using e.g. MKL, data alignment etc. # affect the rounding error. # It takes 155 calls on my machine, but we can add the same +20 # margin as is used in `test_powell` assert self.funccalls <= 155 + 20 assert self.gradcalls == 0 def test_neldermead(self): # Nelder-Mead simplex algorithm if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} res = optimize.minimize(self.func, self.startparams, args=(), method='Nelder-mead', options=opts) params, fopt, numiter, func_calls, warnflag = ( res['x'], res['fun'], res['nit'], res['nfev'], res['status']) else: retval = optimize.fmin(self.func, self.startparams, args=(), maxiter=self.maxiter, full_output=True, disp=self.disp, retall=False) (params, fopt, numiter, func_calls, warnflag) = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.7.0. Don't allow them to increase. assert self.funccalls == 167, self.funccalls assert self.gradcalls == 0, self.gradcalls # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[76:78], [[0.1928968, -0.62780447, 0.35166118], [0.19572515, -0.63648426, 0.35838135]], atol=1e-14, rtol=1e-7) def test_neldermead_initial_simplex(self): # Nelder-Mead simplex algorithm simplex = np.zeros((4, 3)) simplex[...] 
= self.startparams for j in range(3): simplex[j+1, j] += 0.1 if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': False, 'return_all': True, 'initial_simplex': simplex} res = optimize.minimize(self.func, self.startparams, args=(), method='Nelder-mead', options=opts) params, fopt, numiter, func_calls, warnflag = (res['x'], res['fun'], res['nit'], res['nfev'], res['status']) assert_allclose(res['allvecs'][0], simplex[0]) else: retval = optimize.fmin(self.func, self.startparams, args=(), maxiter=self.maxiter, full_output=True, disp=False, retall=False, initial_simplex=simplex) (params, fopt, numiter, func_calls, warnflag) = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.17.0. Don't allow them to increase. assert self.funccalls == 100, self.funccalls assert self.gradcalls == 0, self.gradcalls # Ensure that the function behaves the same; this is from SciPy 0.15.0 assert_allclose(self.trace[50:52], [[0.14687474, -0.5103282, 0.48252111], [0.14474003, -0.5282084, 0.48743951]], atol=1e-14, rtol=1e-7) def test_neldermead_initial_simplex_bad(self): # Check it fails with a bad simplices bad_simplices = [] simplex = np.zeros((3, 2)) simplex[...] = self.startparams[:2] for j in range(2): simplex[j+1, j] += 0.1 bad_simplices.append(simplex) simplex = np.zeros((3, 3)) bad_simplices.append(simplex) for simplex in bad_simplices: if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': False, 'return_all': False, 'initial_simplex': simplex} assert_raises(ValueError, optimize.minimize, self.func, self.startparams, args=(), method='Nelder-mead', options=opts) else: assert_raises(ValueError, optimize.fmin, self.func, self.startparams, args=(), maxiter=self.maxiter, full_output=True, disp=False, retall=False, initial_simplex=simplex) def test_ncg_negative_maxiter(self): # Regression test for gh-8241 opts = {'maxiter': -1} result = optimize.minimize(self.func, self.startparams, method='Newton-CG', jac=self.grad, args=(), options=opts) assert result.status == 1 def test_ncg(self): # line-search Newton conjugate gradient optimization routine if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} retval = optimize.minimize(self.func, self.startparams, method='Newton-CG', jac=self.grad, args=(), options=opts)['x'] else: retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, args=(), maxiter=self.maxiter, full_output=False, disp=self.disp, retall=False) params = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.7.0. Don't allow them to increase. 
assert self.funccalls == 7, self.funccalls assert self.gradcalls <= 22, self.gradcalls # 0.13.0 # assert self.gradcalls <= 18, self.gradcalls # 0.9.0 # assert self.gradcalls == 18, self.gradcalls # 0.8.0 # assert self.gradcalls == 22, self.gradcalls # 0.7.0 # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[3:5], [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], atol=1e-6, rtol=1e-7) def test_ncg_hess(self): # Newton conjugate gradient with Hessian if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} retval = optimize.minimize(self.func, self.startparams, method='Newton-CG', jac=self.grad, hess=self.hess, args=(), options=opts)['x'] else: retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, fhess=self.hess, args=(), maxiter=self.maxiter, full_output=False, disp=self.disp, retall=False) params = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.7.0. Don't allow them to increase. assert self.funccalls <= 7, self.funccalls # gh10673 assert self.gradcalls <= 18, self.gradcalls # 0.9.0 # assert self.gradcalls == 18, self.gradcalls # 0.8.0 # assert self.gradcalls == 22, self.gradcalls # 0.7.0 # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[3:5], [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], atol=1e-6, rtol=1e-7) def test_ncg_hessp(self): # Newton conjugate gradient with Hessian times a vector p. if self.use_wrapper: opts = {'maxiter': self.maxiter, 'disp': self.disp, 'return_all': False} retval = optimize.minimize(self.func, self.startparams, method='Newton-CG', jac=self.grad, hessp=self.hessp, args=(), options=opts)['x'] else: retval = optimize.fmin_ncg(self.func, self.startparams, self.grad, fhess_p=self.hessp, args=(), maxiter=self.maxiter, full_output=False, disp=self.disp, retall=False) params = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.7.0. Don't allow them to increase. assert self.funccalls <= 7, self.funccalls # gh10673 assert self.gradcalls <= 18, self.gradcalls # 0.9.0 # assert self.gradcalls == 18, self.gradcalls # 0.8.0 # assert self.gradcalls == 22, self.gradcalls # 0.7.0 # Ensure that the function behaves the same; this is from SciPy 0.7.0 assert_allclose(self.trace[3:5], [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01], [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]], atol=1e-6, rtol=1e-7) def test_maxfev_test(): rng = np.random.default_rng(271707100830272976862395227613146332411) def cost(x): return rng.random(1) * 1000 # never converged problem for imaxfev in [1, 10, 50]: # "TNC" and "L-BFGS-B" also supports max function evaluation, but # these may violate the limit because of evaluating gradients # by numerical differentiation. See the discussion in PR #14805. 
for method in ['Powell', 'Nelder-Mead']: result = optimize.minimize(cost, rng.random(10), method=method, options={'maxfev': imaxfev}) assert result["nfev"] == imaxfev def test_wrap_scalar_function_with_validation(): def func_(x): return x fcalls, func = optimize._optimize.\ _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5) for i in range(5): func(np.asarray(i)) assert fcalls[0] == i+1 msg = "Too many function calls" with assert_raises(optimize._optimize._MaxFuncCallError, match=msg): func(np.asarray(i)) # exceeded maximum function call fcalls, func = optimize._optimize.\ _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5) msg = "The user-provided objective function must return a scalar value." with assert_raises(ValueError, match=msg): func(np.array([1, 1])) def test_obj_func_returns_scalar(): match = ("The user-provided " "objective function must " "return a scalar value.") with assert_raises(ValueError, match=match): optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS') def test_neldermead_iteration_num(): x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2]) res = optimize._minimize._minimize_neldermead(optimize.rosen, x0, xatol=1e-8) assert res.nit <= 339 def test_neldermead_respect_fp(): # Nelder-Mead should respect the fp type of the input + function x0 = np.array([5.0, 4.0]).astype(np.float32) def rosen_(x): assert x.dtype == np.float32 return optimize.rosen(x) optimize.minimize(rosen_, x0, method='Nelder-Mead') def test_neldermead_xatol_fatol(): # gh4484 # test we can call with fatol, xatol specified def func(x): return x[0] ** 2 + x[1] ** 2 optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2, xatol=1e-3, fatol=1e-3) def test_neldermead_adaptive(): def func(x): return np.sum(x ** 2) p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159, 0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652, 0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474] res = optimize.minimize(func, p0, method='Nelder-Mead') assert_equal(res.success, False) res = optimize.minimize(func, p0, method='Nelder-Mead', options={'adaptive': True}) assert_equal(res.success, True) def test_bounded_powell_outsidebounds(): # With the bounded Powell method if you start outside the bounds the final # should still be within the bounds (provided that the user doesn't make a # bad choice for the `direc` argument). def func(x): return np.sum(x ** 2) bounds = (-1, 1), (-1, 1), (-1, 1) x0 = [-4, .5, -.8] # we're starting outside the bounds, so we should get a warning with assert_warns(optimize.OptimizeWarning): res = optimize.minimize(func, x0, bounds=bounds, method="Powell") assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6) assert_equal(res.success, True) assert_equal(res.status, 0) # However, now if we change the `direc` argument such that the # set of vectors does not span the parameter space, then we may # not end up back within the bounds. Here we see that the first # parameter cannot be updated! direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]] # we're starting outside the bounds, so we should get a warning with assert_warns(optimize.OptimizeWarning): res = optimize.minimize(func, x0, bounds=bounds, method="Powell", options={'direc': direc}) assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6) assert_equal(res.success, False) assert_equal(res.status, 4) def test_bounded_powell_vs_powell(): # here we test an example where the bounded Powell method # will return a different result than the standard Powell # method. 
# first we test a simple example where the minimum is at # the origin and the minimum that is within the bounds is # larger than the minimum at the origin. def func(x): return np.sum(x ** 2) bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2) x0 = [-2.1, -5.2, 1.9, 0, -2] options = {'ftol': 1e-10, 'xtol': 1e-10} res_powell = optimize.minimize(func, x0, method="Powell", options=options) assert_allclose(res_powell.x, 0., atol=1e-6) assert_allclose(res_powell.fun, 0., atol=1e-6) res_bounded_powell = optimize.minimize(func, x0, options=options, bounds=bounds, method="Powell") p = np.array([-1, -0.1, 1, 0, -2]) assert_allclose(res_bounded_powell.x, p, atol=1e-6) assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6) # now we test bounded Powell but with a mix of inf bounds. bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2) res_bounded_powell = optimize.minimize(func, x0, options=options, bounds=bounds, method="Powell") p = np.array([-1, -0.1, 1, 0, -2]) assert_allclose(res_bounded_powell.x, p, atol=1e-6) assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6) # next we test an example where the global minimum is within # the bounds, but the bounded Powell method performs better # than the standard Powell method. def func(x): t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1]) t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2])) return t**2 bounds = [(-2, 5)] * 3 x0 = [-0.5, -0.5, -0.5] res_powell = optimize.minimize(func, x0, method="Powell") res_bounded_powell = optimize.minimize(func, x0, bounds=bounds, method="Powell") assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6) assert_allclose(res_bounded_powell.fun, 0, atol=1e-6) # next we test the previous example where the we provide Powell # with (-inf, inf) bounds, and compare it to providing Powell # with no bounds. They should end up the same. bounds = [(-np.inf, np.inf)] * 3 res_bounded_powell = optimize.minimize(func, x0, bounds=bounds, method="Powell") assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6) assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6) assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6) # now test when x0 starts outside of the bounds. x0 = [45.46254415, -26.52351498, 31.74830248] bounds = [(-2, 5)] * 3 # we're starting outside the bounds, so we should get a warning with assert_warns(optimize.OptimizeWarning): res_bounded_powell = optimize.minimize(func, x0, bounds=bounds, method="Powell") assert_allclose(res_bounded_powell.fun, 0, atol=1e-6) def test_onesided_bounded_powell_stability(): # When the Powell method is bounded on only one side, a # np.tan transform is done in order to convert it into a # completely bounded problem. Here we do some simple tests # of one-sided bounded Powell where the optimal solutions # are large to test the stability of the transformation. kwargs = {'method': 'Powell', 'bounds': [(-np.inf, 1e6)] * 3, 'options': {'ftol': 1e-8, 'xtol': 1e-8}} x0 = [1, 1, 1] # df/dx is constant. def f(x): return -np.sum(x) res = optimize.minimize(f, x0, **kwargs) assert_allclose(res.fun, -3e6, atol=1e-4) # df/dx gets smaller and smaller. def f(x): return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1) res = optimize.minimize(f, x0, **kwargs) assert_allclose(res.fun, -(3e6) ** (0.1)) # df/dx gets larger and larger. 
def f(x): return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1) res = optimize.minimize(f, x0, **kwargs) assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7) # df/dx gets larger for some of the variables and smaller for others. def f(x): t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1) t *= (1 if np.all(x > 0) else -1) return t kwargs['bounds'] = [(-np.inf, 1e3)] * 3 res = optimize.minimize(f, x0, **kwargs) assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7) class TestOptimizeWrapperDisp(CheckOptimizeParameterized): use_wrapper = True disp = True class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized): use_wrapper = True disp = False class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized): use_wrapper = False disp = True class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized): use_wrapper = False disp = False class TestOptimizeSimple(CheckOptimize): def test_bfgs_nan(self): # Test corner case where nan is fed to optimizer. See gh-2067. def func(x): return x def fprime(x): return np.ones_like(x) x0 = [np.nan] with np.errstate(over='ignore', invalid='ignore'): x = optimize.fmin_bfgs(func, x0, fprime, disp=False) assert np.isnan(func(x)) def test_bfgs_nan_return(self): # Test corner cases where fun returns NaN. See gh-4793. # First case: NaN from first call. def func(x): return np.nan with np.errstate(invalid='ignore'): result = optimize.minimize(func, 0) assert np.isnan(result['fun']) assert result['success'] is False # Second case: NaN from second call. def func(x): return 0 if x == 0 else np.nan def fprime(x): return np.ones_like(x) # Steer away from zero. with np.errstate(invalid='ignore'): result = optimize.minimize(func, 0, jac=fprime) assert np.isnan(result['fun']) assert result['success'] is False def test_bfgs_numerical_jacobian(self): # BFGS with numerical Jacobian and a vector epsilon parameter. # define the epsilon parameter using a random vector epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution)) params = optimize.fmin_bfgs(self.func, self.startparams, epsilon=epsilon, args=(), maxiter=self.maxiter, disp=False) assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) def test_finite_differences_jac(self): methods = ['BFGS', 'CG', 'TNC'] jacs = ['2-point', '3-point', None] for method, jac in itertools.product(methods, jacs): result = optimize.minimize(self.func, self.startparams, method=method, jac=jac) assert_allclose(self.func(result.x), self.func(self.solution), atol=1e-6) def test_finite_differences_hess(self): # test that all the methods that require hess can use finite-difference # For Newton-CG, trust-ncg, trust-krylov the FD estimated hessian is # wrapped in a hessp function # dogleg, trust-exact actually require true hessians at the moment, so # they're excluded. 
methods = ['trust-constr', 'Newton-CG', 'trust-ncg', 'trust-krylov'] hesses = FD_METHODS + (optimize.BFGS,) for method, hess in itertools.product(methods, hesses): if hess is optimize.BFGS: hess = hess() result = optimize.minimize(self.func, self.startparams, method=method, jac=self.grad, hess=hess) assert result.success # check that the methods demand some sort of Hessian specification # Newton-CG creates its own hessp, and trust-constr doesn't need a hess # specified either methods = ['trust-ncg', 'trust-krylov', 'dogleg', 'trust-exact'] for method in methods: with pytest.raises(ValueError): optimize.minimize(self.func, self.startparams, method=method, jac=self.grad, hess=None) def test_bfgs_gh_2169(self): def f(x): if x < 0: return 1.79769313e+308 else: return x + 1./x xs = optimize.fmin_bfgs(f, [10.], disp=False) assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4) def test_bfgs_double_evaluations(self): # check BFGS does not evaluate twice in a row at same point def f(x): xp = x[0] assert xp not in seen seen.add(xp) return 10*x**2, 20*x seen = set() optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7) def test_l_bfgs_b(self): # limited-memory bound-constrained BFGS algorithm retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, self.grad, args=(), maxiter=self.maxiter) (params, fopt, d) = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) # Ensure that function call counts are 'known good'; these are from # SciPy 0.7.0. Don't allow them to increase. assert self.funccalls == 7, self.funccalls assert self.gradcalls == 5, self.gradcalls # Ensure that the function behaves the same; this is from SciPy 0.7.0 # test fixed in gh10673 assert_allclose(self.trace[3:5], [[8.117083e-16, -5.196198e-01, 4.897617e-01], [0., -0.52489628, 0.48753042]], atol=1e-14, rtol=1e-7) def test_l_bfgs_b_numjac(self): # L-BFGS-B with numerical Jacobian retval = optimize.fmin_l_bfgs_b(self.func, self.startparams, approx_grad=True, maxiter=self.maxiter) (params, fopt, d) = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) def test_l_bfgs_b_funjac(self): # L-BFGS-B with combined objective function and Jacobian def fun(x): return self.func(x), self.grad(x) retval = optimize.fmin_l_bfgs_b(fun, self.startparams, maxiter=self.maxiter) (params, fopt, d) = retval assert_allclose(self.func(params), self.func(self.solution), atol=1e-6) def test_l_bfgs_b_maxiter(self): # gh7854 # Ensure that not more than maxiters are ever run. class Callback: def __init__(self): self.nit = 0 self.fun = None self.x = None def __call__(self, x): self.x = x self.fun = optimize.rosen(x) self.nit += 1 c = Callback() res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b', callback=c, options={'maxiter': 5}) assert_equal(res.nit, 5) assert_almost_equal(res.x, c.x) assert_almost_equal(res.fun, c.fun) assert_equal(res.status, 1) assert res.success is False assert_equal(res.message, 'STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT') def test_minimize_l_bfgs_b(self): # Minimize with L-BFGS-B method opts = {'disp': False, 'maxiter': self.maxiter} r = optimize.minimize(self.func, self.startparams, method='L-BFGS-B', jac=self.grad, options=opts) assert_allclose(self.func(r.x), self.func(self.solution), atol=1e-6) assert self.gradcalls == r.njev self.funccalls = self.gradcalls = 0 # approximate jacobian ra = optimize.minimize(self.func, self.startparams, method='L-BFGS-B', options=opts) # check that function evaluations in approximate jacobian are counted # assert_(ra.nfev > r.nfev) assert self.funccalls == ra.nfev assert_allclose(self.func(ra.x), self.func(self.solution), atol=1e-6) self.funccalls = self.gradcalls = 0 # approximate jacobian ra = optimize.minimize(self.func, self.startparams, jac='3-point', method='L-BFGS-B', options=opts) assert self.funccalls == ra.nfev assert_allclose(self.func(ra.x), self.func(self.solution), atol=1e-6) def test_minimize_l_bfgs_b_ftol(self): # Check that the `ftol` parameter in l_bfgs_b works as expected v0 = None for tol in [1e-1, 1e-4, 1e-7, 1e-10]: opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol} sol = optimize.minimize(self.func, self.startparams, method='L-BFGS-B', jac=self.grad, options=opts) v = self.func(sol.x) if v0 is None: v0 = v else: assert v < v0 assert_allclose(v, self.func(self.solution), rtol=tol) def test_minimize_l_bfgs_maxls(self): # check that the maxls is passed down to the Fortran routine sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]), method='L-BFGS-B', jac=optimize.rosen_der, options={'disp': False, 'maxls': 1}) assert not sol.success def test_minimize_l_bfgs_b_maxfun_interruption(self): # gh-6162 f = optimize.rosen g = optimize.rosen_der values = [] x0 = np.full(7, 1000) def objfun(x): value = f(x) values.append(value) return value # Look for an interesting test case. # Request a maxfun that stops at a particularly bad function # evaluation somewhere between 100 and 300 evaluations. low, medium, high = 30, 100, 300 optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high) v, k = max((y, i) for i, y in enumerate(values[medium:])) maxfun = medium + k # If the minimization strategy is reasonable, # the minimize() result should not be worse than the best # of the first 30 function evaluations. target = min(values[:low]) xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun) assert_array_less(fmin, target) def test_custom(self): # This function comes from the documentation example. 
def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1, maxiter=100, callback=None, **options): bestx = x0 besty = fun(x0) funcalls = 1 niter = 0 improved = True stop = False while improved and not stop and niter < maxiter: improved = False niter += 1 for dim in range(np.size(x0)): for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]: testx = np.copy(bestx) testx[dim] = s testy = fun(testx, *args) funcalls += 1 if testy < besty: besty = testy bestx = testx improved = True if callback is not None: callback(bestx) if maxfev is not None and funcalls >= maxfev: stop = True break return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1)) x0 = [1.35, 0.9, 0.8, 1.1, 1.2] res = optimize.minimize(optimize.rosen, x0, method=custmin, options=dict(stepsize=0.05)) assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4) @pytest.mark.xfail(reason="output not reliable on all platforms") def test_gh13321(self, capfd): # gh-13321 reported issues with console output in fmin_l_bfgs_b; # check that iprint=0 works. kwargs = {'func': optimize.rosen, 'x0': [4, 3], 'fprime': optimize.rosen_der, 'bounds': ((3, 5), (3, 5))} # "L-BFGS-B" is always in output; should show when iprint >= 0 # "At iterate" is iterate info; should show when iprint >= 1 optimize.fmin_l_bfgs_b(**kwargs, iprint=-1) out, _ = capfd.readouterr() assert "L-BFGS-B" not in out and "At iterate" not in out optimize.fmin_l_bfgs_b(**kwargs, iprint=0) out, _ = capfd.readouterr() assert "L-BFGS-B" in out and "At iterate" not in out optimize.fmin_l_bfgs_b(**kwargs, iprint=1) out, _ = capfd.readouterr() assert "L-BFGS-B" in out and "At iterate" in out # `disp is not None` overrides `iprint` behavior # `disp=0` should suppress all output # `disp=1` should be the same as `iprint = 1` optimize.fmin_l_bfgs_b(**kwargs, iprint=1, disp=False) out, _ = capfd.readouterr() assert "L-BFGS-B" not in out and "At iterate" not in out optimize.fmin_l_bfgs_b(**kwargs, iprint=-1, disp=True) out, _ = capfd.readouterr() assert "L-BFGS-B" in out and "At iterate" in out def test_gh10771(self): # check that minimize passes bounds and constraints to a custom # minimizer without altering them. bounds = [(-2, 2), (0, 3)] constraints = 'constraints' def custmin(fun, x0, **options): assert options['bounds'] is bounds assert options['constraints'] is constraints return optimize.OptimizeResult() x0 = [1, 1] optimize.minimize(optimize.rosen, x0, method=custmin, bounds=bounds, constraints=constraints) def test_minimize_tol_parameter(self): # Check that the minimize() tol= argument does something def func(z): x, y = z return x**2*y**2 + x**4 + 1 def dfunc(z): x, y = z return np.array([2*x*y**2 + 4*x**3, 2*x**2*y]) for method in ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp']: if method in ('nelder-mead', 'powell', 'cobyla'): jac = None else: jac = dfunc sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10, method=method) sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0, method=method) assert func(sol1.x) < func(sol2.x), f"{method}: {func(sol1.x)} vs. 
{func(sol2.x)}" @pytest.mark.filterwarnings('ignore::UserWarning') @pytest.mark.filterwarnings('ignore::RuntimeWarning') # See gh-18547 @pytest.mark.parametrize('method', ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs', 'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc', 'fmin_slsqp'] + MINIMIZE_METHODS) def test_minimize_callback_copies_array(self, method): # Check that arrays passed to callbacks are not modified # inplace by the optimizer afterward if method in ('fmin_tnc', 'fmin_l_bfgs_b'): def func(x): return optimize.rosen(x), optimize.rosen_der(x) else: func = optimize.rosen jac = optimize.rosen_der hess = optimize.rosen_hess x0 = np.zeros(10) # Set options kwargs = {} if method.startswith('fmin'): routine = getattr(optimize, method) if method == 'fmin_slsqp': kwargs['iter'] = 5 elif method == 'fmin_tnc': kwargs['maxfun'] = 100 elif method in ('fmin', 'fmin_powell'): kwargs['maxiter'] = 3500 else: kwargs['maxiter'] = 5 else: def routine(*a, **kw): kw['method'] = method return optimize.minimize(*a, **kw) if method == 'tnc': kwargs['options'] = dict(maxfun=100) else: kwargs['options'] = dict(maxiter=5) if method in ('fmin_ncg',): kwargs['fprime'] = jac elif method in ('newton-cg',): kwargs['jac'] = jac elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg', 'trust-constr'): kwargs['jac'] = jac kwargs['hess'] = hess # Run with callback results = [] def callback(x, *args, **kwargs): assert not isinstance(x, optimize.OptimizeResult) results.append((x, np.copy(x))) routine(func, x0, callback=callback, **kwargs) # Check returned arrays coincide with their copies # and have no memory overlap assert len(results) > 2 assert all(np.all(x == y) for x, y in results) assert not any(np.may_share_memory(x[0], y[0]) for x, y in itertools.combinations(results, 2)) @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp']) def test_no_increase(self, method): # Check that the solver doesn't return a value worse than the # initial point. 
def func(x): return (x - 1)**2 def bad_grad(x): # purposefully invalid gradient function, simulates a case # where line searches start failing return 2*(x - 1) * (-1) - 2 x0 = np.array([2.0]) f0 = func(x0) jac = bad_grad options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20) if method in ['nelder-mead', 'powell', 'cobyla']: jac = None sol = optimize.minimize(func, x0, jac=jac, method=method, options=options) assert_equal(func(sol.x), sol.fun) if method == 'slsqp': pytest.xfail("SLSQP returns slightly worse") assert func(sol.x) <= f0 def test_slsqp_respect_bounds(self): # Regression test for gh-3108 def f(x): return sum((x - np.array([1., 2., 3., 4.]))**2) def cons(x): a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]]) return np.concatenate([np.dot(a, x) + np.array([5, 10]), x]) x0 = np.array([0.5, 1., 1.5, 2.]) res = optimize.minimize(f, x0, method='slsqp', constraints={'type': 'ineq', 'fun': cons}) assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12) @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'SLSQP', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']) def test_respect_maxiter(self, method): # Check that the number of iterations equals max_iter, assuming # convergence doesn't establish before MAXITER = 4 x0 = np.zeros(10) sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der, optimize.rosen_hess, None, None) # Set options kwargs = {'method': method, 'options': dict(maxiter=MAXITER)} if method in ('Newton-CG',): kwargs['jac'] = sf.grad elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg', 'trust-constr'): kwargs['jac'] = sf.grad kwargs['hess'] = sf.hess sol = optimize.minimize(sf.fun, x0, **kwargs) assert sol.nit == MAXITER assert sol.nfev >= sf.nfev if hasattr(sol, 'njev'): assert sol.njev >= sf.ngev # method specific tests if method == 'SLSQP': assert sol.status == 9 # Iteration limit reached @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'fmin', 'fmin_powell']) def test_runtime_warning(self, method): x0 = np.zeros(10) sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der, optimize.rosen_hess, None, None) options = {"maxiter": 1, "disp": True} with pytest.warns(RuntimeWarning, match=r'Maximum number of iterations'): if method.startswith('fmin'): routine = getattr(optimize, method) routine(sf.fun, x0, **options) else: optimize.minimize(sf.fun, x0, method=method, options=options) def test_respect_maxiter_trust_constr_ineq_constraints(self): # special case of minimization with trust-constr and inequality # constraints to check maxiter limit is obeyed when using internal # method 'tr_interior_point' MAXITER = 4 f = optimize.rosen jac = optimize.rosen_der hess = optimize.rosen_hess def fun(x): return np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]]) cons = ({'type': 'ineq', 'fun': fun},) x0 = np.zeros(10) sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess, method='trust-constr', options=dict(maxiter=MAXITER)) assert sol.nit == MAXITER def test_minimize_automethod(self): def f(x): return x**2 def cons(x): return x - 2 x0 = np.array([10.]) sol_0 = optimize.minimize(f, x0) sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}]) sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)]) sol_3 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(5, 10)]) sol_4 = optimize.minimize(f, x0, constraints=[{'type': 'ineq', 'fun': cons}], bounds=[(1, 10)]) for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]: 
assert sol.success assert_allclose(sol_0.x, 0, atol=1e-7) assert_allclose(sol_1.x, 2, atol=1e-7) assert_allclose(sol_2.x, 5, atol=1e-7) assert_allclose(sol_3.x, 5, atol=1e-7) assert_allclose(sol_4.x, 2, atol=1e-7) def test_minimize_coerce_args_param(self): # Regression test for gh-3503 def Y(x, c): return np.sum((x-c)**2) def dY_dx(x, c=None): return 2*(x-c) c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5]) xinit = np.random.randn(len(c)) optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS") def test_initial_step_scaling(self): # Check that optimizer initial step is not huge even if the # function and gradients are scales = [1e-50, 1, 1e50] methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG'] def f(x): if first_step_size[0] is None and x[0] != x0[0]: first_step_size[0] = abs(x[0] - x0[0]) if abs(x).max() > 1e4: raise AssertionError("Optimization stepped far away!") return scale*(x[0] - 1)**2 def g(x): return np.array([scale*(x[0] - 1)]) for scale, method in itertools.product(scales, methods): if method in ('CG', 'BFGS'): options = dict(gtol=scale*1e-8) else: options = dict() if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'): # XXX: return initial point if they see small gradient continue x0 = [-1.0] first_step_size = [None] res = optimize.minimize(f, x0, jac=g, method=method, options=options) err_msg = "{} {}: {}: {}".format(method, scale, first_step_size, res) assert res.success, err_msg assert_allclose(res.x, [1.0], err_msg=err_msg) assert res.nit <= 3, err_msg if scale > 1e-10: if method in ('CG', 'BFGS'): assert_allclose(first_step_size[0], 1.01, err_msg=err_msg) else: # Newton-CG and L-BFGS-B use different logic for the first # step, but are both scaling invariant with step sizes ~ 1 assert first_step_size[0] > 0.5 and first_step_size[0] < 3, err_msg else: # step size has upper bound of ||grad||, so line # search makes many small steps pass @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']) def test_nan_values(self, method): # Check nan values result to failed exit status np.random.seed(1234) count = [0] def func(x): return np.nan def func2(x): count[0] += 1 if count[0] > 2: return np.nan else: return np.random.rand() def grad(x): return np.array([1.0]) def hess(x): return np.array([[1.0]]) x0 = np.array([1.0]) needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg') needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg') funcs = [func, func2] grads = [grad] if needs_grad else [grad, None] hesss = [hess] if needs_hess else [hess, None] options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20) with np.errstate(invalid='ignore'), suppress_warnings() as sup: sup.filter(UserWarning, "delta_grad == 0.*") sup.filter(RuntimeWarning, ".*does not use Hessian.*") sup.filter(RuntimeWarning, ".*does not use gradient.*") for f, g, h in itertools.product(funcs, grads, hesss): count = [0] sol = optimize.minimize(f, x0, jac=g, hess=h, method=method, options=options) assert_equal(sol.success, False) @pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs', 'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']) def test_duplicate_evaluations(self, method): # check that there are no duplicate evaluations for any methods jac = hess = None if method in ('newton-cg', 'trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg'): jac = 
self.grad if method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg'): hess = self.hess with np.errstate(invalid='ignore'), suppress_warnings() as sup: # for trust-constr sup.filter(UserWarning, "delta_grad == 0.*") optimize.minimize(self.func, self.startparams, method=method, jac=jac, hess=hess) for i in range(1, len(self.trace)): if np.array_equal(self.trace[i - 1], self.trace[i]): raise RuntimeError( f"Duplicate evaluations made by {method}") @pytest.mark.filterwarnings('ignore::RuntimeWarning') @pytest.mark.parametrize('method', MINIMIZE_METHODS_NEW_CB) @pytest.mark.parametrize('new_cb_interface', [0, 1, 2]) def test_callback_stopiteration(self, method, new_cb_interface): # Check that if callback raises StopIteration, optimization # terminates with the same result as if iterations were limited def f(x): f.flag = False # check that f isn't called after StopIteration return optimize.rosen(x) f.flag = False def g(x): f.flag = False return optimize.rosen_der(x) def h(x): f.flag = False return optimize.rosen_hess(x) maxiter = 5 if new_cb_interface == 1: def callback_interface(*, intermediate_result): assert intermediate_result.fun == f(intermediate_result.x) callback() elif new_cb_interface == 2: class Callback: def __call__(self, intermediate_result: OptimizeResult): assert intermediate_result.fun == f(intermediate_result.x) callback() callback_interface = Callback() else: def callback_interface(xk, *args): # type: ignore[misc] callback() def callback(): callback.i += 1 callback.flag = False if callback.i == maxiter: callback.flag = True raise StopIteration() callback.i = 0 callback.flag = False kwargs = {'x0': [1.1]*5, 'method': method, 'fun': f, 'jac': g, 'hess': h} res = optimize.minimize(**kwargs, callback=callback_interface) if method == 'nelder-mead': maxiter = maxiter + 1 # nelder-mead counts differently ref = optimize.minimize(**kwargs, options={'maxiter': maxiter}) assert res.fun == ref.fun assert_equal(res.x, ref.x) assert res.nit == ref.nit == maxiter assert res.status == (3 if method == 'trust-constr' else 99) def test_ndim_error(self): msg = "'x0' must only have one dimension." with assert_raises(ValueError, match=msg): optimize.minimize(lambda x: x, np.ones((2, 1))) @pytest.mark.parametrize('method', ('nelder-mead', 'l-bfgs-b', 'tnc', 'powell', 'cobyla', 'trust-constr')) def test_minimize_invalid_bounds(self, method): def f(x): return np.sum(x**2) bounds = Bounds([1, 2], [3, 4]) msg = 'The number of bounds is not compatible with the length of `x0`.' with pytest.raises(ValueError, match=msg): optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds) bounds = Bounds([1, 6, 1], [3, 4, 2]) msg = 'An upper bound is less than the corresponding lower bound.' with pytest.raises(ValueError, match=msg): optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds) @pytest.mark.parametrize('method', ['bfgs', 'cg', 'newton-cg', 'powell']) def test_minimize_warnings_gh1953(self, method): # test that minimize methods produce warnings rather than just using # `print`; see gh-1953. 
kwargs = {} if method=='powell' else {'jac': optimize.rosen_der} warning_type = (RuntimeWarning if method=='powell' else optimize.OptimizeWarning) options = {'disp': True, 'maxiter': 10} with pytest.warns(warning_type, match='Maximum number'): optimize.minimize(lambda x: optimize.rosen(x), [0, 0], method=method, options=options, **kwargs) options['disp'] = False optimize.minimize(lambda x: optimize.rosen(x), [0, 0], method=method, options=options, **kwargs) @pytest.mark.parametrize( 'method', ['l-bfgs-b', 'tnc', 'Powell', 'Nelder-Mead'] ) def test_minimize_with_scalar(method): # checks that minimize works with a scalar being provided to it. def f(x): return np.sum(x ** 2) res = optimize.minimize(f, 17, bounds=[(-100, 100)], method=method) assert res.success assert_allclose(res.x, [0.0], atol=1e-5) class TestLBFGSBBounds: def setup_method(self): self.bounds = ((1, None), (None, None)) self.solution = (1, 0) def fun(self, x, p=2.0): return 1.0 / p * (x[0]**p + x[1]**p) def jac(self, x, p=2.0): return x**(p - 1) def fj(self, x, p=2.0): return self.fun(x, p), self.jac(x, p) def test_l_bfgs_b_bounds(self): x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1], fprime=self.jac, bounds=self.bounds) assert d['warnflag'] == 0, d['task'] assert_allclose(x, self.solution, atol=1e-6) def test_l_bfgs_b_funjac(self): # L-BFGS-B with fun and jac combined and extra arguments x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ), bounds=self.bounds) assert d['warnflag'] == 0, d['task'] assert_allclose(x, self.solution, atol=1e-6) def test_minimize_l_bfgs_b_bounds(self): # Minimize with method='L-BFGS-B' with bounds res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', jac=self.jac, bounds=self.bounds) assert res['success'], res['message'] assert_allclose(res.x, self.solution, atol=1e-6) @pytest.mark.parametrize('bounds', [ ([(10, 1), (1, 10)]), ([(1, 10), (10, 1)]), ([(10, 1), (10, 1)]) ]) def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds): with pytest.raises(ValueError, match='.*bound.*'): optimize.minimize(self.fun, [0, -1], method='L-BFGS-B', jac=self.jac, bounds=bounds) def test_minimize_l_bfgs_b_bounds_FD(self): # test that initial starting value outside bounds doesn't raise # an error (done with clipping). 
# test all different finite differences combos, with and without args jacs = ['2-point', '3-point', None] argss = [(2.,), ()] for jac, args in itertools.product(jacs, argss): res = optimize.minimize(self.fun, [0, -1], args=args, method='L-BFGS-B', jac=jac, bounds=self.bounds, options={'finite_diff_rel_step': None}) assert res['success'], res['message'] assert_allclose(res.x, self.solution, atol=1e-6) class TestOptimizeScalar: def setup_method(self): self.solution = 1.5 def fun(self, x, a=1.5): """Objective function""" return (x - a)**2 - 0.8 def test_brent(self): x = optimize.brent(self.fun) assert_allclose(x, self.solution, atol=1e-6) x = optimize.brent(self.fun, brack=(-3, -2)) assert_allclose(x, self.solution, atol=1e-6) x = optimize.brent(self.fun, full_output=True) assert_allclose(x[0], self.solution, atol=1e-6) x = optimize.brent(self.fun, brack=(-15, -1, 15)) assert_allclose(x, self.solution, atol=1e-6) message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)" with pytest.raises(ValueError, match=message): optimize.brent(self.fun, brack=(-1, 0, 1)) message = r"\(xa < xb\) and \(xb < xc\)" with pytest.raises(ValueError, match=message): optimize.brent(self.fun, brack=(0, -1, 1)) @pytest.mark.filterwarnings('ignore::UserWarning') def test_golden(self): x = optimize.golden(self.fun) assert_allclose(x, self.solution, atol=1e-6) x = optimize.golden(self.fun, brack=(-3, -2)) assert_allclose(x, self.solution, atol=1e-6) x = optimize.golden(self.fun, full_output=True) assert_allclose(x[0], self.solution, atol=1e-6) x = optimize.golden(self.fun, brack=(-15, -1, 15)) assert_allclose(x, self.solution, atol=1e-6) x = optimize.golden(self.fun, tol=0) assert_allclose(x, self.solution) maxiter_test_cases = [0, 1, 5] for maxiter in maxiter_test_cases: x0 = optimize.golden(self.fun, maxiter=0, full_output=True) x = optimize.golden(self.fun, maxiter=maxiter, full_output=True) nfev0, nfev = x0[2], x[2] assert_equal(nfev - nfev0, maxiter) message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)" with pytest.raises(ValueError, match=message): optimize.golden(self.fun, brack=(-1, 0, 1)) message = r"\(xa < xb\) and \(xb < xc\)" with pytest.raises(ValueError, match=message): optimize.golden(self.fun, brack=(0, -1, 1)) def test_fminbound(self): x = optimize.fminbound(self.fun, 0, 1) assert_allclose(x, 1, atol=1e-4) x = optimize.fminbound(self.fun, 1, 5) assert_allclose(x, self.solution, atol=1e-6) x = optimize.fminbound(self.fun, np.array([1]), np.array([5])) assert_allclose(x, self.solution, atol=1e-6) assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1) def test_fminbound_scalar(self): with pytest.raises(ValueError, match='.*must be finite scalars.*'): optimize.fminbound(self.fun, np.zeros((1, 2)), 1) x = optimize.fminbound(self.fun, 1, np.array(5)) assert_allclose(x, self.solution, atol=1e-6) def test_gh11207(self): def fun(x): return x**2 optimize.fminbound(fun, 0, 0) def test_minimize_scalar(self): # combine all tests above for the minimize_scalar wrapper x = optimize.minimize_scalar(self.fun).x assert_allclose(x, self.solution, atol=1e-6) x = optimize.minimize_scalar(self.fun, method='Brent') assert x.success x = optimize.minimize_scalar(self.fun, method='Brent', options=dict(maxiter=3)) assert not x.success x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), args=(1.5, ), method='Brent').x assert_allclose(x, self.solution, atol=1e-6) x = optimize.minimize_scalar(self.fun, method='Brent', args=(1.5,)).x assert_allclose(x, self.solution, atol=1e-6) x = 
optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), args=(1.5, ), method='Brent').x assert_allclose(x, self.solution, atol=1e-6) x = optimize.minimize_scalar(self.fun, bracket=(-3, -2), args=(1.5, ), method='golden').x assert_allclose(x, self.solution, atol=1e-6) x = optimize.minimize_scalar(self.fun, method='golden', args=(1.5,)).x assert_allclose(x, self.solution, atol=1e-6) x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15), args=(1.5, ), method='golden').x assert_allclose(x, self.solution, atol=1e-6) x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,), method='Bounded').x assert_allclose(x, 1, atol=1e-4) x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ), method='bounded').x assert_allclose(x, self.solution, atol=1e-6) x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]), np.array([5])), args=(np.array([1.5]), ), method='bounded').x assert_allclose(x, self.solution, atol=1e-6) assert_raises(ValueError, optimize.minimize_scalar, self.fun, bounds=(5, 1), method='bounded', args=(1.5, )) assert_raises(ValueError, optimize.minimize_scalar, self.fun, bounds=(np.zeros(2), 1), method='bounded', args=(1.5, )) x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)), method='bounded').x assert_allclose(x, self.solution, atol=1e-6) def test_minimize_scalar_custom(self): # This function comes from the documentation example. def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1, maxiter=100, callback=None, **options): bestx = (bracket[1] + bracket[0]) / 2.0 besty = fun(bestx) funcalls = 1 niter = 0 improved = True stop = False while improved and not stop and niter < maxiter: improved = False niter += 1 for testx in [bestx - stepsize, bestx + stepsize]: testy = fun(testx, *args) funcalls += 1 if testy < besty: besty = testy bestx = testx improved = True if callback is not None: callback(bestx) if maxfev is not None and funcalls >= maxfev: stop = True break return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1)) res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin, options=dict(stepsize=0.05)) assert_allclose(res.x, self.solution, atol=1e-6) def test_minimize_scalar_coerce_args_param(self): # Regression test for gh-3503 optimize.minimize_scalar(self.fun, args=1.5) @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) def test_disp(self, method): # test that all minimize_scalar methods accept a disp option. 
for disp in [0, 1, 2, 3]: optimize.minimize_scalar(self.fun, options={"disp": disp}) @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) def test_result_attributes(self, method): kwargs = {"bounds": [-10, 10]} if method == 'bounded' else {} result = optimize.minimize_scalar(self.fun, method=method, **kwargs) assert hasattr(result, "x") assert hasattr(result, "success") assert hasattr(result, "message") assert hasattr(result, "fun") assert hasattr(result, "nfev") assert hasattr(result, "nit") @pytest.mark.filterwarnings('ignore::UserWarning') @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden']) def test_nan_values(self, method): # Check nan values result to failed exit status np.random.seed(1234) count = [0] def func(x): count[0] += 1 if count[0] > 4: return np.nan else: return x**2 + 0.1 * np.sin(x) bracket = (-1, 0, 1) bounds = (-1, 1) with np.errstate(invalid='ignore'), suppress_warnings() as sup: sup.filter(UserWarning, "delta_grad == 0.*") sup.filter(RuntimeWarning, ".*does not use Hessian.*") sup.filter(RuntimeWarning, ".*does not use gradient.*") count = [0] kwargs = {"bounds": bounds} if method == 'bounded' else {} sol = optimize.minimize_scalar(func, bracket=bracket, **kwargs, method=method, options=dict(maxiter=20)) assert_equal(sol.success, False) def test_minimize_scalar_defaults_gh10911(self): # Previously, bounds were silently ignored unless `method='bounds'` # was chosen. See gh-10911. Check that this is no longer the case. def f(x): return x**2 res = optimize.minimize_scalar(f) assert_allclose(res.x, 0, atol=1e-8) res = optimize.minimize_scalar(f, bounds=(1, 100), options={'xatol': 1e-10}) assert_allclose(res.x, 1) def test_minimize_non_finite_bounds_gh10911(self): # Previously, minimize_scalar misbehaved with infinite bounds. # See gh-10911. Check that it now raises an error, instead. msg = "Optimization bounds must be finite scalars." with pytest.raises(ValueError, match=msg): optimize.minimize_scalar(np.sin, bounds=(1, np.inf)) with pytest.raises(ValueError, match=msg): optimize.minimize_scalar(np.sin, bounds=(np.nan, 1)) @pytest.mark.parametrize("method", ['brent', 'golden']) def test_minimize_unbounded_method_with_bounds_gh10911(self, method): # Previously, `bounds` were silently ignored when `method='brent'` or # `method='golden'`. See gh-10911. Check that error is now raised. msg = "Use of `bounds` is incompatible with..." with pytest.raises(ValueError, match=msg): optimize.minimize_scalar(np.sin, method=method, bounds=(1, 2)) @pytest.mark.filterwarnings('ignore::RuntimeWarning') @pytest.mark.parametrize("method", MINIMIZE_SCALAR_METHODS) @pytest.mark.parametrize("tol", [1, 1e-6]) @pytest.mark.parametrize("fshape", [(), (1,), (1, 1)]) def test_minimize_scalar_dimensionality_gh16196(self, method, tol, fshape): # gh-16196 reported that the output shape of `minimize_scalar` was not # consistent when an objective function returned an array. Check that # `res.fun` and `res.x` are now consistent. def f(x): return np.array(x**4).reshape(fshape) a, b = -0.1, 0.2 kwargs = (dict(bracket=(a, b)) if method != "bounded" else dict(bounds=(a, b))) kwargs.update(dict(method=method, tol=tol)) res = optimize.minimize_scalar(f, **kwargs) assert res.x.shape == res.fun.shape == f(res.x).shape == fshape @pytest.mark.parametrize('method', ['bounded', 'brent', 'golden']) def test_minimize_scalar_warnings_gh1953(self, method): # test that minimize_scalar methods produce warnings rather than just # using `print`; see gh-1953. 
def f(x): return (x - 1)**2 kwargs = {} kwd = 'bounds' if method == 'bounded' else 'bracket' kwargs[kwd] = [-2, 10] options = {'disp': True, 'maxiter': 3} with pytest.warns(optimize.OptimizeWarning, match='Maximum number'): optimize.minimize_scalar(f, method=method, options=options, **kwargs) options['disp'] = False optimize.minimize_scalar(f, method=method, options=options, **kwargs) class TestBracket: @pytest.mark.filterwarnings('ignore::RuntimeWarning') def test_errors_and_status_false(self): # Check that `bracket` raises the errors it is supposed to def f(x): # gh-14858 return x**2 if ((-1 < x) & (x < 1)) else 100.0 message = "The algorithm terminated without finding a valid bracket." with pytest.raises(RuntimeError, match=message): optimize.bracket(f, -1, 1) with pytest.raises(RuntimeError, match=message): optimize.bracket(f, -1, np.inf) with pytest.raises(RuntimeError, match=message): optimize.brent(f, brack=(-1, 1)) with pytest.raises(RuntimeError, match=message): optimize.golden(f, brack=(-1, 1)) def f(x): # gh-5899 return -5 * x**5 + 4 * x**4 - 12 * x**3 + 11 * x**2 - 2 * x + 1 message = "No valid bracket was found before the iteration limit..." with pytest.raises(RuntimeError, match=message): optimize.bracket(f, -0.5, 0.5, maxiter=10) @pytest.mark.parametrize('method', ('brent', 'golden')) def test_minimize_scalar_success_false(self, method): # Check that status information from `bracket` gets to minimize_scalar def f(x): # gh-14858 return x**2 if ((-1 < x) & (x < 1)) else 100.0 message = "The algorithm terminated without finding a valid bracket." res = optimize.minimize_scalar(f, bracket=(-1, 1), method=method) assert not res.success assert message in res.message assert res.nfev == 3 assert res.nit == 0 assert res.fun == 100 def test_brent_negative_tolerance(): assert_raises(ValueError, optimize.brent, np.cos, tol=-.01) class TestNewtonCg: def test_rosenbrock(self): x0 = np.array([-1.2, 1.0]) sol = optimize.minimize(optimize.rosen, x0, jac=optimize.rosen_der, hess=optimize.rosen_hess, tol=1e-5, method='Newton-CG') assert sol.success, sol.message assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) def test_himmelblau(self): x0 = np.array(himmelblau_x0) sol = optimize.minimize(himmelblau, x0, jac=himmelblau_grad, hess=himmelblau_hess, method='Newton-CG', tol=1e-6) assert sol.success, sol.message assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4) assert_allclose(sol.fun, himmelblau_min, atol=1e-4) def test_finite_difference(self): x0 = np.array([-1.2, 1.0]) sol = optimize.minimize(optimize.rosen, x0, jac=optimize.rosen_der, hess='2-point', tol=1e-5, method='Newton-CG') assert sol.success, sol.message assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) def test_hessian_update_strategy(self): x0 = np.array([-1.2, 1.0]) sol = optimize.minimize(optimize.rosen, x0, jac=optimize.rosen_der, hess=optimize.BFGS(), tol=1e-5, method='Newton-CG') assert sol.success, sol.message assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) def test_line_for_search(): # _line_for_search is only used in _linesearch_powell, which is also # tested below. Thus there are more tests of _line_for_search in the # test_linesearch_powell_bounded function. 
line_for_search = optimize._optimize._line_for_search # args are x0, alpha, lower_bound, upper_bound # returns lmin, lmax lower_bound = np.array([-5.3, -1, -1.5, -3]) upper_bound = np.array([1.9, 1, 2.8, 3]) # test when starting in the bounds x0 = np.array([0., 0, 0, 0]) # and when starting outside of the bounds x1 = np.array([0., 2, -3, 0]) all_tests = ( (x0, np.array([1., 0, 0, 0]), -5.3, 1.9), (x0, np.array([0., 1, 0, 0]), -1, 1), (x0, np.array([0., 0, 1, 0]), -1.5, 2.8), (x0, np.array([0., 0, 0, 1]), -3, 3), (x0, np.array([1., 1, 0, 0]), -1, 1), (x0, np.array([1., 0, -1, 2]), -1.5, 1.5), (x0, np.array([2., 0, -1, 2]), -1.5, 0.95), (x1, np.array([1., 0, 0, 0]), -5.3, 1.9), (x1, np.array([0., 1, 0, 0]), -3, -1), (x1, np.array([0., 0, 1, 0]), 1.5, 5.8), (x1, np.array([0., 0, 0, 1]), -3, 3), (x1, np.array([1., 1, 0, 0]), -3, -1), (x1, np.array([1., 0, -1, 0]), -5.3, -1.5), ) for x, alpha, lmin, lmax in all_tests: mi, ma = line_for_search(x, alpha, lower_bound, upper_bound) assert_allclose(mi, lmin, atol=1e-6) assert_allclose(ma, lmax, atol=1e-6) # now with infinite bounds lower_bound = np.array([-np.inf, -1, -np.inf, -3]) upper_bound = np.array([np.inf, 1, 2.8, np.inf]) all_tests = ( (x0, np.array([1., 0, 0, 0]), -np.inf, np.inf), (x0, np.array([0., 1, 0, 0]), -1, 1), (x0, np.array([0., 0, 1, 0]), -np.inf, 2.8), (x0, np.array([0., 0, 0, 1]), -3, np.inf), (x0, np.array([1., 1, 0, 0]), -1, 1), (x0, np.array([1., 0, -1, 2]), -1.5, np.inf), (x1, np.array([1., 0, 0, 0]), -np.inf, np.inf), (x1, np.array([0., 1, 0, 0]), -3, -1), (x1, np.array([0., 0, 1, 0]), -np.inf, 5.8), (x1, np.array([0., 0, 0, 1]), -3, np.inf), (x1, np.array([1., 1, 0, 0]), -3, -1), (x1, np.array([1., 0, -1, 0]), -5.8, np.inf), ) for x, alpha, lmin, lmax in all_tests: mi, ma = line_for_search(x, alpha, lower_bound, upper_bound) assert_allclose(mi, lmin, atol=1e-6) assert_allclose(ma, lmax, atol=1e-6) def test_linesearch_powell(): # helper function in optimize.py, not a public function. linesearch_powell = optimize._optimize._linesearch_powell # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3 # returns new_fval, p + direction, direction def func(x): return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2) p0 = np.array([0., 0, 0, 0]) fval = func(p0) lower_bound = np.array([-np.inf] * 4) upper_bound = np.array([np.inf] * 4) all_tests = ( (np.array([1., 0, 0, 0]), -1), (np.array([0., 1, 0, 0]), 2), (np.array([0., 0, 1, 0]), 1.5), (np.array([0., 0, 0, 1]), -.4), (np.array([-1., 0, 1, 0]), 1.25), (np.array([0., 0, 1, 1]), .55), (np.array([2., 0, -1, 1]), -.65), ) for xi, l in all_tests: f, p, direction = linesearch_powell(func, p0, xi, fval=fval, tol=1e-5) assert_allclose(f, func(l * xi), atol=1e-6) assert_allclose(p, l * xi, atol=1e-6) assert_allclose(direction, l * xi, atol=1e-6) f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval) assert_allclose(f, func(l * xi), atol=1e-6) assert_allclose(p, l * xi, atol=1e-6) assert_allclose(direction, l * xi, atol=1e-6) def test_linesearch_powell_bounded(): # helper function in optimize.py, not a public function. linesearch_powell = optimize._optimize._linesearch_powell # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3 # returns new_fval, p+direction, direction def func(x): return np.sum((x - np.array([-1.0, 2.0, 1.5, -0.4])) ** 2) p0 = np.array([0., 0, 0, 0]) fval = func(p0) # first choose bounds such that the same tests from # test_linesearch_powell should pass. 
lower_bound = np.array([-2.]*4) upper_bound = np.array([2.]*4) all_tests = ( (np.array([1., 0, 0, 0]), -1), (np.array([0., 1, 0, 0]), 2), (np.array([0., 0, 1, 0]), 1.5), (np.array([0., 0, 0, 1]), -.4), (np.array([-1., 0, 1, 0]), 1.25), (np.array([0., 0, 1, 1]), .55), (np.array([2., 0, -1, 1]), -.65), ) for xi, l in all_tests: f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval) assert_allclose(f, func(l * xi), atol=1e-6) assert_allclose(p, l * xi, atol=1e-6) assert_allclose(direction, l * xi, atol=1e-6) # now choose bounds such that unbounded vs bounded gives different results lower_bound = np.array([-.3]*3 + [-1]) upper_bound = np.array([.45]*3 + [.9]) all_tests = ( (np.array([1., 0, 0, 0]), -.3), (np.array([0., 1, 0, 0]), .45), (np.array([0., 0, 1, 0]), .45), (np.array([0., 0, 0, 1]), -.4), (np.array([-1., 0, 1, 0]), .3), (np.array([0., 0, 1, 1]), .45), (np.array([2., 0, -1, 1]), -.15), ) for xi, l in all_tests: f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval) assert_allclose(f, func(l * xi), atol=1e-6) assert_allclose(p, l * xi, atol=1e-6) assert_allclose(direction, l * xi, atol=1e-6) # now choose as above but start outside the bounds p0 = np.array([-1., 0, 0, 2]) fval = func(p0) all_tests = ( (np.array([1., 0, 0, 0]), .7), (np.array([0., 1, 0, 0]), .45), (np.array([0., 0, 1, 0]), .45), (np.array([0., 0, 0, 1]), -2.4), ) for xi, l in all_tests: f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval) assert_allclose(f, func(p0 + l * xi), atol=1e-6) assert_allclose(p, p0 + l * xi, atol=1e-6) assert_allclose(direction, l * xi, atol=1e-6) # now mix in inf p0 = np.array([0., 0, 0, 0]) fval = func(p0) # now choose bounds that mix inf lower_bound = np.array([-.3, -np.inf, -np.inf, -1]) upper_bound = np.array([np.inf, .45, np.inf, .9]) all_tests = ( (np.array([1., 0, 0, 0]), -.3), (np.array([0., 1, 0, 0]), .45), (np.array([0., 0, 1, 0]), 1.5), (np.array([0., 0, 0, 1]), -.4), (np.array([-1., 0, 1, 0]), .3), (np.array([0., 0, 1, 1]), .55), (np.array([2., 0, -1, 1]), -.15), ) for xi, l in all_tests: f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval) assert_allclose(f, func(l * xi), atol=1e-6) assert_allclose(p, l * xi, atol=1e-6) assert_allclose(direction, l * xi, atol=1e-6) # now choose as above but start outside the bounds p0 = np.array([-1., 0, 0, 2]) fval = func(p0) all_tests = ( (np.array([1., 0, 0, 0]), .7), (np.array([0., 1, 0, 0]), .45), (np.array([0., 0, 1, 0]), 1.5), (np.array([0., 0, 0, 1]), -2.4), ) for xi, l in all_tests: f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5, lower_bound=lower_bound, upper_bound=upper_bound, fval=fval) assert_allclose(f, func(p0 + l * xi), atol=1e-6) assert_allclose(p, p0 + l * xi, atol=1e-6) assert_allclose(direction, l * xi, atol=1e-6) def test_powell_limits(): # gh15342 - powell was going outside bounds for some function evaluations. 
bounds = optimize.Bounds([0, 0], [0.6, 20]) def fun(x): a, b = x assert (x >= bounds.lb).all() and (x <= bounds.ub).all() return a ** 2 + b ** 2 optimize.minimize(fun, x0=[0.6, 20], method='Powell', bounds=bounds) # Another test from the original report - gh-13411 bounds = optimize.Bounds(lb=[0,], ub=[1,], keep_feasible=[True,]) def func(x): assert x >= 0 and x <= 1 return np.exp(x) optimize.minimize(fun=func, x0=[0.5], method='powell', bounds=bounds) class TestRosen: def test_hess(self): # Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775. x = np.array([3, 4, 5]) p = np.array([2, 2, 2]) hp = optimize.rosen_hess_prod(x, p) dothp = np.dot(optimize.rosen_hess(x), p) assert_equal(hp, dothp) def himmelblau(p): """ R^2 -> R^1 test function for optimization. The function has four local minima where himmelblau(xopt) == 0. """ x, y = p a = x*x + y - 11 b = x + y*y - 7 return a*a + b*b def himmelblau_grad(p): x, y = p return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14, 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22]) def himmelblau_hess(p): x, y = p return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y], [4*x + 4*y, 4*x + 12*y**2 - 26]]) himmelblau_x0 = [-0.27, -0.9] himmelblau_xopt = [3, 2] himmelblau_min = 0.0 def test_minimize_multiple_constraints(): # Regression test for gh-4240. def func(x): return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]]) def func1(x): return np.array([x[1]]) def func2(x): return np.array([x[2]]) cons = ({'type': 'ineq', 'fun': func}, {'type': 'ineq', 'fun': func1}, {'type': 'ineq', 'fun': func2}) def f(x): return -1 * (x[0] + x[1] + x[2]) res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons) assert_allclose(res.x, [125, 0, 0], atol=1e-10) class TestOptimizeResultAttributes: # Test that all minimizers return an OptimizeResult containing # all the OptimizeResult attributes def setup_method(self): self.x0 = [5, 5] self.func = optimize.rosen self.jac = optimize.rosen_der self.hess = optimize.rosen_hess self.hessp = optimize.rosen_hess_prod self.bounds = [(0., 10.), (0., 10.)] def test_attributes_present(self): attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun', 'message'] skip = {'cobyla': ['nit']} for method in MINIMIZE_METHODS: with suppress_warnings() as sup: sup.filter(RuntimeWarning, ("Method .+ does not use (gradient|Hessian.*)" " information")) res = optimize.minimize(self.func, self.x0, method=method, jac=self.jac, hess=self.hess, hessp=self.hessp) for attribute in attributes: if method in skip and attribute in skip[method]: continue assert hasattr(res, attribute) assert attribute in dir(res) # gh13001, OptimizeResult.message should be a str assert isinstance(res.message, str) def f1(z, *params): x, y = z a, b, c, d, e, f, g, h, i, j, k, l, scale = params return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f) def f2(z, *params): x, y = z a, b, c, d, e, f, g, h, i, j, k, l, scale = params return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale)) def f3(z, *params): x, y = z a, b, c, d, e, f, g, h, i, j, k, l, scale = params return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale)) def brute_func(z, *params): return f1(z, *params) + f2(z, *params) + f3(z, *params) class TestBrute: # Test the "brute force" method def setup_method(self): self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5) self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25)) self.solution = np.array([-1.05665192, 1.80834843]) def brute_func(self, z, *params): # an instance method optimizing return brute_func(z, *params) def test_brute(self): # test fmin resbrute = 
optimize.brute(brute_func, self.rranges, args=self.params, full_output=True, finish=optimize.fmin) assert_allclose(resbrute[0], self.solution, atol=1e-3) assert_allclose(resbrute[1], brute_func(self.solution, *self.params), atol=1e-3) # test minimize resbrute = optimize.brute(brute_func, self.rranges, args=self.params, full_output=True, finish=optimize.minimize) assert_allclose(resbrute[0], self.solution, atol=1e-3) assert_allclose(resbrute[1], brute_func(self.solution, *self.params), atol=1e-3) # test that brute can optimize an instance method (the other tests use # a non-class based function resbrute = optimize.brute(self.brute_func, self.rranges, args=self.params, full_output=True, finish=optimize.minimize) assert_allclose(resbrute[0], self.solution, atol=1e-3) def test_1D(self): # test that for a 1-D problem the test function is passed an array, # not a scalar. def f(x): assert len(x.shape) == 1 assert x.shape[0] == 1 return x ** 2 optimize.brute(f, [(-1, 1)], Ns=3, finish=None) def test_workers(self): # check that parallel evaluation works resbrute = optimize.brute(brute_func, self.rranges, args=self.params, full_output=True, finish=None) resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params, full_output=True, finish=None, workers=2) assert_allclose(resbrute1[-1], resbrute[-1]) assert_allclose(resbrute1[0], resbrute[0]) def test_runtime_warning(self): rng = np.random.default_rng(1234) def func(z, *params): return rng.random(1) * 1000 # never converged problem with pytest.warns(RuntimeWarning, match=r'Either final optimization did not succeed'): optimize.brute(func, self.rranges, args=self.params, disp=True) def test_coerce_args_param(self): # optimize.brute should coerce non-iterable args to a tuple. def f(x, *args): return x ** args[0] resbrute = optimize.brute(f, (slice(-4, 4, .25),), args=2) assert_allclose(resbrute, 0) def test_cobyla_threadsafe(): # Verify that cobyla is threadsafe. Will segfault if it is not. import concurrent.futures import time def objective1(x): time.sleep(0.1) return x[0]**2 def objective2(x): time.sleep(0.1) return (x[0]-1)**2 min_method = "COBYLA" def minimizer1(): return optimize.minimize(objective1, [0.0], method=min_method) def minimizer2(): return optimize.minimize(objective2, [0.0], method=min_method) with concurrent.futures.ThreadPoolExecutor() as pool: tasks = [] tasks.append(pool.submit(minimizer1)) tasks.append(pool.submit(minimizer2)) for t in tasks: t.result() class TestIterationLimits: # Tests that optimisation does not give up before trying requested # number of iterations or evaluations. And that it does not succeed # by exceeding the limits. 
def setup_method(self): self.funcalls = 0 def slow_func(self, v): self.funcalls += 1 r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1]) return np.sin(r*20 + t)+r*0.5 def test_neldermead_limit(self): self.check_limits("Nelder-Mead", 200) def test_powell_limit(self): self.check_limits("powell", 1000) def check_limits(self, method, default_iters): for start_v in [[0.1, 0.1], [1, 1], [2, 2]]: for mfev in [50, 500, 5000]: self.funcalls = 0 res = optimize.minimize(self.slow_func, start_v, method=method, options={"maxfev": mfev}) assert self.funcalls == res["nfev"] if res["success"]: assert res["nfev"] < mfev else: assert res["nfev"] >= mfev for mit in [50, 500, 5000]: res = optimize.minimize(self.slow_func, start_v, method=method, options={"maxiter": mit}) if res["success"]: assert res["nit"] <= mit else: assert res["nit"] >= mit for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]: self.funcalls = 0 res = optimize.minimize(self.slow_func, start_v, method=method, options={"maxiter": mit, "maxfev": mfev}) assert self.funcalls == res["nfev"] if res["success"]: assert res["nfev"] < mfev and res["nit"] <= mit else: assert res["nfev"] >= mfev or res["nit"] >= mit for mfev, mit in [[np.inf, None], [None, np.inf]]: self.funcalls = 0 res = optimize.minimize(self.slow_func, start_v, method=method, options={"maxiter": mit, "maxfev": mfev}) assert self.funcalls == res["nfev"] if res["success"]: if mfev is None: assert res["nfev"] < default_iters*2 else: assert res["nit"] <= default_iters*2 else: assert res["nfev"] >= default_iters*2 or res["nit"] >= default_iters*2 def test_result_x_shape_when_len_x_is_one(): def fun(x): return x * x def jac(x): return 2. * x def hess(x): return np.array([[2.]]) methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP'] for method in methods: res = optimize.minimize(fun, np.array([0.1]), method=method) assert res.x.shape == (1,) # use jac + hess methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'Newton-CG'] for method in methods: res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac, hess=hess) assert res.x.shape == (1,) class FunctionWithGradient: def __init__(self): self.number_of_calls = 0 def __call__(self, x): self.number_of_calls += 1 return np.sum(x**2), 2 * x @pytest.fixture def function_with_gradient(): return FunctionWithGradient() def test_memoize_jac_function_before_gradient(function_with_gradient): memoized_function = MemoizeJac(function_with_gradient) x0 = np.array([1.0, 2.0]) assert_allclose(memoized_function(x0), 5.0) assert function_with_gradient.number_of_calls == 1 assert_allclose(memoized_function.derivative(x0), 2 * x0) assert function_with_gradient.number_of_calls == 1, \ "function is not recomputed " \ "if gradient is requested after function value" assert_allclose( memoized_function(2 * x0), 20.0, err_msg="different input triggers new computation") assert function_with_gradient.number_of_calls == 2, \ "different input triggers new computation" def test_memoize_jac_gradient_before_function(function_with_gradient): memoized_function = MemoizeJac(function_with_gradient) x0 = np.array([1.0, 2.0]) assert_allclose(memoized_function.derivative(x0), 2 * x0) assert function_with_gradient.number_of_calls == 1 assert_allclose(memoized_function(x0), 5.0) assert function_with_gradient.number_of_calls == 1, \ "function is not recomputed " \ "if function value is requested after gradient" assert_allclose( memoized_function.derivative(2 * x0), 4 * x0, err_msg="different input 
triggers new computation") assert function_with_gradient.number_of_calls == 2, \ "different input triggers new computation" def test_memoize_jac_with_bfgs(function_with_gradient): """ Tests that using MemoizedJac in combination with ScalarFunction and BFGS does not lead to repeated function evaluations. Tests changes made in response to GH11868. """ memoized_function = MemoizeJac(function_with_gradient) jac = memoized_function.derivative hess = optimize.BFGS() x0 = np.array([1.0, 0.5]) scalar_function = ScalarFunction( memoized_function, x0, (), jac, hess, None, None) assert function_with_gradient.number_of_calls == 1 scalar_function.fun(x0 + 0.1) assert function_with_gradient.number_of_calls == 2 scalar_function.fun(x0 + 0.2) assert function_with_gradient.number_of_calls == 3 def test_gh12696(): # Test that optimize doesn't throw warning gh-12696 with assert_no_warnings(): optimize.fminbound( lambda x: np.array([x**2]), -np.pi, np.pi, disp=False) # --- Test minimize with equal upper and lower bounds --- # def setup_test_equal_bounds(): np.random.seed(0) x0 = np.random.rand(4) lb = np.array([0, 2, -1, -1.0]) ub = np.array([3, 2, 2, -1.0]) i_eb = (lb == ub) def check_x(x, check_size=True, check_values=True): if check_size: assert x.size == 4 if check_values: assert_allclose(x[i_eb], lb[i_eb]) def func(x): check_x(x) return optimize.rosen(x) def grad(x): check_x(x) return optimize.rosen_der(x) def callback(x, *args): check_x(x) def constraint1(x): check_x(x, check_values=False) return x[0:1] - 1 def jacobian1(x): check_x(x, check_values=False) dc = np.zeros_like(x) dc[0] = 1 return dc def constraint2(x): check_x(x, check_values=False) return x[2:3] - 0.5 def jacobian2(x): check_x(x, check_values=False) dc = np.zeros_like(x) dc[2] = 1 return dc c1a = NonlinearConstraint(constraint1, -np.inf, 0) c1b = NonlinearConstraint(constraint1, -np.inf, 0, jacobian1) c2a = NonlinearConstraint(constraint2, -np.inf, 0) c2b = NonlinearConstraint(constraint2, -np.inf, 0, jacobian2) # test using the three methods that accept bounds, use derivatives, and # have some trouble when bounds fix variables methods = ('L-BFGS-B', 'SLSQP', 'TNC') # test w/out gradient, w/ gradient, and w/ combined objective/gradient kwds = ({"fun": func, "jac": False}, {"fun": func, "jac": grad}, {"fun": (lambda x: (func(x), grad(x))), "jac": True}) # test with both old- and new-style bounds bound_types = (lambda lb, ub: list(zip(lb, ub)), Bounds) # Test for many combinations of constraints w/ and w/out jacobian # Pairs in format: (test constraints, reference constraints) # (always use analytical jacobian in reference) constraints = ((None, None), ([], []), (c1a, c1b), (c2b, c2b), ([c1b], [c1b]), ([c2a], [c2b]), ([c1a, c2a], [c1b, c2b]), ([c1a, c2b], [c1b, c2b]), ([c1b, c2b], [c1b, c2b])) # test with and without callback function callbacks = (None, callback) data = {"methods": methods, "kwds": kwds, "bound_types": bound_types, "constraints": constraints, "callbacks": callbacks, "lb": lb, "ub": ub, "x0": x0, "i_eb": i_eb} return data eb_data = setup_test_equal_bounds() # This test is about handling fixed variables, not the accuracy of the solvers @pytest.mark.xfail_on_32bit("Failures due to floating point issues, not logic") @pytest.mark.parametrize('method', eb_data["methods"]) @pytest.mark.parametrize('kwds', eb_data["kwds"]) @pytest.mark.parametrize('bound_type', eb_data["bound_types"]) @pytest.mark.parametrize('constraints', eb_data["constraints"]) @pytest.mark.parametrize('callback', eb_data["callbacks"]) def 
test_equal_bounds(method, kwds, bound_type, constraints, callback): """ Tests that minimizers still work if (bounds.lb == bounds.ub).any() gh12502 - Divide by zero in Jacobian numerical differentiation when equality bounds constraints are used """ # GH-15051; slightly more skips than necessary; hopefully fixed by GH-14882 if (platform.machine() == 'aarch64' and method == "TNC" and kwds["jac"] is False and callback is not None): pytest.skip('Tolerance violation on aarch') lb, ub = eb_data["lb"], eb_data["ub"] x0, i_eb = eb_data["x0"], eb_data["i_eb"] test_constraints, reference_constraints = constraints if test_constraints and not method == 'SLSQP': pytest.skip('Only SLSQP supports nonlinear constraints') # reference constraints always have analytical jacobian # if test constraints are not the same, we'll need finite differences fd_needed = (test_constraints != reference_constraints) bounds = bound_type(lb, ub) # old- or new-style kwds.update({"x0": x0, "method": method, "bounds": bounds, "constraints": test_constraints, "callback": callback}) res = optimize.minimize(**kwds) expected = optimize.minimize(optimize.rosen, x0, method=method, jac=optimize.rosen_der, bounds=bounds, constraints=reference_constraints) # compare the output of a solution with FD vs that of an analytic grad assert res.success assert_allclose(res.fun, expected.fun, rtol=1e-6) assert_allclose(res.x, expected.x, rtol=5e-4) if fd_needed or kwds['jac'] is False: expected.jac[i_eb] = np.nan assert res.jac.shape[0] == 4 assert_allclose(res.jac[i_eb], expected.jac[i_eb], rtol=1e-6) if not (kwds['jac'] or test_constraints or isinstance(bounds, Bounds)): # compare the output to an equivalent FD minimization that doesn't # need factorization def fun(x): new_x = np.array([np.nan, 2, np.nan, -1]) new_x[[0, 2]] = x return optimize.rosen(new_x) fd_res = optimize.minimize(fun, x0[[0, 2]], method=method, bounds=bounds[::2]) assert_allclose(res.fun, fd_res.fun) # TODO this test should really be equivalent to factorized version # above, down to res.nfev. However, testing found that when TNC is # called with or without a callback the output is different. The two # should be the same! This indicates that the TNC callback may be # mutating something when it should't. assert_allclose(res.x[[0, 2]], fd_res.x, rtol=2e-6) @pytest.mark.parametrize('method', eb_data["methods"]) def test_all_bounds_equal(method): # this only tests methods that have parameters factored out when lb==ub # it does not test other methods that work with bounds def f(x, p1=1): return np.linalg.norm(x) + p1 bounds = [(1, 1), (2, 2)] x0 = (1.0, 3.0) res = optimize.minimize(f, x0, bounds=bounds, method=method) assert res.success assert_allclose(res.fun, f([1.0, 2.0])) assert res.nfev == 1 assert res.message == 'All independent variables were fixed by bounds.' 
args = (2,) res = optimize.minimize(f, x0, bounds=bounds, method=method, args=args) assert res.success assert_allclose(res.fun, f([1.0, 2.0], 2)) if method.upper() == 'SLSQP': def con(x): return np.sum(x) nlc = NonlinearConstraint(con, -np.inf, 0.0) res = optimize.minimize( f, x0, bounds=bounds, method=method, constraints=[nlc] ) assert res.success is False assert_allclose(res.fun, f([1.0, 2.0])) assert res.nfev == 1 message = "All independent variables were fixed by bounds, but" assert res.message.startswith(message) nlc = NonlinearConstraint(con, -np.inf, 4) res = optimize.minimize( f, x0, bounds=bounds, method=method, constraints=[nlc] ) assert res.success is True assert_allclose(res.fun, f([1.0, 2.0])) assert res.nfev == 1 message = "All independent variables were fixed by bounds at values" assert res.message.startswith(message) def test_eb_constraints(): # make sure constraint functions aren't overwritten when equal bounds # are employed, and a parameter is factored out. GH14859 def f(x): return x[0]**3 + x[1]**2 + x[2]*x[3] def cfun(x): return x[0] + x[1] + x[2] + x[3] - 40 constraints = [{'type': 'ineq', 'fun': cfun}] bounds = [(0, 20)] * 4 bounds[1] = (5, 5) optimize.minimize( f, x0=[1, 2, 3, 4], method='SLSQP', bounds=bounds, constraints=constraints, ) assert constraints[0]['fun'] == cfun def test_show_options(): solver_methods = { 'minimize': MINIMIZE_METHODS, 'minimize_scalar': MINIMIZE_SCALAR_METHODS, 'root': ROOT_METHODS, 'root_scalar': ROOT_SCALAR_METHODS, 'linprog': LINPROG_METHODS, 'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS, } for solver, methods in solver_methods.items(): for method in methods: # testing that `show_options` works without error show_options(solver, method) unknown_solver_method = { 'minimize': "ekki", # unknown method 'maximize': "cg", # unknown solver 'maximize_scalar': "ekki", # unknown solver and method } for solver, method in unknown_solver_method.items(): # testing that `show_options` raises ValueError assert_raises(ValueError, show_options, solver, method) def test_bounds_with_list(): # gh13501. Bounds created with lists weren't working for Powell. bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.]) optimize.minimize( optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds ) def test_x_overwritten_user_function(): # if the user overwrites the x-array in the user function it's likely # that the minimizer stops working properly. 
# gh13740 def fquad(x): a = np.arange(np.size(x)) x -= a x *= x return np.sum(x) def fquad_jac(x): a = np.arange(np.size(x)) x *= 2 x -= 2 * a return x def fquad_hess(x): return np.eye(np.size(x)) * 2.0 meth_jac = [ 'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr' ] meth_hess = [ 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr' ] x0 = np.ones(5) * 1.5 for meth in MINIMIZE_METHODS: jac = None hess = None if meth in meth_jac: jac = fquad_jac if meth in meth_hess: hess = fquad_hess res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess) assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4) class TestGlobalOptimization: def test_optimize_result_attributes(self): def func(x): return x ** 2 # Note that `brute` solver does not return `OptimizeResult` results = [optimize.basinhopping(func, x0=1), optimize.differential_evolution(func, [(-4, 4)]), optimize.shgo(func, [(-4, 4)]), optimize.dual_annealing(func, [(-4, 4)]), optimize.direct(func, [(-4, 4)]), ] for result in results: assert isinstance(result, optimize.OptimizeResult) assert hasattr(result, "x") assert hasattr(result, "success") assert hasattr(result, "message") assert hasattr(result, "fun") assert hasattr(result, "nfev") assert hasattr(result, "nit") def test_approx_fprime(): # check that approx_fprime (serviced by approx_derivative) works for # jac and hess g = optimize.approx_fprime(himmelblau_x0, himmelblau) assert_allclose(g, himmelblau_grad(himmelblau_x0), rtol=5e-6) h = optimize.approx_fprime(himmelblau_x0, himmelblau_grad) assert_allclose(h, himmelblau_hess(himmelblau_x0), rtol=5e-6) def test_gh12594(): # gh-12594 reported an error in `_linesearch_powell` and # `_line_for_search` when `Bounds` was passed lists instead of arrays. # Check that results are the same whether the inputs are lists or arrays. def f(x): return x[0]**2 + (x[1] - 1)**2 bounds = Bounds(lb=[-10, -10], ub=[10, 10]) res = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) bounds = Bounds(lb=np.array([-10, -10]), ub=np.array([10, 10])) ref = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds) assert_allclose(res.fun, ref.fun) assert_allclose(res.x, ref.x)
119295
37.934726
102
py
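A minimal sketch of the equal-bounds behaviour exercised by the tests above: `optimize.minimize` accepts `Bounds` in which some lower and upper bounds coincide, which fixes those variables. The starting point and bound values below are illustrative assumptions, not taken from the test suite.

import numpy as np
from scipy.optimize import minimize, rosen, Bounds

# Fix the second variable at 1.0 by giving it equal lower and upper bounds;
# the remaining variables are optimized as usual.
bounds = Bounds(lb=[-2.0, 1.0, -2.0], ub=[2.0, 1.0, 2.0])
x0 = np.array([0.5, 1.0, 1.2])
res = minimize(rosen, x0, method='SLSQP', bounds=bounds)
print(res.x, res.fun)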
scipy
scipy-main/scipy/optimize/tests/test_direct.py
""" Unit test for DIRECT optimization algorithm. """ from numpy.testing import (assert_allclose, assert_array_less) import pytest import numpy as np from scipy.optimize import direct, Bounds class TestDIRECT: def setup_method(self): self.fun_calls = 0 self.bounds_sphere = 4*[(-2, 3)] self.optimum_sphere_pos = np.zeros((4, )) self.optimum_sphere = 0.0 self.bounds_stylinski_tang = Bounds([-4., -4.], [4., 4.]) self.maxiter = 1000 # test functions def sphere(self, x): self.fun_calls += 1 return np.square(x).sum() def inv(self, x): if np.sum(x) == 0: raise ZeroDivisionError() return 1/np.sum(x) def nan_fun(self, x): return np.nan def inf_fun(self, x): return np.inf def styblinski_tang(self, pos): x, y = pos return 0.5 * (x**4 - 16 * x**2 + 5 * x + y**4 - 16 * y**2 + 5 * y) @pytest.mark.parametrize("locally_biased", [True, False]) def test_direct(self, locally_biased): res = direct(self.sphere, self.bounds_sphere, locally_biased=locally_biased) # test accuracy assert_allclose(res.x, self.optimum_sphere_pos, rtol=1e-3, atol=1e-3) assert_allclose(res.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5) # test that result lies within bounds _bounds = np.asarray(self.bounds_sphere) assert_array_less(_bounds[:, 0], res.x) assert_array_less(res.x, _bounds[:, 1]) # test number of function evaluations. Original DIRECT overshoots by # up to 500 evaluations in last iteration assert res.nfev <= 1000 * (len(self.bounds_sphere) + 1) # test that number of function evaluations is correct assert res.nfev == self.fun_calls # test that number of iterations is below supplied maximum assert res.nit <= self.maxiter @pytest.mark.parametrize("locally_biased", [True, False]) def test_direct_callback(self, locally_biased): # test that callback does not change the result res = direct(self.sphere, self.bounds_sphere, locally_biased=locally_biased) def callback(x): x = 2*x dummy = np.square(x) print("DIRECT minimization algorithm callback test") return dummy res_callback = direct(self.sphere, self.bounds_sphere, locally_biased=locally_biased, callback=callback) assert_allclose(res.x, res_callback.x) assert res.nit == res_callback.nit assert res.nfev == res_callback.nfev assert res.status == res_callback.status assert res.success == res_callback.success assert res.fun == res_callback.fun assert_allclose(res.x, res_callback.x) assert res.message == res_callback.message # test accuracy assert_allclose(res_callback.x, self.optimum_sphere_pos, rtol=1e-3, atol=1e-3) assert_allclose(res_callback.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5) @pytest.mark.parametrize("locally_biased", [True, False]) def test_exception(self, locally_biased): bounds = 4*[(-10, 10)] with pytest.raises(ZeroDivisionError): direct(self.inv, bounds=bounds, locally_biased=locally_biased) @pytest.mark.parametrize("locally_biased", [True, False]) def test_nan(self, locally_biased): bounds = 4*[(-10, 10)] direct(self.nan_fun, bounds=bounds, locally_biased=locally_biased) @pytest.mark.parametrize("len_tol", [1e-3, 1e-4]) @pytest.mark.parametrize("locally_biased", [True, False]) def test_len_tol(self, len_tol, locally_biased): bounds = 4*[(-10., 10.)] res = direct(self.sphere, bounds=bounds, len_tol=len_tol, vol_tol=1e-30, locally_biased=locally_biased) assert res.status == 5 assert res.success assert_allclose(res.x, np.zeros((4, ))) message = ("The side length measure of the hyperrectangle containing " "the lowest function value found is below " f"len_tol={len_tol}") assert res.message == message @pytest.mark.parametrize("vol_tol", [1e-6, 1e-8]) 
@pytest.mark.parametrize("locally_biased", [True, False]) def test_vol_tol(self, vol_tol, locally_biased): bounds = 4*[(-10., 10.)] res = direct(self.sphere, bounds=bounds, vol_tol=vol_tol, len_tol=0., locally_biased=locally_biased) assert res.status == 4 assert res.success assert_allclose(res.x, np.zeros((4, ))) message = ("The volume of the hyperrectangle containing the lowest " f"function value found is below vol_tol={vol_tol}") assert res.message == message @pytest.mark.parametrize("f_min_rtol", [1e-3, 1e-5, 1e-7]) @pytest.mark.parametrize("locally_biased", [True, False]) def test_f_min(self, f_min_rtol, locally_biased): # test that desired function value is reached within # relative tolerance of f_min_rtol f_min = 1. bounds = 4*[(-2., 10.)] res = direct(self.sphere, bounds=bounds, f_min=f_min, f_min_rtol=f_min_rtol, locally_biased=locally_biased) assert res.status == 3 assert res.success assert res.fun < f_min * (1. + f_min_rtol) message = ("The best function value found is within a relative " f"error={f_min_rtol} of the (known) global optimum f_min") assert res.message == message def circle_with_args(self, x, a, b): return np.square(x[0] - a) + np.square(x[1] - b).sum() @pytest.mark.parametrize("locally_biased", [True, False]) def test_f_circle_with_args(self, locally_biased): bounds = 2*[(-2.0, 2.0)] res = direct(self.circle_with_args, bounds, args=(1, 1), maxfun=1250, locally_biased=locally_biased) assert_allclose(res.x, np.array([1., 1.]), rtol=1e-5) @pytest.mark.parametrize("locally_biased", [True, False]) def test_failure_maxfun(self, locally_biased): # test that if optimization runs for the maximal number of # evaluations, success = False is returned maxfun = 100 result = direct(self.styblinski_tang, self.bounds_stylinski_tang, maxfun=maxfun, locally_biased=locally_biased) assert result.success is False assert result.status == 1 assert result.nfev >= maxfun message = ("Number of function evaluations done is " f"larger than maxfun={maxfun}") assert result.message == message @pytest.mark.parametrize("locally_biased", [True, False]) def test_failure_maxiter(self, locally_biased): # test that if optimization runs for the maximal number of # iterations, success = False is returned maxiter = 10 result = direct(self.styblinski_tang, self.bounds_stylinski_tang, maxiter=maxiter, locally_biased=locally_biased) assert result.success is False assert result.status == 2 assert result.nit >= maxiter message = f"Number of iterations is larger than maxiter={maxiter}" assert result.message == message @pytest.mark.parametrize("locally_biased", [True, False]) def test_bounds_variants(self, locally_biased): # test that new and old bounds yield same result lb = [-6., 1., -5.] ub = [-1., 3., 5.] 
x_opt = np.array([-1., 1., 0.]) bounds_old = list(zip(lb, ub)) bounds_new = Bounds(lb, ub) res_old_bounds = direct(self.sphere, bounds_old, locally_biased=locally_biased) res_new_bounds = direct(self.sphere, bounds_new, locally_biased=locally_biased) assert res_new_bounds.nfev == res_old_bounds.nfev assert res_new_bounds.message == res_old_bounds.message assert res_new_bounds.success == res_old_bounds.success assert res_new_bounds.nit == res_old_bounds.nit assert_allclose(res_new_bounds.x, res_old_bounds.x) assert_allclose(res_new_bounds.x, x_opt, rtol=1e-2) @pytest.mark.parametrize("locally_biased", [True, False]) @pytest.mark.parametrize("eps", [1e-5, 1e-4, 1e-3]) def test_epsilon(self, eps, locally_biased): result = direct(self.styblinski_tang, self.bounds_stylinski_tang, eps=eps, vol_tol=1e-6, locally_biased=locally_biased) assert result.status == 4 assert result.success @pytest.mark.xslow @pytest.mark.parametrize("locally_biased", [True, False]) def test_no_segmentation_fault(self, locally_biased): # test that an excessive number of function evaluations # does not result in segmentation fault bounds = [(-5., 20.)] * 100 result = direct(self.sphere, bounds, maxfun=10000000, maxiter=1000000, locally_biased=locally_biased) assert result is not None @pytest.mark.parametrize("locally_biased", [True, False]) def test_inf_fun(self, locally_biased): # test that an objective value of infinity does not crash DIRECT bounds = [(-5., 5.)] * 2 result = direct(self.inf_fun, bounds, locally_biased=locally_biased) assert result is not None @pytest.mark.parametrize("len_tol", [-1, 2]) def test_len_tol_validation(self, len_tol): error_msg = "len_tol must be between 0 and 1." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, len_tol=len_tol) @pytest.mark.parametrize("vol_tol", [-1, 2]) def test_vol_tol_validation(self, vol_tol): error_msg = "vol_tol must be between 0 and 1." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, vol_tol=vol_tol) @pytest.mark.parametrize("f_min_rtol", [-1, 2]) def test_fmin_rtol_validation(self, f_min_rtol): error_msg = "f_min_rtol must be between 0 and 1." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, f_min_rtol=f_min_rtol, f_min=0.) @pytest.mark.parametrize("maxfun", [1.5, "string", (1, 2)]) def test_maxfun_wrong_type(self, maxfun): error_msg = "maxfun must be of type int." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, maxfun=maxfun) @pytest.mark.parametrize("maxiter", [1.5, "string", (1, 2)]) def test_maxiter_wrong_type(self, maxiter): error_msg = "maxiter must be of type int." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, maxiter=maxiter) def test_negative_maxiter(self): error_msg = "maxiter must be > 0." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, maxiter=-1) def test_negative_maxfun(self): error_msg = "maxfun must be > 0." 
with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, maxfun=-1) @pytest.mark.parametrize("bounds", ["bounds", 2., 0]) def test_invalid_bounds_type(self, bounds): error_msg = ("bounds must be a sequence or " "instance of Bounds class") with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, bounds) @pytest.mark.parametrize("bounds", [Bounds([-1., -1], [-2, 1]), Bounds([-np.nan, -1], [-2, np.nan]), ] ) def test_incorrect_bounds(self, bounds): error_msg = 'Bounds are not consistent min < max' with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, bounds) def test_inf_bounds(self): error_msg = 'Bounds must not be inf.' bounds = Bounds([-np.inf, -1], [-2, np.inf]) with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, bounds) @pytest.mark.parametrize("locally_biased", ["bias", [0, 0], 2.]) def test_locally_biased_validation(self, locally_biased): error_msg = 'locally_biased must be True or False.' with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, locally_biased=locally_biased)
13152
40.231975
78
py
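A minimal usage sketch of the DIRECT solver tested above; the sphere objective and bounds mirror the test setup, but the exact bound values are illustrative.

import numpy as np
from scipy.optimize import direct, Bounds

def sphere(x):
    # Simple convex objective with its minimum at the origin.
    return np.square(x).sum()

bounds = Bounds([-2.0, -2.0], [3.0, 3.0])
res = direct(sphere, bounds, locally_biased=False)
print(res.x, res.fun, res.nfev)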
scipy
scipy-main/scipy/optimize/tests/test__dual_annealing.py
# Dual annealing unit tests implementation. # Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>, # Yang Xiang <yang.xiang@pmi.com> # Author: Sylvain Gubian, PMP S.A. """ Unit tests for the dual annealing global optimizer """ from scipy.optimize import dual_annealing, Bounds from scipy.optimize._dual_annealing import EnergyState from scipy.optimize._dual_annealing import LocalSearchWrapper from scipy.optimize._dual_annealing import ObjectiveFunWrapper from scipy.optimize._dual_annealing import StrategyChain from scipy.optimize._dual_annealing import VisitingDistribution from scipy.optimize import rosen, rosen_der import pytest import numpy as np from numpy.testing import assert_equal, assert_allclose, assert_array_less from pytest import raises as assert_raises from scipy._lib._util import check_random_state class TestDualAnnealing: def setup_method(self): # A function that returns always infinity for initialization tests self.weirdfunc = lambda x: np.inf # 2-D bounds for testing function self.ld_bounds = [(-5.12, 5.12)] * 2 # 4-D bounds for testing function self.hd_bounds = self.ld_bounds * 4 # Number of values to be generated for testing visit function self.nbtestvalues = 5000 self.high_temperature = 5230 self.low_temperature = 0.1 self.qv = 2.62 self.seed = 1234 self.rs = check_random_state(self.seed) self.nb_fun_call = 0 self.ngev = 0 def callback(self, x, f, context): # For testing callback mechanism. Should stop for e <= 1 as # the callback function returns True if f <= 1.0: return True def func(self, x, args=()): # Using Rastrigin function for performing tests if args: shift = args else: shift = 0 y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * ( x - shift))) + 10 * np.size(x) + shift self.nb_fun_call += 1 return y def rosen_der_wrapper(self, x, args=()): self.ngev += 1 return rosen_der(x, *args) # FIXME: there are some discontinuities in behaviour as a function of `qv`, # this needs investigating - see gh-12384 @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9]) def test_visiting_stepping(self, qv): lu = list(zip(*self.ld_bounds)) lower = np.array(lu[0]) upper = np.array(lu[1]) dim = lower.size vd = VisitingDistribution(lower, upper, qv, self.rs) values = np.zeros(dim) x_step_low = vd.visiting(values, 0, self.high_temperature) # Make sure that only the first component is changed assert_equal(np.not_equal(x_step_low, 0), True) values = np.zeros(dim) x_step_high = vd.visiting(values, dim, self.high_temperature) # Make sure that component other than at dim has changed assert_equal(np.not_equal(x_step_high[0], 0), True) @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9]) def test_visiting_dist_high_temperature(self, qv): lu = list(zip(*self.ld_bounds)) lower = np.array(lu[0]) upper = np.array(lu[1]) vd = VisitingDistribution(lower, upper, qv, self.rs) # values = np.zeros(self.nbtestvalues) # for i in np.arange(self.nbtestvalues): # values[i] = vd.visit_fn(self.high_temperature) values = vd.visit_fn(self.high_temperature, self.nbtestvalues) # Visiting distribution is a distorted version of Cauchy-Lorentz # distribution, and as no 1st and higher moments (no mean defined, # no variance defined). 
# Check that big tails values are generated assert_array_less(np.min(values), 1e-10) assert_array_less(1e+10, np.max(values)) def test_reset(self): owf = ObjectiveFunWrapper(self.weirdfunc) lu = list(zip(*self.ld_bounds)) lower = np.array(lu[0]) upper = np.array(lu[1]) es = EnergyState(lower, upper) assert_raises(ValueError, es.reset, owf, check_random_state(None)) def test_low_dim(self): ret = dual_annealing( self.func, self.ld_bounds, seed=self.seed) assert_allclose(ret.fun, 0., atol=1e-12) assert ret.success def test_high_dim(self): ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed) assert_allclose(ret.fun, 0., atol=1e-12) assert ret.success def test_low_dim_no_ls(self): ret = dual_annealing(self.func, self.ld_bounds, no_local_search=True, seed=self.seed) assert_allclose(ret.fun, 0., atol=1e-4) def test_high_dim_no_ls(self): ret = dual_annealing(self.func, self.hd_bounds, no_local_search=True, seed=self.seed) assert_allclose(ret.fun, 0., atol=1e-4) def test_nb_fun_call(self): ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed) assert_equal(self.nb_fun_call, ret.nfev) def test_nb_fun_call_no_ls(self): ret = dual_annealing(self.func, self.ld_bounds, no_local_search=True, seed=self.seed) assert_equal(self.nb_fun_call, ret.nfev) def test_max_reinit(self): assert_raises(ValueError, dual_annealing, self.weirdfunc, self.ld_bounds) def test_reproduce(self): res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed) res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed) res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed) # If we have reproducible results, x components found has to # be exactly the same, which is not the case with no seeding assert_equal(res1.x, res2.x) assert_equal(res1.x, res3.x) def test_rand_gen(self): # check that np.random.Generator can be used (numpy >= 1.17) # obtain a np.random.Generator object rng = np.random.default_rng(1) res1 = dual_annealing(self.func, self.ld_bounds, seed=rng) # seed again rng = np.random.default_rng(1) res2 = dual_annealing(self.func, self.ld_bounds, seed=rng) # If we have reproducible results, x components found has to # be exactly the same, which is not the case with no seeding assert_equal(res1.x, res2.x) def test_bounds_integrity(self): wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)] assert_raises(ValueError, dual_annealing, self.func, wrong_bounds) def test_bound_validity(self): invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)] assert_raises(ValueError, dual_annealing, self.func, invalid_bounds) invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)] assert_raises(ValueError, dual_annealing, self.func, invalid_bounds) invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)] assert_raises(ValueError, dual_annealing, self.func, invalid_bounds) def test_deprecated_local_search_options_bounds(self): def func(x): return np.sum((x - 5) * (x - 1)) bounds = list(zip([-6, -5], [6, 5])) # Test bounds can be passed (see gh-10831) with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "): dual_annealing( func, bounds=bounds, minimizer_kwargs={"method": "CG", "bounds": bounds}) def test_minimizer_kwargs_bounds(self): def func(x): return np.sum((x - 5) * (x - 1)) bounds = list(zip([-6, -5], [6, 5])) # Test bounds can be passed (see gh-10831) dual_annealing( func, bounds=bounds, minimizer_kwargs={"method": "SLSQP", "bounds": bounds}) with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "): dual_annealing( func, bounds=bounds, minimizer_kwargs={"method": "CG", "bounds": bounds}) def 
test_max_fun_ls(self): ret = dual_annealing(self.func, self.ld_bounds, maxfun=100, seed=self.seed) ls_max_iter = min(max( len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO, LocalSearchWrapper.LS_MAXITER_MIN), LocalSearchWrapper.LS_MAXITER_MAX) assert ret.nfev <= 100 + ls_max_iter assert not ret.success def test_max_fun_no_ls(self): ret = dual_annealing(self.func, self.ld_bounds, no_local_search=True, maxfun=500, seed=self.seed) assert ret.nfev <= 500 assert not ret.success def test_maxiter(self): ret = dual_annealing(self.func, self.ld_bounds, maxiter=700, seed=self.seed) assert ret.nit <= 700 # Testing that args are passed correctly for dual_annealing def test_fun_args_ls(self): ret = dual_annealing(self.func, self.ld_bounds, args=((3.14159,)), seed=self.seed) assert_allclose(ret.fun, 3.14159, atol=1e-6) # Testing that args are passed correctly for pure simulated annealing def test_fun_args_no_ls(self): ret = dual_annealing(self.func, self.ld_bounds, args=((3.14159, )), no_local_search=True, seed=self.seed) assert_allclose(ret.fun, 3.14159, atol=1e-4) def test_callback_stop(self): # Testing that callback make the algorithm stop for # fun value <= 1.0 (see callback method) ret = dual_annealing(self.func, self.ld_bounds, callback=self.callback, seed=self.seed) assert ret.fun <= 1.0 assert 'stop early' in ret.message[0] assert not ret.success @pytest.mark.parametrize('method, atol', [ ('Nelder-Mead', 2e-5), ('COBYLA', 1e-5), ('Powell', 1e-8), ('CG', 1e-8), ('BFGS', 1e-8), ('TNC', 1e-8), ('SLSQP', 2e-7), ]) def test_multi_ls_minimizer(self, method, atol): ret = dual_annealing(self.func, self.ld_bounds, minimizer_kwargs=dict(method=method), seed=self.seed) assert_allclose(ret.fun, 0., atol=atol) def test_wrong_restart_temp(self): assert_raises(ValueError, dual_annealing, self.func, self.ld_bounds, restart_temp_ratio=1) assert_raises(ValueError, dual_annealing, self.func, self.ld_bounds, restart_temp_ratio=0) def test_gradient_gnev(self): minimizer_opts = { 'jac': self.rosen_der_wrapper, } ret = dual_annealing(rosen, self.ld_bounds, minimizer_kwargs=minimizer_opts, seed=self.seed) assert ret.njev == self.ngev def test_from_docstring(self): def func(x): return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x) lw = [-5.12] * 10 up = [5.12] * 10 ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234) assert_allclose(ret.x, [-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09, -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09, -6.05775280e-09, -5.00668935e-09], atol=4e-8) assert_allclose(ret.fun, 0.000000, atol=5e-13) @pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [ (0, 100, 1000, 1.0097587941791923), (0, 2, 1000, 1.2599210498948732), (10, 100, 878, 0.8786035869128718), (10, 60, 695, 0.6812920690579612), (2, 100, 990, 0.9897404249173424), ]) def test_accept_reject_probabilistic( self, new_e, temp_step, accepted, accept_rate): # Test accepts unconditionally with e < current_energy and # probabilistically with e > current_energy rs = check_random_state(123) count_accepted = 0 iterations = 1000 accept_param = -5 current_energy = 1 for _ in range(iterations): energy_state = EnergyState(lower=None, upper=None) # Set energy state with current_energy, any location. energy_state.update_current(current_energy, [0]) chain = StrategyChain( accept_param, None, None, None, rs, energy_state) # Normally this is set in run() chain.temperature_step = temp_step # Check if update is accepted. 
chain.accept_reject(j=1, e=new_e, x_visit=[2]) if energy_state.current_energy == new_e: count_accepted += 1 assert count_accepted == accepted # Check accept rate pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param)) assert_allclose(rate, accept_rate) def test_bounds_class(self): # test that result does not depend on the bounds type def func(x): f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x) return f lw = [-5.12] * 5 up = [5.12] * 5 # Unbounded global minimum is all zeros. Most bounds below will force # a DV away from unbounded minimum and be active at solution. up[0] = -2.0 up[1] = -1.0 lw[3] = 1.0 lw[4] = 2.0 # run optimizations bounds = Bounds(lw, up) ret_bounds_class = dual_annealing(func, bounds=bounds, seed=1234) bounds_old = list(zip(lw, up)) ret_bounds_list = dual_annealing(func, bounds=bounds_old, seed=1234) # test that found minima, function evaluations and iterations match assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8) assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7) assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9) assert ret_bounds_list.nfev == ret_bounds_class.nfev def test_callable_jac_with_args_gh11052(self): # dual_annealing used to fail when `jac` was callable and `args` were # used; check that this is resolved. Example is from gh-11052. rng = np.random.default_rng(94253637693657847462) def f(x, power): return np.sum(np.exp(x ** power)) def jac(x, power): return np.exp(x ** power) * power * x ** (power - 1) res1 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng, minimizer_kwargs=dict(method='L-BFGS-B')) res2 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng, minimizer_kwargs=dict(method='L-BFGS-B', jac=jac)) assert_allclose(res1.fun, res2.fun, rtol=1e-6)
15173
38.931579
79
py
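A minimal sketch of `dual_annealing` on the Rastrigin function used in the tests above; the seed and dimensionality are illustrative assumptions.

import numpy as np
from scipy.optimize import dual_annealing

def rastrigin(x):
    # Multimodal test function with global minimum 0 at the origin.
    return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)

bounds = [(-5.12, 5.12)] * 4
res = dual_annealing(rastrigin, bounds, seed=1234)
print(res.x, res.fun)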
scipy
scipy-main/scipy/optimize/tests/test__basinhopping.py
""" Unit tests for the basin hopping global minimization algorithm. """ import copy from numpy.testing import (assert_almost_equal, assert_equal, assert_, assert_allclose) import pytest from pytest import raises as assert_raises import numpy as np from numpy import cos, sin from scipy.optimize import basinhopping, OptimizeResult from scipy.optimize._basinhopping import ( Storage, RandomDisplacement, Metropolis, AdaptiveStepsize) def func1d(x): f = cos(14.5 * x - 0.3) + (x + 0.2) * x df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2) return f, df def func2d_nograd(x): f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] return f def func2d(x): f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0] df = np.zeros(2) df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 df[1] = 2. * x[1] + 0.2 return f, df def func2d_easyderiv(x): f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0] df = np.zeros(2) df[0] = 4.0*x[0] + 2.0*x[1] - 6.0 df[1] = 2.0*x[0] + 4.0*x[1] return f, df class MyTakeStep1(RandomDisplacement): """use a copy of displace, but have it set a special parameter to make sure it's actually being used.""" def __init__(self): self.been_called = False super().__init__() def __call__(self, x): self.been_called = True return super().__call__(x) def myTakeStep2(x): """redo RandomDisplacement in function form without the attribute stepsize to make sure everything still works ok """ s = 0.5 x += np.random.uniform(-s, s, np.shape(x)) return x class MyAcceptTest: """pass a custom accept test This does nothing but make sure it's being used and ensure all the possible return values are accepted """ def __init__(self): self.been_called = False self.ncalls = 0 self.testres = [False, 'force accept', True, np.bool_(True), np.bool_(False), [], {}, 0, 1] def __call__(self, **kwargs): self.been_called = True self.ncalls += 1 if self.ncalls - 1 < len(self.testres): return self.testres[self.ncalls - 1] else: return True class MyCallBack: """pass a custom callback function This makes sure it's being used. It also returns True after 10 steps to ensure that it's stopping early. """ def __init__(self): self.been_called = False self.ncalls = 0 def __call__(self, x, f, accepted): self.been_called = True self.ncalls += 1 if self.ncalls == 10: return True class TestBasinHopping: def setup_method(self): """ Tests setup. Run tests based on the 1-D and 2-D functions described above. """ self.x0 = (1.0, [1.0, 1.0]) self.sol = (-0.195, np.array([-0.195, -0.1])) self.tol = 3 # number of decimal places self.niter = 100 self.disp = False # fix random seed np.random.seed(1234) self.kwargs = {"method": "L-BFGS-B", "jac": True} self.kwargs_nograd = {"method": "L-BFGS-B"} def test_TypeError(self): # test the TypeErrors are raised on bad input i = 1 # if take_step is passed, it must be callable assert_raises(TypeError, basinhopping, func2d, self.x0[i], take_step=1) # if accept_test is passed, it must be callable assert_raises(TypeError, basinhopping, func2d, self.x0[i], accept_test=1) def test_input_validation(self): msg = 'target_accept_rate has to be in range \\(0, 1\\)' with assert_raises(ValueError, match=msg): basinhopping(func1d, self.x0[0], target_accept_rate=0.) with assert_raises(ValueError, match=msg): basinhopping(func1d, self.x0[0], target_accept_rate=1.) msg = 'stepwise_factor has to be in range \\(0, 1\\)' with assert_raises(ValueError, match=msg): basinhopping(func1d, self.x0[0], stepwise_factor=0.) 
with assert_raises(ValueError, match=msg): basinhopping(func1d, self.x0[0], stepwise_factor=1.) def test_1d_grad(self): # test 1-D minimizations with gradient i = 0 res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, niter=self.niter, disp=self.disp) assert_almost_equal(res.x, self.sol[i], self.tol) def test_2d(self): # test 2d minimizations with gradient i = 1 res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, niter=self.niter, disp=self.disp) assert_almost_equal(res.x, self.sol[i], self.tol) assert_(res.nfev > 0) def test_njev(self): # test njev is returned correctly i = 1 minimizer_kwargs = self.kwargs.copy() # L-BFGS-B doesn't use njev, but BFGS does minimizer_kwargs["method"] = "BFGS" res = basinhopping(func2d, self.x0[i], minimizer_kwargs=minimizer_kwargs, niter=self.niter, disp=self.disp) assert_(res.nfev > 0) assert_equal(res.nfev, res.njev) def test_jac(self): # test Jacobian returned minimizer_kwargs = self.kwargs.copy() # BFGS returns a Jacobian minimizer_kwargs["method"] = "BFGS" res = basinhopping(func2d_easyderiv, [0.0, 0.0], minimizer_kwargs=minimizer_kwargs, niter=self.niter, disp=self.disp) assert_(hasattr(res.lowest_optimization_result, "jac")) # in this case, the Jacobian is just [df/dx, df/dy] _, jacobian = func2d_easyderiv(res.x) assert_almost_equal(res.lowest_optimization_result.jac, jacobian, self.tol) def test_2d_nograd(self): # test 2-D minimizations without gradient i = 1 res = basinhopping(func2d_nograd, self.x0[i], minimizer_kwargs=self.kwargs_nograd, niter=self.niter, disp=self.disp) assert_almost_equal(res.x, self.sol[i], self.tol) def test_all_minimizers(self): # Test 2-D minimizations with gradient. Nelder-Mead, Powell, and COBYLA # don't accept jac=True, so aren't included here. i = 1 methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP'] minimizer_kwargs = copy.copy(self.kwargs) for method in methods: minimizer_kwargs["method"] = method res = basinhopping(func2d, self.x0[i], minimizer_kwargs=minimizer_kwargs, niter=self.niter, disp=self.disp) assert_almost_equal(res.x, self.sol[i], self.tol) def test_all_nograd_minimizers(self): # Test 2-D minimizations without gradient. Newton-CG requires jac=True, # so not included here. 
i = 1 methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP', 'Nelder-Mead', 'Powell', 'COBYLA'] minimizer_kwargs = copy.copy(self.kwargs_nograd) for method in methods: minimizer_kwargs["method"] = method res = basinhopping(func2d_nograd, self.x0[i], minimizer_kwargs=minimizer_kwargs, niter=self.niter, disp=self.disp) tol = self.tol if method == 'COBYLA': tol = 2 assert_almost_equal(res.x, self.sol[i], decimal=tol) def test_pass_takestep(self): # test that passing a custom takestep works # also test that the stepsize is being adjusted takestep = MyTakeStep1() initial_step_size = takestep.stepsize i = 1 res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, niter=self.niter, disp=self.disp, take_step=takestep) assert_almost_equal(res.x, self.sol[i], self.tol) assert_(takestep.been_called) # make sure that the build in adaptive step size has been used assert_(initial_step_size != takestep.stepsize) def test_pass_simple_takestep(self): # test that passing a custom takestep without attribute stepsize takestep = myTakeStep2 i = 1 res = basinhopping(func2d_nograd, self.x0[i], minimizer_kwargs=self.kwargs_nograd, niter=self.niter, disp=self.disp, take_step=takestep) assert_almost_equal(res.x, self.sol[i], self.tol) def test_pass_accept_test(self): # test passing a custom accept test # makes sure it's being used and ensures all the possible return values # are accepted. accept_test = MyAcceptTest() i = 1 # there's no point in running it more than a few steps. basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, niter=10, disp=self.disp, accept_test=accept_test) assert_(accept_test.been_called) def test_pass_callback(self): # test passing a custom callback function # This makes sure it's being used. It also returns True after 10 steps # to ensure that it's stopping early. callback = MyCallBack() i = 1 # there's no point in running it more than a few steps. res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, niter=30, disp=self.disp, callback=callback) assert_(callback.been_called) assert_("callback" in res.message[0]) # One of the calls of MyCallBack is during BasinHoppingRunner # construction, so there are only 9 remaining before MyCallBack stops # the minimization. 
assert_equal(res.nit, 9) def test_minimizer_fail(self): # test if a minimizer fails i = 1 self.kwargs["options"] = dict(maxiter=0) self.niter = 10 res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs, niter=self.niter, disp=self.disp) # the number of failed minimizations should be the number of # iterations + 1 assert_equal(res.nit + 1, res.minimization_failures) def test_niter_zero(self): # gh5915, what happens if you call basinhopping with niter=0 i = 0 basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, niter=0, disp=self.disp) def test_seed_reproducibility(self): # seed should ensure reproducibility between runs minimizer_kwargs = {"method": "L-BFGS-B", "jac": True} f_1 = [] def callback(x, f, accepted): f_1.append(f) basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, niter=10, callback=callback, seed=10) f_2 = [] def callback2(x, f, accepted): f_2.append(f) basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, niter=10, callback=callback2, seed=10) assert_equal(np.array(f_1), np.array(f_2)) def test_random_gen(self): # check that np.random.Generator can be used (numpy >= 1.17) rng = np.random.default_rng(1) minimizer_kwargs = {"method": "L-BFGS-B", "jac": True} res1 = basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, niter=10, seed=rng) rng = np.random.default_rng(1) res2 = basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs, niter=10, seed=rng) assert_equal(res1.x, res2.x) def test_monotonic_basin_hopping(self): # test 1-D minimizations with gradient and T=0 i = 0 res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, niter=self.niter, disp=self.disp, T=0) assert_almost_equal(res.x, self.sol[i], self.tol) class Test_Storage: def setup_method(self): self.x0 = np.array(1) self.f0 = 0 minres = OptimizeResult(success=True) minres.x = self.x0 minres.fun = self.f0 self.storage = Storage(minres) def test_higher_f_rejected(self): new_minres = OptimizeResult(success=True) new_minres.x = self.x0 + 1 new_minres.fun = self.f0 + 1 ret = self.storage.update(new_minres) minres = self.storage.get_lowest() assert_equal(self.x0, minres.x) assert_equal(self.f0, minres.fun) assert_(not ret) @pytest.mark.parametrize('success', [True, False]) def test_lower_f_accepted(self, success): new_minres = OptimizeResult(success=success) new_minres.x = self.x0 + 1 new_minres.fun = self.f0 - 1 ret = self.storage.update(new_minres) minres = self.storage.get_lowest() assert (self.x0 != minres.x) == success # can't use `is` assert (self.f0 != minres.fun) == success # left side is NumPy bool assert ret is success class Test_RandomDisplacement: def setup_method(self): self.stepsize = 1.0 self.displace = RandomDisplacement(stepsize=self.stepsize) self.N = 300000 self.x0 = np.zeros([self.N]) def test_random(self): # the mean should be 0 # the variance should be (2*stepsize)**2 / 12 # note these tests are random, they will fail from time to time x = self.displace(self.x0) v = (2. * self.stepsize) ** 2 / 12 assert_almost_equal(np.mean(x), 0., 1) assert_almost_equal(np.var(x), v, 1) class Test_Metropolis: def setup_method(self): self.T = 2. self.met = Metropolis(self.T) self.res_new = OptimizeResult(success=True, fun=0.) self.res_old = OptimizeResult(success=True, fun=1.) 
def test_boolean_return(self): # the return must be a bool, else an error will be raised in # basinhopping ret = self.met(res_new=self.res_new, res_old=self.res_old) assert isinstance(ret, bool) def test_lower_f_accepted(self): assert_(self.met(res_new=self.res_new, res_old=self.res_old)) def test_accept(self): # test that steps are randomly accepted for f_new > f_old one_accept = False one_reject = False for i in range(1000): if one_accept and one_reject: break res_new = OptimizeResult(success=True, fun=1.) res_old = OptimizeResult(success=True, fun=0.5) ret = self.met(res_new=res_new, res_old=res_old) if ret: one_accept = True else: one_reject = True assert_(one_accept) assert_(one_reject) def test_GH7495(self): # an overflow in exp was producing a RuntimeWarning # create own object here in case someone changes self.T met = Metropolis(2) res_new = OptimizeResult(success=True, fun=0.) res_old = OptimizeResult(success=True, fun=2000) with np.errstate(over='raise'): met.accept_reject(res_new=res_new, res_old=res_old) def test_gh7799(self): # gh-7799 reported a problem in which local search was successful but # basinhopping returned an invalid solution. Show that this is fixed. def func(x): return (x**2-8)**2+(x+2)**2 x0 = -4 limit = 50 # Constrain to func value >= 50 con = {'type': 'ineq', 'fun': lambda x: func(x) - limit}, res = basinhopping(func, x0, 30, minimizer_kwargs={'constraints': con}) assert res.success assert_allclose(res.fun, limit, rtol=1e-6) def test_accept_gh7799(self): # Metropolis should not accept the result of an unsuccessful new local # search if the old local search was successful met = Metropolis(0) # monotonic basin hopping res_new = OptimizeResult(success=True, fun=0.) res_old = OptimizeResult(success=True, fun=1.) # if new local search was successful and energy is lower, accept assert met(res_new=res_new, res_old=res_old) # if new res is unsuccessful, don't accept - even if energy is lower res_new.success = False assert not met(res_new=res_new, res_old=res_old) # ...unless the old res was unsuccessful, too. In that case, why not? res_old.success = False assert met(res_new=res_new, res_old=res_old) def test_reject_all_gh7799(self): # Test the behavior when there is no feasible solution def fun(x): return x@x def constraint(x): return x + 1 kwargs = {'constraints': {'type': 'eq', 'fun': constraint}, 'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'} res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs) assert not res.success class Test_AdaptiveStepsize: def setup_method(self): self.stepsize = 1. self.ts = RandomDisplacement(stepsize=self.stepsize) self.target_accept_rate = 0.5 self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False, accept_rate=self.target_accept_rate) def test_adaptive_increase(self): # if few steps are rejected, the stepsize should increase x = 0. self.takestep(x) self.takestep.report(False) for i in range(self.takestep.interval): self.takestep(x) self.takestep.report(True) assert_(self.ts.stepsize > self.stepsize) def test_adaptive_decrease(self): # if few steps are rejected, the stepsize should increase x = 0. self.takestep(x) self.takestep.report(True) for i in range(self.takestep.interval): self.takestep(x) self.takestep.report(False) assert_(self.ts.stepsize < self.stepsize) def test_all_accepted(self): # test that everything works OK if all steps were accepted x = 0. 
for i in range(self.takestep.interval + 1): self.takestep(x) self.takestep.report(True) assert_(self.ts.stepsize > self.stepsize) def test_all_rejected(self): # test that everything works OK if all steps were rejected x = 0. for i in range(self.takestep.interval + 1): self.takestep(x) self.takestep.report(False) assert_(self.ts.stepsize < self.stepsize)
18897
34.927757
79
py
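A minimal sketch of `basinhopping` with a gradient-free local minimizer, using the 2-D objective from the tests above; the number of hops and the seed are illustrative choices.

import numpy as np
from scipy.optimize import basinhopping

def func2d(x):
    # Same 2-D landscape used in the tests: several local minima.
    return (np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1]
            + (x[0] + 0.2) * x[0])

res = basinhopping(func2d, [1.0, 1.0],
                   minimizer_kwargs={"method": "L-BFGS-B"},
                   niter=100, seed=1234)
print(res.x, res.fun)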
scipy
scipy-main/scipy/optimize/tests/test__remove_redundancy.py
""" Unit test for Linear Programming via Simplex Algorithm. """ # TODO: add tests for: # https://github.com/scipy/scipy/issues/5400 # https://github.com/scipy/scipy/issues/6690 import numpy as np from numpy.testing import ( assert_, assert_allclose, assert_equal) from .test_linprog import magic_square from scipy.optimize._remove_redundancy import _remove_redundancy_svd from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse from scipy.optimize._remove_redundancy import _remove_redundancy_id from scipy.sparse import csc_matrix def setup_module(): np.random.seed(2017) def redundancy_removed(A, B): """Checks whether a matrix contains only independent rows of another""" for rowA in A: # `rowA in B` is not a reliable check for rowB in B: if np.all(rowA == rowB): break else: return False return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B) class RRCommonTests: def test_no_redundancy(self): m, n = 10, 10 A0 = np.random.rand(m, n) b0 = np.random.rand(m) A1, b1, status, message = self.rr(A0, b0) assert_allclose(A0, A1) assert_allclose(b0, b1) assert_equal(status, 0) def test_infeasible_zero_row(self): A = np.eye(3) A[1, :] = 0 b = np.random.rand(3) A1, b1, status, message = self.rr(A, b) assert_equal(status, 2) def test_remove_zero_row(self): A = np.eye(3) A[1, :] = 0 b = np.random.rand(3) b[1] = 0 A1, b1, status, message = self.rr(A, b) assert_equal(status, 0) assert_allclose(A1, A[[0, 2], :]) assert_allclose(b1, b[[0, 2]]) def test_infeasible_m_gt_n(self): m, n = 20, 10 A0 = np.random.rand(m, n) b0 = np.random.rand(m) A1, b1, status, message = self.rr(A0, b0) assert_equal(status, 2) def test_infeasible_m_eq_n(self): m, n = 10, 10 A0 = np.random.rand(m, n) b0 = np.random.rand(m) A0[-1, :] = 2 * A0[-2, :] A1, b1, status, message = self.rr(A0, b0) assert_equal(status, 2) def test_infeasible_m_lt_n(self): m, n = 9, 10 A0 = np.random.rand(m, n) b0 = np.random.rand(m) A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) A1, b1, status, message = self.rr(A0, b0) assert_equal(status, 2) def test_m_gt_n(self): np.random.seed(2032) m, n = 20, 10 A0 = np.random.rand(m, n) b0 = np.random.rand(m) x = np.linalg.solve(A0[:n, :], b0[:n]) b0[n:] = A0[n:, :].dot(x) A1, b1, status, message = self.rr(A0, b0) assert_equal(status, 0) assert_equal(A1.shape[0], n) assert_equal(np.linalg.matrix_rank(A1), n) def test_m_gt_n_rank_deficient(self): m, n = 20, 10 A0 = np.zeros((m, n)) A0[:, 0] = 1 b0 = np.ones(m) A1, b1, status, message = self.rr(A0, b0) assert_equal(status, 0) assert_allclose(A1, A0[0:1, :]) assert_allclose(b1, b0[0]) def test_m_lt_n_rank_deficient(self): m, n = 9, 10 A0 = np.random.rand(m, n) b0 = np.random.rand(m) A0[-1, :] = np.arange(m - 1).dot(A0[:-1]) b0[-1] = np.arange(m - 1).dot(b0[:-1]) A1, b1, status, message = self.rr(A0, b0) assert_equal(status, 0) assert_equal(A1.shape[0], 8) assert_equal(np.linalg.matrix_rank(A1), 8) def test_dense1(self): A = np.ones((6, 6)) A[0, :3] = 0 A[1, 3:] = 0 A[3:, ::2] = -1 A[3, :2] = 0 A[4, 2:] = 0 b = np.zeros(A.shape[0]) A1, b1, status, message = self.rr(A, b) assert_(redundancy_removed(A1, A)) assert_equal(status, 0) def test_dense2(self): A = np.eye(6) A[-2, -1] = 1 A[-1, :] = 1 b = np.zeros(A.shape[0]) A1, b1, status, message = self.rr(A, b) assert_(redundancy_removed(A1, A)) assert_equal(status, 0) def test_dense3(self): A = np.eye(6) A[-2, -1] = 1 A[-1, :] = 1 b = np.random.rand(A.shape[0]) b[-1] = np.sum(b[:-1]) A1, b1, status, message = self.rr(A, b) 
assert_(redundancy_removed(A1, A)) assert_equal(status, 0) def test_m_gt_n_sparse(self): np.random.seed(2013) m, n = 20, 5 p = 0.1 A = np.random.rand(m, n) A[np.random.rand(m, n) > p] = 0 rank = np.linalg.matrix_rank(A) b = np.zeros(A.shape[0]) A1, b1, status, message = self.rr(A, b) assert_equal(status, 0) assert_equal(A1.shape[0], rank) assert_equal(np.linalg.matrix_rank(A1), rank) def test_m_lt_n_sparse(self): np.random.seed(2017) m, n = 20, 50 p = 0.05 A = np.random.rand(m, n) A[np.random.rand(m, n) > p] = 0 rank = np.linalg.matrix_rank(A) b = np.zeros(A.shape[0]) A1, b1, status, message = self.rr(A, b) assert_equal(status, 0) assert_equal(A1.shape[0], rank) assert_equal(np.linalg.matrix_rank(A1), rank) def test_m_eq_n_sparse(self): np.random.seed(2017) m, n = 100, 100 p = 0.01 A = np.random.rand(m, n) A[np.random.rand(m, n) > p] = 0 rank = np.linalg.matrix_rank(A) b = np.zeros(A.shape[0]) A1, b1, status, message = self.rr(A, b) assert_equal(status, 0) assert_equal(A1.shape[0], rank) assert_equal(np.linalg.matrix_rank(A1), rank) def test_magic_square(self): A, b, c, numbers, _ = magic_square(3) A1, b1, status, message = self.rr(A, b) assert_equal(status, 0) assert_equal(A1.shape[0], 23) assert_equal(np.linalg.matrix_rank(A1), 23) def test_magic_square2(self): A, b, c, numbers, _ = magic_square(4) A1, b1, status, message = self.rr(A, b) assert_equal(status, 0) assert_equal(A1.shape[0], 39) assert_equal(np.linalg.matrix_rank(A1), 39) class TestRRSVD(RRCommonTests): def rr(self, A, b): return _remove_redundancy_svd(A, b) class TestRRPivotDense(RRCommonTests): def rr(self, A, b): return _remove_redundancy_pivot_dense(A, b) class TestRRID(RRCommonTests): def rr(self, A, b): return _remove_redundancy_id(A, b) class TestRRPivotSparse(RRCommonTests): def rr(self, A, b): rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b) A1, b1, status, message = rr_res return A1.toarray(), b1, status, message
6799
28.694323
77
py
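A minimal sketch of the redundancy-removal helper exercised above. Note that `_remove_redundancy_svd` is a private SciPy function whose interface may change; the small equality system below is an illustrative assumption.

import numpy as np
from scipy.optimize._remove_redundancy import _remove_redundancy_svd

# The third equality is the sum of the first two, so it is redundant
# (and consistent, since b[2] == b[0] + b[1]).
A = np.array([[1., 0., 0.],
              [0., 1., 0.],
              [1., 1., 0.]])
b = np.array([1., 2., 3.])
A1, b1, status, message = _remove_redundancy_svd(A, b)
print(A1.shape, status)  # A1 should keep only the independent rows; status 0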
scipy
scipy-main/scipy/optimize/tests/test_linear_assignment.py
# Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck # License: BSD from numpy.testing import assert_array_equal import pytest import numpy as np from scipy.optimize import linear_sum_assignment from scipy.sparse import random from scipy.sparse._sputils import matrix from scipy.sparse.csgraph import min_weight_full_bipartite_matching from scipy.sparse.csgraph.tests.test_matching import ( linear_sum_assignment_assertions, linear_sum_assignment_test_cases ) def test_linear_sum_assignment_input_shape(): with pytest.raises(ValueError, match="expected a matrix"): linear_sum_assignment([1, 2, 3]) def test_linear_sum_assignment_input_object(): C = [[1, 2, 3], [4, 5, 6]] assert_array_equal(linear_sum_assignment(C), linear_sum_assignment(np.asarray(C))) assert_array_equal(linear_sum_assignment(C), linear_sum_assignment(matrix(C))) def test_linear_sum_assignment_input_bool(): I = np.identity(3) assert_array_equal(linear_sum_assignment(I.astype(np.bool_)), linear_sum_assignment(I)) def test_linear_sum_assignment_input_string(): I = np.identity(3) with pytest.raises(TypeError, match="Cannot cast array data"): linear_sum_assignment(I.astype(str)) def test_linear_sum_assignment_input_nan(): I = np.diag([np.nan, 1, 1]) with pytest.raises(ValueError, match="contains invalid numeric entries"): linear_sum_assignment(I) def test_linear_sum_assignment_input_neginf(): I = np.diag([1, -np.inf, 1]) with pytest.raises(ValueError, match="contains invalid numeric entries"): linear_sum_assignment(I) def test_linear_sum_assignment_input_inf(): I = np.identity(3) I[:, 0] = np.inf with pytest.raises(ValueError, match="cost matrix is infeasible"): linear_sum_assignment(I) def test_constant_cost_matrix(): # Fixes #11602 n = 8 C = np.ones((n, n)) row_ind, col_ind = linear_sum_assignment(C) assert_array_equal(row_ind, np.arange(n)) assert_array_equal(col_ind, np.arange(n)) @pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)]) def test_linear_sum_assignment_trivial_cost(num_rows, num_cols): C = np.empty(shape=(num_cols, num_rows)) row_ind, col_ind = linear_sum_assignment(C) assert len(row_ind) == 0 assert len(col_ind) == 0 @pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases) def test_linear_sum_assignment_small_inputs(sign, test_case): linear_sum_assignment_assertions( linear_sum_assignment, np.array, sign, test_case) # Tests that combine scipy.optimize.linear_sum_assignment and # scipy.sparse.csgraph.min_weight_full_bipartite_matching def test_two_methods_give_same_result_on_many_sparse_inputs(): # As opposed to the test above, here we do not spell out the expected # output; only assert that the two methods give the same result. # Concretely, the below tests 100 cases of size 100x100, out of which # 36 are infeasible. np.random.seed(1234) for _ in range(100): lsa_raises = False mwfbm_raises = False sparse = random(100, 100, density=0.06, data_rvs=lambda size: np.random.randint(1, 100, size)) # In csgraph, zeros correspond to missing edges, so we explicitly # replace those with infinities dense = np.full(sparse.shape, np.inf) dense[sparse.row, sparse.col] = sparse.data sparse = sparse.tocsr() try: row_ind, col_ind = linear_sum_assignment(dense) lsa_cost = dense[row_ind, col_ind].sum() except ValueError: lsa_raises = True try: row_ind, col_ind = min_weight_full_bipartite_matching(sparse) mwfbm_cost = sparse[row_ind, col_ind].sum() except ValueError: mwfbm_raises = True # Ensure that if one method raises, so does the other one. 
assert lsa_raises == mwfbm_raises if not lsa_raises: assert lsa_cost == mwfbm_cost
4085
33.923077
78
py
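A minimal sketch of `linear_sum_assignment` on a small dense cost matrix; the matrix values are illustrative only.

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[4, 1, 3],
                 [2, 0, 5],
                 [3, 2, 2]])
row_ind, col_ind = linear_sum_assignment(cost)
# Indices of the optimal assignment and its total cost.
print(col_ind, cost[row_ind, col_ind].sum())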
scipy
scipy-main/scipy/optimize/tests/test_tnc.py
""" Unit tests for TNC optimization routine from tnc.py """ import pytest from numpy.testing import assert_allclose, assert_equal import numpy as np from math import pow from scipy import optimize class TestTnc: """TNC non-linear optimization. These tests are taken from Prof. K. Schittkowski's test examples for constrained non-linear programming. http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm """ def setup_method(self): # options for minimize self.opts = {'disp': False, 'maxfun': 200} # objective functions and Jacobian for each test def f1(self, x, a=100.0): return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2) def g1(self, x, a=100.0): dif = [0, 0] dif[1] = 2 * a * (x[1] - pow(x[0], 2)) dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0) return dif def fg1(self, x, a=100.0): return self.f1(x, a), self.g1(x, a) def f3(self, x): return x[1] + pow(x[1] - x[0], 2) * 1.0e-5 def g3(self, x): dif = [0, 0] dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5 dif[1] = 1.0 - dif[0] return dif def fg3(self, x): return self.f3(x), self.g3(x) def f4(self, x): return pow(x[0] + 1.0, 3) / 3.0 + x[1] def g4(self, x): dif = [0, 0] dif[0] = pow(x[0] + 1.0, 2) dif[1] = 1.0 return dif def fg4(self, x): return self.f4(x), self.g4(x) def f5(self, x): return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \ 1.5 * x[0] + 2.5 * x[1] + 1.0 def g5(self, x): dif = [0, 0] v1 = np.cos(x[0] + x[1]) v2 = 2.0*(x[0] - x[1]) dif[0] = v1 + v2 - 1.5 dif[1] = v1 - v2 + 2.5 return dif def fg5(self, x): return self.f5(x), self.g5(x) def f38(self, x): return (100.0 * pow(x[1] - pow(x[0], 2), 2) + pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) + pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) + pow(x[3] - 1.0, 2)) + 19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5 def g38(self, x): dif = [0, 0, 0, 0] dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) - 2.0 * (1.0 - x[0])) * 1.0e-5 dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) + 19.8 * (x[3] - 1.0)) * 1.0e-5 dif[2] = (- 360.0 * x[2] * (x[3] - pow(x[2], 2)) - 2.0 * (1.0 - x[2])) * 1.0e-5 dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) + 19.8 * (x[1] - 1.0)) * 1.0e-5 return dif def fg38(self, x): return self.f38(x), self.g38(x) def f45(self, x): return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0 def g45(self, x): dif = [0] * 5 dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0 dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0 dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0 dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0 dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0 return dif def fg45(self, x): return self.f45(x), self.g45(x) # tests # minimize with method=TNC def test_minimize_tnc1(self): x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None]) xopt = [1, 1] iterx = [] # to test callback res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1, bounds=bnds, options=self.opts, callback=iterx.append) assert_allclose(res.fun, self.f1(xopt), atol=1e-8) assert_equal(len(iterx), res.nit) def test_minimize_tnc1b(self): x0, bnds = np.array([-2, 1]), ([-np.inf, None], [-1.5, None]) xopt = [1, 1] x = optimize.minimize(self.f1, x0, method='TNC', bounds=bnds, options=self.opts).x assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4) def test_minimize_tnc1c(self): x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None]) xopt = [1, 1] x = optimize.minimize(self.fg1, x0, method='TNC', jac=True, bounds=bnds, options=self.opts).x assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) def test_minimize_tnc2(self): x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None]) xopt = 
[-1.2210262419616387, 1.5] x = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1, bounds=bnds, options=self.opts).x assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8) def test_minimize_tnc3(self): x0, bnds = [10, 1], ([-np.inf, None], [0.0, None]) xopt = [0, 0] x = optimize.minimize(self.f3, x0, method='TNC', jac=self.g3, bounds=bnds, options=self.opts).x assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8) def test_minimize_tnc4(self): x0,bnds = [1.125, 0.125], [(1, None), (0, None)] xopt = [1, 0] x = optimize.minimize(self.f4, x0, method='TNC', jac=self.g4, bounds=bnds, options=self.opts).x assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8) def test_minimize_tnc5(self): x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)] xopt = [-0.54719755119659763, -1.5471975511965976] x = optimize.minimize(self.f5, x0, method='TNC', jac=self.g5, bounds=bnds, options=self.opts).x assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8) def test_minimize_tnc38(self): x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4 xopt = [1]*4 x = optimize.minimize(self.f38, x0, method='TNC', jac=self.g38, bounds=bnds, options=self.opts).x assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8) def test_minimize_tnc45(self): x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] xopt = [1, 2, 3, 4, 5] x = optimize.minimize(self.f45, x0, method='TNC', jac=self.g45, bounds=bnds, options=self.opts).x assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8) # fmin_tnc def test_tnc1(self): fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None]) xopt = [1, 1] x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ), messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_tnc1b(self): x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) xopt = [1, 1] x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True, bounds=bounds, messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_tnc1c(self): x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None]) xopt = [1, 1] x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1, bounds=bounds, messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_tnc2(self): fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None]) xopt = [-1.2210262419616387, 1.5] x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_tnc3(self): fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None]) xopt = [0, 0] x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_tnc4(self): fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)] xopt = [1, 0] x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_tnc5(self): fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)] xopt = [-0.54719755119659763, 
-1.5471975511965976] x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_tnc38(self): fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4 xopt = [1]*4 x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_tnc45(self): fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] xopt = [1, 2, 3, 4, 5] x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, messages=optimize._tnc.MSG_NONE, maxfun=200) assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8, err_msg="TNC failed with status: " + optimize._tnc.RCSTRINGS[rc]) def test_raising_exceptions(self): # tnc was ported to cython from hand-crafted cpython code # check that Exception handling works. def myfunc(x): raise RuntimeError("myfunc") def myfunc1(x): return optimize.rosen(x) def callback(x): raise ValueError("callback") with pytest.raises(RuntimeError): optimize.minimize(myfunc, [0, 1], method="TNC") with pytest.raises(ValueError): optimize.minimize( myfunc1, [0, 1], method="TNC", callback=callback ) def test_callback_shouldnt_affect_minimization(self): # gh14879. The output of a TNC minimization was different depending # on whether a callback was used or not. The two should be equivalent. # The issue was that TNC was unscaling/scaling x, and this process was # altering x in the process. Now the callback uses an unscaled # temporary copy of x. def callback(x): pass fun = optimize.rosen bounds = [(0, 10)] * 4 x0 = [1, 2, 3, 4.] res = optimize.minimize( fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000} ) res2 = optimize.minimize( fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000}, callback=callback ) assert_allclose(res2.x, res.x) assert_allclose(res2.fun, res.fun) assert_equal(res2.nfev, res.nfev)
12700
35.708092
78
py
scipy
scipy-main/scipy/optimize/tests/test_linprog.py
""" Unit test for Linear Programming """ import sys import platform import numpy as np from numpy.testing import (assert_, assert_allclose, assert_equal, assert_array_less, assert_warns, suppress_warnings) from pytest import raises as assert_raises from scipy.optimize import linprog, OptimizeWarning from scipy.optimize._numdiff import approx_derivative from scipy.sparse.linalg import MatrixRankWarning from scipy.linalg import LinAlgWarning import scipy.sparse import pytest has_umfpack = True try: from scikits.umfpack import UmfpackWarning except ImportError: has_umfpack = False has_cholmod = True try: import sksparse # noqa: F401 from sksparse.cholmod import cholesky as cholmod # noqa: F401 except ImportError: has_cholmod = False def _assert_iteration_limit_reached(res, maxiter): assert_(not res.success, "Incorrectly reported success") assert_(res.success < maxiter, "Incorrectly reported number of iterations") assert_equal(res.status, 1, "Failed to report iteration limit reached") def _assert_infeasible(res): # res: linprog result object assert_(not res.success, "incorrectly reported success") assert_equal(res.status, 2, "failed to report infeasible status") def _assert_unbounded(res): # res: linprog result object assert_(not res.success, "incorrectly reported success") assert_equal(res.status, 3, "failed to report unbounded status") def _assert_unable_to_find_basic_feasible_sol(res): # res: linprog result object # The status may be either 2 or 4 depending on why the feasible solution # could not be found. If the undelying problem is expected to not have a # feasible solution, _assert_infeasible should be used. assert_(not res.success, "incorrectly reported success") assert_(res.status in (2, 4), "failed to report optimization failure") def _assert_success(res, desired_fun=None, desired_x=None, rtol=1e-8, atol=1e-8): # res: linprog result object # desired_fun: desired objective function value or None # desired_x: desired solution or None if not res.success: msg = "linprog status {}, message: {}".format(res.status, res.message) raise AssertionError(msg) assert_equal(res.status, 0) if desired_fun is not None: assert_allclose(res.fun, desired_fun, err_msg="converged to an unexpected objective value", rtol=rtol, atol=atol) if desired_x is not None: assert_allclose(res.x, desired_x, err_msg="converged to an unexpected solution", rtol=rtol, atol=atol) def magic_square(n): """ Generates a linear program for which integer solutions represent an n x n magic square; binary decision variables represent the presence (or absence) of an integer 1 to n^2 in each position of the square. 
""" np.random.seed(0) M = n * (n**2 + 1) / 2 numbers = np.arange(n**4) // n**2 + 1 numbers = numbers.reshape(n**2, n, n) zeros = np.zeros((n**2, n, n)) A_list = [] b_list = [] # Rule 1: use every number exactly once for i in range(n**2): A_row = zeros.copy() A_row[i, :, :] = 1 A_list.append(A_row.flatten()) b_list.append(1) # Rule 2: Only one number per square for i in range(n): for j in range(n): A_row = zeros.copy() A_row[:, i, j] = 1 A_list.append(A_row.flatten()) b_list.append(1) # Rule 3: sum of rows is M for i in range(n): A_row = zeros.copy() A_row[:, i, :] = numbers[:, i, :] A_list.append(A_row.flatten()) b_list.append(M) # Rule 4: sum of columns is M for i in range(n): A_row = zeros.copy() A_row[:, :, i] = numbers[:, :, i] A_list.append(A_row.flatten()) b_list.append(M) # Rule 5: sum of diagonals is M A_row = zeros.copy() A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)] A_list.append(A_row.flatten()) b_list.append(M) A_row = zeros.copy() A_row[:, range(n), range(-1, -n - 1, -1)] = \ numbers[:, range(n), range(-1, -n - 1, -1)] A_list.append(A_row.flatten()) b_list.append(M) A = np.array(np.vstack(A_list), dtype=float) b = np.array(b_list, dtype=float) c = np.random.rand(A.shape[1]) return A, b, c, numbers, M def lpgen_2d(m, n): """ -> A b c LP test: m*n vars, m+n constraints row sums == n/m, col sums == 1 https://gist.github.com/denis-bz/8647461 """ np.random.seed(0) c = - np.random.exponential(size=(m, n)) Arow = np.zeros((m, m * n)) brow = np.zeros(m) for j in range(m): j1 = j + 1 Arow[j, j * n:j1 * n] = 1 brow[j] = n / m Acol = np.zeros((n, m * n)) bcol = np.zeros(n) for j in range(n): j1 = j + 1 Acol[j, j::n] = 1 bcol[j] = 1 A = np.vstack((Arow, Acol)) b = np.hstack((brow, bcol)) return A, b, c.ravel() def very_random_gen(seed=0): np.random.seed(seed) m_eq, m_ub, n = 10, 20, 50 c = np.random.rand(n)-0.5 A_ub = np.random.rand(m_ub, n)-0.5 b_ub = np.random.rand(m_ub)-0.5 A_eq = np.random.rand(m_eq, n)-0.5 b_eq = np.random.rand(m_eq)-0.5 lb = -np.random.rand(n) ub = np.random.rand(n) lb[lb < -np.random.rand()] = -np.inf ub[ub > np.random.rand()] = np.inf bounds = np.vstack((lb, ub)).T return c, A_ub, b_ub, A_eq, b_eq, bounds def nontrivial_problem(): c = [-1, 8, 4, -6] A_ub = [[-7, -7, 6, 9], [1, -1, -3, 0], [10, -10, -7, 7], [6, -1, 3, 4]] b_ub = [-3, 6, -6, 6] A_eq = [[-10, 1, 1, -8]] b_eq = [-4] x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391] f_star = 7083 / 1391 return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star def l1_regression_prob(seed=0, m=8, d=9, n=100): ''' Training data is {(x0, y0), (x1, y2), ..., (xn-1, yn-1)} x in R^d y in R n: number of training samples d: dimension of x, i.e. 
x in R^d phi: feature map R^d -> R^m m: dimension of feature space ''' np.random.seed(seed) phi = np.random.normal(0, 1, size=(m, d)) # random feature mapping w_true = np.random.randn(m) x = np.random.normal(0, 1, size=(d, n)) # features y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n) # measurements # construct the problem c = np.ones(m+n) c[:m] = 0 A_ub = scipy.sparse.lil_matrix((2*n, n+m)) idx = 0 for ii in range(n): A_ub[idx, :m] = phi @ x[:, ii] A_ub[idx, m+ii] = -1 A_ub[idx+1, :m] = -1*phi @ x[:, ii] A_ub[idx+1, m+ii] = -1 idx += 2 A_ub = A_ub.tocsc() b_ub = np.zeros(2*n) b_ub[0::2] = y b_ub[1::2] = -y bnds = [(None, None)]*m + [(0, None)]*n return c, A_ub, b_ub, bnds def generic_callback_test(self): # Check that callback is as advertised last_cb = {} def cb(res): message = res.pop('message') complete = res.pop('complete') assert_(res.pop('phase') in (1, 2)) assert_(res.pop('status') in range(4)) assert_(isinstance(res.pop('nit'), int)) assert_(isinstance(complete, bool)) assert_(isinstance(message, str)) last_cb['x'] = res['x'] last_cb['fun'] = res['fun'] last_cb['slack'] = res['slack'] last_cb['con'] = res['con'] c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) assert_allclose(last_cb['fun'], res['fun']) assert_allclose(last_cb['x'], res['x']) assert_allclose(last_cb['con'], res['con']) assert_allclose(last_cb['slack'], res['slack']) def test_unknown_solvers_and_options(): c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki') assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki') message = "Unrecognized options detected: {'rr_method': 'ekki-ekki-ekki'}" with pytest.warns(OptimizeWarning, match=message): linprog(c, A_ub=A_ub, b_ub=b_ub, options={"rr_method": 'ekki-ekki-ekki'}) def test_choose_solver(): # 'highs' chooses 'dual' c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub, b_ub, method='highs') _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) def test_deprecation(): with pytest.warns(DeprecationWarning): linprog(1, method='interior-point') with pytest.warns(DeprecationWarning): linprog(1, method='revised simplex') with pytest.warns(DeprecationWarning): linprog(1, method='simplex') def test_highs_status_message(): res = linprog(1, method='highs') msg = "Optimization terminated successfully. (HiGHS Status 7:" assert res.status == 0 assert res.message.startswith(msg) A, b, c, numbers, M = magic_square(6) bounds = [(0, 1)] * len(c) integrality = [1] * len(c) options = {"time_limit": 0.1} res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs', options=options, integrality=integrality) msg = "Time limit reached. (HiGHS Status 13:" assert res.status == 1 assert res.message.startswith(msg) options = {"maxiter": 10} res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs-ds', options=options) msg = "Iteration limit reached. (HiGHS Status 14:" assert res.status == 1 assert res.message.startswith(msg) res = linprog(1, bounds=(1, -1), method='highs') msg = "The problem is infeasible. (HiGHS Status 8:" assert res.status == 2 assert res.message.startswith(msg) res = linprog(-1, method='highs') msg = "The problem is unbounded. 
(HiGHS Status 10:" assert res.status == 3 assert res.message.startswith(msg) from scipy.optimize._linprog_highs import _highs_to_scipy_status_message status, message = _highs_to_scipy_status_message(58, "Hello!") msg = "The HiGHS status code was not recognized. (HiGHS Status 58:" assert status == 4 assert message.startswith(msg) status, message = _highs_to_scipy_status_message(None, None) msg = "HiGHS did not provide a status code. (HiGHS Status None: None)" assert status == 4 assert message.startswith(msg) def test_bug_17380(): linprog([1, 1], A_ub=[[-1, 0]], b_ub=[-2.5], integrality=[1, 1]) A_ub = None b_ub = None A_eq = None b_eq = None bounds = None ################ # Common Tests # ################ class LinprogCommonTests: """ Base class for `linprog` tests. Generally, each test will be performed once for every derived class of LinprogCommonTests, each of which will typically change self.options and/or self.method. Effectively, these tests are run for many combination of method (simplex, revised simplex, and interior point) and options (such as pivoting rule or sparse treatment). """ ################## # Targeted Tests # ################## def test_callback(self): generic_callback_test(self) def test_disp(self): # test that display option does not break anything. A, b, c = lpgen_2d(20, 20) res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"disp": True}) _assert_success(res, desired_fun=-64.049494229) def test_docstring_example(self): # Example from linprog docstring. c = [-1, 4] A = [[-3, 1], [1, 2]] b = [6, 4] x0_bounds = (None, None) x1_bounds = (-3, None) res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), options=self.options, method=self.method) _assert_success(res, desired_fun=-22) def test_type_error(self): # (presumably) checks that linprog recognizes type errors # This is tested more carefully in test__linprog_clean_inputs.py c = [1] A_eq = [[1]] b_eq = "hello" assert_raises(TypeError, linprog, c, A_eq=A_eq, b_eq=b_eq, method=self.method, options=self.options) def test_aliasing_b_ub(self): # (presumably) checks that linprog does not modify b_ub # This is tested more carefully in test__linprog_clean_inputs.py c = np.array([1.0]) A_ub = np.array([[1.0]]) b_ub_orig = np.array([3.0]) b_ub = b_ub_orig.copy() bounds = (-4.0, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-4, desired_x=[-4]) assert_allclose(b_ub_orig, b_ub) def test_aliasing_b_eq(self): # (presumably) checks that linprog does not modify b_eq # This is tested more carefully in test__linprog_clean_inputs.py c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq_orig = np.array([3.0]) b_eq = b_eq_orig.copy() bounds = (-4.0, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, desired_x=[3]) assert_allclose(b_eq_orig, b_eq) def test_non_ndarray_args(self): # (presumably) checks that linprog accepts list in place of arrays # This is tested more carefully in test__linprog_clean_inputs.py c = [1.0] A_ub = [[1.0]] b_ub = [3.0] A_eq = [[1.0]] b_eq = [2.0] bounds = (-1.0, 10.0) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=2, desired_x=[2]) def test_unknown_options(self): c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, options={}): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, 
method=self.method, options=options) o = {key: self.options[key] for key in self.options} o['spam'] = 42 assert_warns(OptimizeWarning, f, c, A_ub=A_ub, b_ub=b_ub, options=o) def test_integrality_without_highs(self): # ensure that using `integrality` parameter without `method='highs'` # raises warning and produces correct solution to relaxed problem # source: https://en.wikipedia.org/wiki/Integer_programming#Example A_ub = np.array([[-1, 1], [3, 2], [2, 3]]) b_ub = np.array([1, 12, 12]) c = -np.array([0, 1]) bounds = [(0, np.inf)] * len(c) integrality = [1] * len(c) with np.testing.assert_warns(OptimizeWarning): res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method=self.method, integrality=integrality) np.testing.assert_allclose(res.x, [1.8, 2.8]) np.testing.assert_allclose(res.fun, -2.8) def test_invalid_inputs(self): def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) # Test ill-formatted bounds assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)]) with np.testing.suppress_warnings() as sup: sup.filter(np.VisibleDeprecationWarning, "Creating an ndarray from ragged") assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)]) assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)]) # Test other invalid inputs assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2]) assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1]) assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2]) assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1]) assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1) # this last check doesn't make sense for sparse presolve if ("_sparse_presolve" in self.options and self.options["_sparse_presolve"]): return # there aren't 3-D sparse matrices assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1) def test_sparse_constraints(self): # gh-13559: improve error message for sparse inputs when unsupported def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) np.random.seed(0) m = 100 n = 150 A_eq = scipy.sparse.rand(m, n, 0.5) x_valid = np.random.randn(n) c = np.random.randn(n) ub = x_valid + np.random.rand(n) lb = x_valid - np.random.rand(n) bounds = np.column_stack((lb, ub)) b_eq = A_eq * x_valid if self.method in {'simplex', 'revised simplex'}: # simplex and revised simplex should raise error with assert_raises(ValueError, match=f"Method '{self.method}' " "does not support sparse constraint matrices."): linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) else: # other methods should succeed options = {**self.options} if self.method in {'interior-point'}: options['sparse'] = True res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=options) assert res.success def test_maxiter(self): # test iteration limit w/ Enzo example c = [4, 8, 3, 0, 0, 0] A = [ [2, 5, 3, -1, 0, 0], [3, 2.5, 8, 0, -1, 0], [8, 10, 4, 0, 0, -1]] b = [185, 155, 600] np.random.seed(0) maxiter = 3 res = linprog(c, A_eq=A, b_eq=b, method=self.method, options={"maxiter": maxiter}) _assert_iteration_limit_reached(res, maxiter) assert_equal(res.nit, maxiter) def test_bounds_fixed(self): # Test fixed bounds (upper equal to lower) # If presolve option True, test if solution found in presolve (i.e. # number of iterations is 0). 
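# test_integrality_without_highs above relies on the relationship between an
# integer program and its LP relaxation.  A standalone sketch of that
# relationship with the same Wikipedia example, solved both ways with
# method='highs'; the relaxed optimum (1.8, 2.8) is the value the test
# asserts, and the integer optimum y = 2 follows from the same example.
import numpy as np
from scipy.optimize import linprog

# maximize y  subject to  -x + y <= 1,  3x + 2y <= 12,  2x + 3y <= 12, x, y >= 0
c = np.array([0, -1])                       # minimize -y, i.e. maximize y
A_ub = np.array([[-1, 1], [3, 2], [2, 3]])
b_ub = np.array([1, 12, 12])
bounds = [(0, None), (0, None)]

relaxed = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='highs')
np.testing.assert_allclose(relaxed.x, [1.8, 2.8])   # fractional vertex

integer = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='highs',
                  integrality=np.ones(2))
np.testing.assert_allclose(integer.fun, -2)         # integer optimum y = 2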
do_presolve = self.options.get('presolve', True) res = linprog([1], bounds=(1, 1), method=self.method, options=self.options) _assert_success(res, 1, 1) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)], method=self.method, options=self.options) _assert_success(res, 12, [5, -1, 3]) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 1], bounds=[(1, 1), (1, 3)], method=self.method, options=self.options) _assert_success(res, 2, [1, 1]) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7], bounds=[(-5, 5), (0, 10), (3.5, 3.5)], method=self.method, options=self.options) _assert_success(res, 15, [1, 7, 3.5]) if do_presolve: assert_equal(res.nit, 0) def test_bounds_infeasible(self): # Test ill-valued bounds (upper less than lower) # If presolve option True, test if solution found in presolve (i.e. # number of iterations is 0). do_presolve = self.options.get('presolve', True) res = linprog([1], bounds=(1, -2), method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) def test_bounds_infeasible_2(self): # Test ill-valued bounds (lower inf, upper -inf) # If presolve option True, test if solution found in presolve (i.e. # number of iterations is 0). # For the simplex method, the cases do not result in an # infeasible status, but in a RuntimeWarning. This is a # consequence of having _presolve() take care of feasibility # checks. See issue gh-11618. do_presolve = self.options.get('presolve', True) simplex_without_presolve = not do_presolve and self.method == 'simplex' c = [1, 2, 3] bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)] bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)] if simplex_without_presolve: def g(c, bounds): res = linprog(c, bounds=bounds, method=self.method, options=self.options) return res with pytest.warns(RuntimeWarning): with pytest.raises(IndexError): g(c, bounds=bounds_1) with pytest.warns(RuntimeWarning): with pytest.raises(IndexError): g(c, bounds=bounds_2) else: res = linprog(c=c, bounds=bounds_1, method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog(c=c, bounds=bounds_2, method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) def test_empty_constraint_1(self): c = [-1, -2] res = linprog(c, method=self.method, options=self.options) _assert_unbounded(res) def test_empty_constraint_2(self): c = [-1, 1, -1, 1] bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] res = linprog(c, bounds=bounds, method=self.method, options=self.options) _assert_unbounded(res) # Unboundedness detected in presolve requires no iterations if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_empty_constraint_3(self): c = [1, -1, 1, -1] bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] res = linprog(c, bounds=bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2) def test_inequality_constraints(self): # Minimize linear function subject to linear inequality constraints. 
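# test_bounds_fixed above also checks that problems presolve can solve
# completely are reported with zero iterations.  A standalone sketch with one
# of the same fixed-bounds problems, using method='highs' explicitly.
import numpy as np
from scipy.optimize import linprog

res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)], method='highs')
assert res.nit == 0                          # solved entirely in presolve
np.testing.assert_allclose(res.x, [5, -1, 3])
np.testing.assert_allclose(res.fun, 12)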
# http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf c = np.array([3, 2]) * -1 # maximize A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-18, desired_x=[2, 6]) def test_inequality_constraints2(self): # Minimize linear function subject to linear inequality constraints. # http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf # (dead link) c = [6, 3] A_ub = [[0, 3], [-1, -1], [-2, 1]] b_ub = [2, -1, -1] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3]) def test_bounds_simple(self): c = [1, 2] bounds = (1, 2) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[1, 1]) bounds = [(1, 2), (1, 2)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[1, 1]) def test_bounded_below_only_1(self): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq = np.array([3.0]) bounds = (1.0, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, desired_x=[3]) def test_bounded_below_only_2(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (0.5, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounded_above_only_1(self): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq = np.array([3.0]) bounds = (None, 10.0) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, desired_x=[3]) def test_bounded_above_only_2(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (-np.inf, 4) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounds_infinity(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (-np.inf, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounds_mixed(self): # Problem has one unbounded variable and # another with a negative lower bound. c = np.array([-1, 4]) * -1 # maximize A_ub = np.array([[-3, 1], [1, 2]], dtype=np.float64) b_ub = [6, 4] x0_bounds = (-np.inf, np.inf) x1_bounds = (-3, np.inf) bounds = (x0_bounds, x1_bounds) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7]) def test_bounds_equal_but_infeasible(self): c = [-4, 1] A_ub = [[7, -2], [0, 1], [2, -2]] b_ub = [14, 0, 3] bounds = [(2, 2), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bounds_equal_but_infeasible2(self): c = [-4, 1] A_eq = [[7, -2], [0, 1], [2, -2]] b_eq = [14, 0, 3] bounds = [(2, 2), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bounds_equal_no_presolve(self): # There was a bug when a lower and upper bound were equal but # presolve was not on to eliminate the variable. 
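# Several tests in this file encode maximization problems by negating the
# objective (the ``* -1  # maximize`` pattern above).  A standalone sketch of
# that convention, reusing the Wikipedia simplex example that appears further
# down in this file (its known maximum is 20): linprog always minimizes, so
# the maximum of the original problem is the negative of the reported ``fun``.
import numpy as np
from scipy.optimize import linprog

# maximize 2*x0 + 3*x1 + 4*x2  subject to  3*x0 + 2*x1 + x2 <= 10,
#                                          2*x0 + 5*x1 + 3*x2 <= 15,  x >= 0
c_max = np.array([2, 3, 4])
A_ub = np.array([[3, 2, 1], [2, 5, 3]])
b_ub = np.array([10, 15])

res = linprog(-c_max, A_ub=A_ub, b_ub=b_ub, bounds=(0, None), method='highs')
assert res.status == 0
np.testing.assert_allclose(-res.fun, 20)    # maximum of the original problem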
The bound # was being converted to an equality constraint, but the bound # was not eliminated, leading to issues in postprocessing. c = [1, 2] A_ub = [[1, 2], [1.1, 2.2]] b_ub = [4, 8] bounds = [(1, 2), (2, 2)] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_infeasible(res) def test_zero_column_1(self): m, n = 3, 4 np.random.seed(0) c = np.random.rand(n) c[1] = 1 A_eq = np.random.rand(m, n) A_eq[:, 1] = 0 b_eq = np.random.rand(m) A_ub = [[1, 0, 1, 1]] b_ub = 3 bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-9.7087836730413404) def test_zero_column_2(self): if self.method in {'highs-ds', 'highs-ipm'}: # See upstream issue https://github.com/ERGO-Code/HiGHS/issues/648 pytest.xfail() np.random.seed(0) m, n = 2, 4 c = np.random.rand(n) c[1] = -1 A_eq = np.random.rand(m, n) A_eq[:, 1] = 0 b_eq = np.random.rand(m) A_ub = np.random.rand(m, n) A_ub[:, 1] = 0 b_ub = np.random.rand(m) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) # Unboundedness detected in presolve if self.options.get('presolve', True) and "highs" not in self.method: # HiGHS detects unboundedness or infeasibility in presolve # It needs an iteration of simplex to be sure of unboundedness # Other solvers report that the problem is unbounded if feasible assert_equal(res.nit, 0) def test_zero_row_1(self): c = [1, 2, 3] A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] b_eq = [0, 3, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3) def test_zero_row_2(self): A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] b_ub = [0, 3, 0] c = [1, 2, 3] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0) def test_zero_row_3(self): m, n = 2, 4 c = np.random.rand(n) A_eq = np.random.rand(m, n) A_eq[0, :] = 0 b_eq = np.random.rand(m) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_zero_row_4(self): m, n = 2, 4 c = np.random.rand(n) A_ub = np.random.rand(m, n) A_ub[0, :] = 0 b_ub = -np.random.rand(m) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_eq_1(self): c = [1, 1, 1, 2] A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] b_eq = [1, 2, 2, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_eq_2(self): c = [1, 1, 1, 2] A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] b_eq = [1, 2, 1, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=4) def test_singleton_row_ub_1(self): c = [1, 1, 1, 2] A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] b_ub = [1, 2, -2, 4] bounds = [(None, None), (0, None), (0, None), (0, None)] res = 
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_ub_2(self): c = [1, 1, 1, 2] A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] b_ub = [1, 2, -0.5, 4] bounds = [(None, None), (0, None), (0, None), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0.5) def test_infeasible(self): # Test linprog response to an infeasible problem c = [-1, -1] A_ub = [[1, 0], [0, 1], [-1, -1]] b_ub = [2, 2, -5] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_infeasible_inequality_bounds(self): c = [1] A_ub = [[2]] b_ub = 4 bounds = (5, 6) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_unbounded(self): # Test linprog response to an unbounded problem c = np.array([1, 1]) * -1 # maximize A_ub = [[-1, 1], [-1, -1]] b_ub = [-1, -2] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) def test_unbounded_below_no_presolve_corrected(self): c = [1] bounds = [(None, 1)] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c=c, bounds=bounds, method=self.method, options=o) if self.method == "revised simplex": # Revised simplex has a special pathway for no constraints. assert_equal(res.status, 5) else: _assert_unbounded(res) def test_unbounded_no_nontrivial_constraints_1(self): """ Test whether presolve pathway for detecting unboundedness after constraint elimination is working. """ c = np.array([0, 0, 0, 1, -1, -1]) A_ub = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1]]) b_ub = np.array([2, -2, 0]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) if not self.method.lower().startswith("highs"): assert_equal(res.x[-1], np.inf) assert_equal(res.message[:36], "The problem is (trivially) unbounded") def test_unbounded_no_nontrivial_constraints_2(self): """ Test whether presolve pathway for detecting unboundedness after constraint elimination is working. 
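# The infeasible/unbounded tests above exercise the ``status`` codes that the
# assertion helpers at the top of this file check: 0 success, 1 iteration or
# time limit reached, 2 infeasible, 3 unbounded, 4 numerical difficulties.  A
# minimal sketch of the infeasible and unbounded outcomes, mirroring the
# one-variable problems used in test_highs_status_message above.
from scipy.optimize import linprog

# Infeasible: the lower bound exceeds the upper bound.
res = linprog([1], bounds=(1, -1), method='highs')
assert res.status == 2 and not res.success

# Unbounded: minimize -x with only the default bound x >= 0.
res = linprog([-1], method='highs')
assert res.status == 3 and not res.success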
""" c = np.array([0, 0, 0, 1, -1, 1]) A_ub = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]) b_ub = np.array([2, -2, 0]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1), (None, 0)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) if not self.method.lower().startswith("highs"): assert_equal(res.x[-1], -np.inf) assert_equal(res.message[:36], "The problem is (trivially) unbounded") def test_cyclic_recovery(self): # Test linprogs recovery from cycling using the Klee-Minty problem # Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf c = np.array([100, 10, 1]) * -1 # maximize A_ub = [[1, 0, 0], [20, 1, 0], [200, 20, 1]] b_ub = [1, 100, 10000] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7) def test_cyclic_bland(self): # Test the effect of Bland's rule on a cycling problem c = np.array([-10, 57, 9, 24.]) A_ub = np.array([[0.5, -5.5, -2.5, 9], [0.5, -1.5, -0.5, 1], [1, 0, 0, 0]]) b_ub = [0, 0, 1] # copy the existing options dictionary but change maxiter maxiter = 100 o = {key: val for key, val in self.options.items()} o['maxiter'] = maxiter res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) if self.method == 'simplex' and not self.options.get('bland'): # simplex cycles without Bland's rule _assert_iteration_limit_reached(res, o['maxiter']) else: # other methods, including simplex with Bland's rule, succeed _assert_success(res, desired_x=[1, 0, 1, 0]) # note that revised simplex skips this test because it may or may not # cycle depending on the initial basis def test_remove_redundancy_infeasibility(self): # mostly a test of redundancy removal, which is carefully tested in # test__remove_redundancy.py m, n = 10, 10 c = np.random.rand(n) A_eq = np.random.rand(m, n) b_eq = np.random.rand(m) A_eq[-1, :] = 2 * A_eq[-2, :] b_eq[-1] *= -1 with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) ################# # General Tests # ################# def test_nontrivial_problem(self): # Problem involves all constraint types, # negative resource limits, and rounding issues. c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=f_star, desired_x=x_star) def test_lpgen_problem(self): # Test linprog with a rather large problem (400 variables, # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 A_ub, b_ub, c = lpgen_2d(20, 20) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-64.049494229) def test_network_flow(self): # A network flow problem with supply and demand at nodes # and with costs along directed edges. 
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18] n, p = -1, 1 A_eq = [ [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0], [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0], [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0], [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p], [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]] b_eq = [0, 19, -16, 33, 0, 0, -36] with suppress_warnings() as sup: sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7) def test_network_flow_limited_capacity(self): # A network flow problem with supply and demand at nodes # and with costs and capacities along directed edges. # http://blog.sommer-forst.de/2013/04/10/ c = [2, 2, 1, 3, 1] bounds = [ [0, 4], [0, 2], [0, 2], [0, 3], [0, 5]] n, p = -1, 1 A_eq = [ [n, n, 0, 0, 0], [p, 0, n, n, 0], [0, p, p, 0, n], [0, 0, 0, p, p]] b_eq = [-4, 0, 0, 4] with suppress_warnings() as sup: # this is an UmfpackWarning but I had trouble importing it if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(OptimizeWarning, "Solving system with option...") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=14) def test_simplex_algorithm_wikipedia_example(self): # https://en.wikipedia.org/wiki/Simplex_algorithm#Example c = [-2, -3, -4] A_ub = [ [3, 2, 1], [2, 5, 3]] b_ub = [10, 15] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-20) def test_enzo_example(self): # https://github.com/scipy/scipy/issues/1779 lp2.py # # Translated from Octave code at: # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm # and placed under MIT licence by Enzo Michelangeli # with permission explicitly granted by the original author, # Prof. 
Kazunobu Yoshida c = [4, 8, 3, 0, 0, 0] A_eq = [ [2, 5, 3, -1, 0, 0], [3, 2.5, 8, 0, -1, 0], [8, 10, 4, 0, 0, -1]] b_eq = [185, 155, 600] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=317.5, desired_x=[66.25, 0, 17.5, 0, 183.75, 0], atol=6e-6, rtol=1e-7) def test_enzo_example_b(self): # rescued from https://github.com/scipy/scipy/pull/218 c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] A_eq = [[-1, -1, -1, 0, 0, 0], [0, 0, 0, 1, 1, 1], [1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1]] b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-1.77, desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3]) def test_enzo_example_c_with_degeneracy(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 20 c = -np.ones(m) tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1) A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) b_eq = [0, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0, desired_x=np.zeros(m)) def test_enzo_example_c_with_unboundedness(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 50 c = -np.ones(m) tmp = 2 * np.pi * np.arange(m) / (m + 1) # This test relies on `cos(0) -1 == sin(0)`, so ensure that's true # (SIMD code or -ffast-math may cause spurious failures otherwise) row0 = np.cos(tmp) - 1 row0[0] = 0.0 row1 = np.sin(tmp) row1[0] = 0.0 A_eq = np.vstack((row0, row1)) b_eq = [0, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) def test_enzo_example_c_with_infeasibility(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 50 c = -np.ones(m) tmp = 2 * np.pi * np.arange(m) / (m + 1) A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) b_eq = [1, 1] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_infeasible(res) def test_basic_artificial_vars(self): # Problem is chosen to test two phase simplex methods when at the end # of phase 1 some artificial variables remain in the basis. # Also, for `method='simplex'`, the row in the tableau corresponding # with the artificial variables is not all zero. 
c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004]) A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0], [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0], [1.0, 1.0, 0, 0, 0, 0]]) b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0]) A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]]) b_eq = np.array([0, 0]) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c), atol=2e-6) def test_optimize_result(self): # check all fields in OptimizeResult c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0) res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) assert_(res.success) assert_(res.nit) assert_(not res.status) if 'highs' not in self.method: # HiGHS status/message tested separately assert_(res.message == "Optimization terminated successfully.") assert_allclose(c @ res.x, res.fun) assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11) assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11) for key in ['eqlin', 'ineqlin', 'lower', 'upper']: if key in res.keys(): assert isinstance(res[key]['marginals'], np.ndarray) assert isinstance(res[key]['residual'], np.ndarray) ################# # Bug Fix Tests # ################# def test_bug_5400(self): # https://github.com/scipy/scipy/issues/5400 bounds = [ (0, None), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)] f = 1 / 9 g = -1e4 h = -3.1 A_ub = np.array([ [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0], [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0], [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0], [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0], [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0], [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0], [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0], [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]]) b_ub = np.array([ 0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900, 900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]) with suppress_warnings() as sup: sup.filter(OptimizeWarning, 
"Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-106.63507541835018) def test_bug_6139(self): # linprog(method='simplex') fails to find a basic feasible solution # if phase 1 pseudo-objective function is outside the provided tol. # https://github.com/scipy/scipy/issues/6139 # Note: This is not strictly a bug as the default tolerance determines # if a result is "close enough" to zero and should not be expected # to work for all cases. c = np.array([1, 1, 1]) A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]]) b_eq = np.array([5.00000000e+00, -1.00000000e+04]) A_ub = -np.array([[0., 1000000., 1010000.]]) b_ub = -np.array([10000000.]) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=14.95, desired_x=np.array([5, 4.95, 5])) def test_bug_6690(self): # linprog simplex used to violate bound constraint despite reporting # success. # https://github.com/scipy/scipy/issues/6690 A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]]) b_eq = np.array([0.9626]) A_ub = np.array([ [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0], [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37], [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0] ]) b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022]) bounds = np.array([ [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73], [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15] ]).T c = np.array([ -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28 ]) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(OptimizeWarning, "Solving system with option 'cholesky'") sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) desired_fun = -1.19099999999 desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800, 0.5000, 0.4700, 0.0900, 0.3200, -0.7300]) _assert_success(res, desired_fun=desired_fun, desired_x=desired_x) # Add small tol value to ensure arrays are less than or equal. atol = 1e-6 assert_array_less(bounds[:, 0] - atol, res.x) assert_array_less(res.x, bounds[:, 1] + atol) def test_bug_7044(self): # linprog simplex failed to "identify correct constraints" (?) # leading to a non-optimal solution if A is rank-deficient. # https://github.com/scipy/scipy/issues/7044 A_eq, b_eq, c, _, _ = magic_square(3) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) desired_fun = 1.730550597 _assert_success(res, desired_fun=desired_fun) assert_allclose(A_eq.dot(res.x), b_eq) assert_array_less(np.zeros(res.x.size) - 1e-5, res.x) def test_bug_7237(self): # https://github.com/scipy/scipy/issues/7237 # linprog simplex "explodes" when the pivot value is very # close to zero. 
c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0]) A_ub = np.array([ [1., -724., 911., -551., -555., -896., 478., -80., -293.], [1., 566., 42., 937., 233., 883., 392., -909., 57.], [1., -208., -894., 539., 321., 532., -924., 942., 55.], [1., 857., -859., 83., 462., -265., -971., 826., 482.], [1., 314., -424., 245., -424., 194., -443., -104., -429.], [1., 540., 679., 361., 149., -827., 876., 633., 302.], [0., -1., -0., -0., -0., -0., -0., -0., -0.], [0., -0., -1., -0., -0., -0., -0., -0., -0.], [0., -0., -0., -1., -0., -0., -0., -0., -0.], [0., -0., -0., -0., -1., -0., -0., -0., -0.], [0., -0., -0., -0., -0., -1., -0., -0., -0.], [0., -0., -0., -0., -0., -0., -1., -0., -0.], [0., -0., -0., -0., -0., -0., -0., -1., -0.], [0., -0., -0., -0., -0., -0., -0., -0., -1.], [0., 1., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 1., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 1., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 1., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 1., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 1., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 1.] ]) b_ub = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.]) A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]]) b_eq = np.array([[1.]]) bounds = [(None, None)] * 9 res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=108.568535, atol=1e-6) def test_bug_8174(self): # https://github.com/scipy/scipy/issues/8174 # The simplex method sometimes "explodes" if the pivot value is very # close to zero. A_ub = np.array([ [22714, 1008, 13380, -2713.5, -1116], [-4986, -1092, -31220, 17386.5, 684], [-4986, 0, 0, -2713.5, 0], [22714, 0, 0, 17386.5, 0]]) b_ub = np.zeros(A_ub.shape[0]) c = -np.ones(A_ub.shape[1]) bounds = [(0, 1)] * A_ub.shape[1] with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex': _assert_unable_to_find_basic_feasible_sol(res) else: _assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6) def test_bug_8174_2(self): # Test supplementary example from issue 8174. 
# https://github.com/scipy/scipy/issues/8174 # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution c = np.array([1, 0, 0, 0, 0, 0, 0]) A_ub = -np.identity(7) b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]]) A_eq = np.array([ [1, 1, 1, 1, 1, 1, 0], [0.3, 1.3, 0.9, 0, 0, 0, -1], [0.3, 0, 0, 0, 0, 0, -2/3], [0, 0.65, 0, 0, 0, 0, -1/15], [0, 0, 0.3, 0, 0, 0, -1/15] ]) b_eq = np.array([[100], [0], [0], [0], [0]]) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=43.3333333331385) def test_bug_8561(self): # Test that pivot row is chosen correctly when using Bland's rule # This was originally written for the simplex method with # Bland's rule only, but it doesn't hurt to test all methods/options # https://github.com/scipy/scipy/issues/8561 c = np.array([7, 0, -4, 1.5, 1.5]) A_ub = np.array([ [4, 5.5, 1.5, 1.0, -3.5], [1, -2.5, -2, 2.5, 0.5], [3, -0.5, 4, -12.5, -7], [-1, 4.5, 2, -3.5, -2], [5.5, 2, -4.5, -1, 9.5]]) b_ub = np.array([0, 0, 0, 0, 1]) res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options, method=self.method) _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3]) def test_bug_8662(self): # linprog simplex used to report incorrect optimal results # https://github.com/scipy/scipy/issues/8662 c = [-10, 10, 6, 3] A_ub = [[8, -8, -4, 6], [-8, 8, 4, -6], [-4, 4, 8, -4], [3, -3, -3, -10]] b_ub = [9, -9, -9, -4] bounds = [(0, None), (0, None), (0, None), (0, None)] desired_fun = 36.0000000000 with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) # Set boundary condition as a constraint A_ub.append([0, 0, -1, 0]) b_ub.append(0) bounds[2] = (None, None) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) rtol = 1e-5 _assert_success(res1, desired_fun=desired_fun, rtol=rtol) _assert_success(res2, desired_fun=desired_fun, rtol=rtol) def test_bug_8663(self): # exposed a bug in presolve # https://github.com/scipy/scipy/issues/8663 c = [1, 5] A_eq = [[0, -7]] b_eq = [-6] bounds = [(0, None), (None, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7) def test_bug_8664(self): # interior-point has trouble with this when presolve is off # tested for interior-point with presolve off in TestLinprogIPSpecific # https://github.com/scipy/scipy/issues/8664 c = [4] A_ub = [[2], [5]] b_ub = [4, 4] A_eq = [[0], [-8], [9]] b_eq = [3, 2, 10] with suppress_warnings() as sup: sup.filter(RuntimeWarning) sup.filter(OptimizeWarning, "Solving system with option...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bug_8973(self): """ Test whether bug described at: https://github.com/scipy/scipy/issues/8973 was fixed. 
""" c = np.array([0, 0, 0, 1, -1]) A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]]) b_ub = np.array([2, -2]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) # solution vector x is not unique _assert_success(res, desired_fun=-2) # HiGHS IPM had an issue where the following wasn't true! assert_equal(c @ res.x, res.fun) def test_bug_8973_2(self): """ Additional test for: https://github.com/scipy/scipy/issues/8973 suggested in https://github.com/scipy/scipy/pull/8985 review by @antonior92 """ c = np.zeros(1) A_ub = np.array([[1]]) b_ub = np.array([-2]) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[-2], desired_fun=0) def test_bug_10124(self): """ Test for linprog docstring problem 'disp'=True caused revised simplex failure """ c = np.zeros(1) A_ub = np.array([[1]]) b_ub = np.array([-2]) bounds = (None, None) c = [-1, 4] A_ub = [[-3, 1], [1, 2]] b_ub = [6, 4] bounds = [(None, None), (-3, None)] o = {"disp": True} o.update(self.options) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_success(res, desired_x=[10, -3], desired_fun=-22) def test_bug_10349(self): """ Test for redundancy removal tolerance issue https://github.com/scipy/scipy/issues/10349 """ A_eq = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 1, 0, 0, 0, 1]]) b_eq = np.array([221, 210, 10, 141, 198, 102]) c = np.concatenate((0, 1, np.zeros(4)), axis=None) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92) @pytest.mark.skipif(sys.platform == 'darwin', reason=("Failing on some local macOS builds, " "see gh-13846")) def test_bug_10466(self): """ Test that autoscale fixes poorly-scaled problem """ c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.] 
A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.], [1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.], [1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], [1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.], [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.], [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]] b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08, 1.00663296e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09] o = {} # HiGHS methods don't use autoscale option if not self.method.startswith("highs"): o = {"autoscale": True} o.update(self.options) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Solving system with option...") if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") sup.filter(RuntimeWarning, "divide by zero encountered...") sup.filter(RuntimeWarning, "overflow encountered...") sup.filter(RuntimeWarning, "invalid value encountered...") sup.filter(LinAlgWarning, "Ill-conditioned matrix...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) assert_allclose(res.fun, -8589934560) ######################### # Method-specific Tests # ######################### @pytest.mark.filterwarnings("ignore::DeprecationWarning") class LinprogSimplexTests(LinprogCommonTests): method = "simplex" @pytest.mark.filterwarnings("ignore::DeprecationWarning") class LinprogIPTests(LinprogCommonTests): method = "interior-point" def test_bug_10466(self): pytest.skip("Test is failing, but solver is deprecated.") @pytest.mark.filterwarnings("ignore::DeprecationWarning") class LinprogRSTests(LinprogCommonTests): method = "revised simplex" # Revised simplex does not reliably solve these problems. # Failure is intermittent due to the random choice of elements to complete # the basis after phase 1 terminates. In any case, linprog exists # gracefully, reporting numerical difficulties. I do not think this should # prevent revised simplex from being merged, as it solves the problems # most of the time and solves a broader range of problems than the existing # simplex implementation. # I believe that the root cause is the same for all three and that this # same issue prevents revised simplex from solving many other problems # reliably. Somehow the pivoting rule allows the algorithm to pivot into # a singular basis. I haven't been able to find a reference that # acknowledges this possibility, suggesting that there is a bug. On the # other hand, the pivoting rule is quite simple, and I can't find a # mistake, which suggests that this is a possibility with the pivoting # rule. Hopefully, a better pivoting rule will fix the issue. 
def test_bug_5400(self): pytest.skip("Intermittent failure acceptable.") def test_bug_8662(self): pytest.skip("Intermittent failure acceptable.") def test_network_flow(self): pytest.skip("Intermittent failure acceptable.") class LinprogHiGHSTests(LinprogCommonTests): def test_callback(self): # this is the problem from test_callback def cb(res): return None c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method) _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) @pytest.mark.parametrize("options", [{"maxiter": -1}, {"disp": -1}, {"presolve": -1}, {"time_limit": -1}, {"dual_feasibility_tolerance": -1}, {"primal_feasibility_tolerance": -1}, {"ipm_optimality_tolerance": -1}, {"simplex_dual_edge_weight_strategy": "ekki"}, ]) def test_invalid_option_values(self, options): def f(options): linprog(1, method=self.method, options=options) options.update(self.options) assert_warns(OptimizeWarning, f, options=options) def test_crossover(self): A_eq, b_eq, c, _, _ = magic_square(4) bounds = (0, 1) res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) # there should be nonzero crossover iterations for IPM (only) assert_equal(res.crossover_nit == 0, self.method != "highs-ipm") def test_marginals(self): # Ensure lagrange multipliers are correct by comparing the derivative # w.r.t. b_ub/b_eq/ub/lb to the reported duals. c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=0) res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) lb, ub = bounds.T # sensitivity w.r.t. b_ub def f_bub(x): return linprog(c, A_ub, x, A_eq, b_eq, bounds, method=self.method).fun dfdbub = approx_derivative(f_bub, b_ub, method='3-point', f0=res.fun) assert_allclose(res.ineqlin.marginals, dfdbub) # sensitivity w.r.t. b_eq def f_beq(x): return linprog(c, A_ub, b_ub, A_eq, x, bounds, method=self.method).fun dfdbeq = approx_derivative(f_beq, b_eq, method='3-point', f0=res.fun) assert_allclose(res.eqlin.marginals, dfdbeq) # sensitivity w.r.t. lb def f_lb(x): bounds = np.array([x, ub]).T return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method).fun with np.errstate(invalid='ignore'): # approx_derivative has trouble where lb is infinite dfdlb = approx_derivative(f_lb, lb, method='3-point', f0=res.fun) dfdlb[~np.isfinite(lb)] = 0 assert_allclose(res.lower.marginals, dfdlb) # sensitivity w.r.t. ub def f_ub(x): bounds = np.array([lb, x]).T return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method).fun with np.errstate(invalid='ignore'): dfdub = approx_derivative(f_ub, ub, method='3-point', f0=res.fun) dfdub[~np.isfinite(ub)] = 0 assert_allclose(res.upper.marginals, dfdub) def test_dual_feasibility(self): # Ensure solution is dual feasible using marginals c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42) res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) # KKT dual feasibility equation from Theorem 1 from # http://www.personal.psu.edu/cxg286/LPKKT.pdf resid = (-c + A_ub.T @ res.ineqlin.marginals + A_eq.T @ res.eqlin.marginals + res.upper.marginals + res.lower.marginals) assert_allclose(resid, 0, atol=1e-12) def test_complementary_slackness(self): # Ensure that the complementary slackness condition is satisfied. 
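# test_marginals and test_dual_feasibility above check that the multipliers
# reported by the HiGHS methods behave as Lagrange multipliers.  A standalone
# sketch of the same idea on the small c = [-3, -2] problem used elsewhere in
# this file (optimum -18 at x = [2, 6]): bumping one right-hand side entry by
# eps changes the optimal value by roughly marginal * eps, and the reported
# slack equals b_ub - A_ub @ x.
import numpy as np
from scipy.optimize import linprog

c = np.array([-3, -2])
A_ub = np.array([[2, 1], [1, 1], [1, 0]], dtype=float)
b_ub = np.array([10, 8, 4], dtype=float)

res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='highs')
np.testing.assert_allclose(res.fun, -18)
np.testing.assert_allclose(res.slack, b_ub - A_ub @ res.x, atol=1e-10)

# Finite-difference check of the inequality duals, one constraint at a time.
eps = 1e-6
for i in range(len(b_ub)):
    b_pert = b_ub.copy()
    b_pert[i] += eps
    f_pert = linprog(c, A_ub=A_ub, b_ub=b_pert, method='highs').fun
    np.testing.assert_allclose((f_pert - res.fun) / eps,
                               res.ineqlin.marginals[i], atol=1e-4)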
        c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42)
        res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                      bounds=bounds, method=self.method, options=self.options)

        # KKT complementary slackness equation from Theorem 1 from
        # http://www.personal.psu.edu/cxg286/LPKKT.pdf modified for
        # non-zero RHS
        assert np.allclose(res.ineqlin.marginals @ (b_ub - A_ub @ res.x), 0)


################################
# Simplex Option-Specific Tests#
################################


class TestLinprogSimplexDefault(LinprogSimplexTests):
    def setup_method(self):
        self.options = {}

    def test_bug_5400(self):
        pytest.skip("Simplex fails on this problem.")

    def test_bug_7237_low_tol(self):
        # Fails if the tolerance is too strict. Here, we test that
        # even if the solution is wrong, the appropriate error is raised.
        pytest.skip("Simplex fails on this problem.")

    def test_bug_8174_low_tol(self):
        # Fails if the tolerance is too strict. Here, we test that
        # even if the solution is wrong, the appropriate warning is issued.
        self.options.update({'tol': 1e-12})
        with pytest.warns(OptimizeWarning):
            super().test_bug_8174()


class TestLinprogSimplexBland(LinprogSimplexTests):
    def setup_method(self):
        self.options = {'bland': True}

    def test_bug_5400(self):
        pytest.skip("Simplex fails on this problem.")

    def test_bug_8174_low_tol(self):
        # Fails if the tolerance is too strict. Here, we test that
        # even if the solution is wrong, the appropriate error is raised.
        self.options.update({'tol': 1e-12})
        with pytest.raises(AssertionError):
            with pytest.warns(OptimizeWarning):
                super().test_bug_8174()


class TestLinprogSimplexNoPresolve(LinprogSimplexTests):
    def setup_method(self):
        self.options = {'presolve': False}

    is_32_bit = np.intp(0).itemsize < 8
    is_linux = sys.platform.startswith('linux')

    @pytest.mark.xfail(
        condition=is_32_bit and is_linux,
        reason='Fails with warning on 32-bit linux')
    def test_bug_5400(self):
        super().test_bug_5400()

    def test_bug_6139_low_tol(self):
        # Linprog(method='simplex') fails to find a basic feasible solution
        # if phase 1 pseudo-objective function is outside the provided tol.
        # https://github.com/scipy/scipy/issues/6139
        # Without ``presolve`` eliminating such rows the result is incorrect.
        self.options.update({'tol': 1e-12})
        with pytest.raises(AssertionError, match='linprog status 4'):
            return super().test_bug_6139()

    def test_bug_7237_low_tol(self):
        pytest.skip("Simplex fails on this problem.")

    def test_bug_8174_low_tol(self):
        # Fails if the tolerance is too strict. Here, we test that
        # even if the solution is wrong, the appropriate warning is issued.
        self.options.update({'tol': 1e-12})
        with pytest.warns(OptimizeWarning):
            super().test_bug_8174()

    def test_unbounded_no_nontrivial_constraints_1(self):
        pytest.skip("Tests behavior specific to presolve")

    def test_unbounded_no_nontrivial_constraints_2(self):
        pytest.skip("Tests behavior specific to presolve")


#######################################
# Interior-Point Option-Specific Tests#
#######################################


class TestLinprogIPDense(LinprogIPTests):
    options = {"sparse": False}


if has_cholmod:
    class TestLinprogIPSparseCholmod(LinprogIPTests):
        options = {"sparse": True, "cholesky": True}


if has_umfpack:
    class TestLinprogIPSparseUmfpack(LinprogIPTests):
        options = {"sparse": True, "cholesky": False}

        def test_network_flow_limited_capacity(self):
            pytest.skip("Failing due to numerical issues on some platforms.")


class TestLinprogIPSparse(LinprogIPTests):
    options = {"sparse": True, "cholesky": False, "sym_pos": False}

    @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level "
                                "perturbations in linear system solution in "
                                "_linprog_ip._sym_solve.")
    def test_bug_6139(self):
        super().test_bug_6139()

    @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
    def test_bug_6690(self):
        # Test defined in base class, but can't mark as xfail there
        super().test_bug_6690()

    def test_magic_square_sparse_no_presolve(self):
        # test linprog with a problem with a rank-deficient A_eq matrix
        A_eq, b_eq, c, _, _ = magic_square(3)
        bounds = (0, 1)

        with suppress_warnings() as sup:
            if has_umfpack:
                sup.filter(UmfpackWarning)
            sup.filter(MatrixRankWarning, "Matrix is exactly singular")
            sup.filter(OptimizeWarning, "Solving system with option...")

            o = {key: self.options[key] for key in self.options}
            o["presolve"] = False

            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                          method=self.method, options=o)
        _assert_success(res, desired_fun=1.730550597)

    def test_sparse_solve_options(self):
        # checking that problem is solved with all column permutation options
        A_eq, b_eq, c, _, _ = magic_square(3)
        with suppress_warnings() as sup:
            sup.filter(OptimizeWarning, "A_eq does not appear...")
            sup.filter(OptimizeWarning, "Invalid permc_spec option")

            o = {key: self.options[key] for key in self.options}
            permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',
                           'COLAMD', 'ekki-ekki-ekki')
            # 'ekki-ekki-ekki' raises warning about invalid permc_spec option
            # and uses default
            for permc_spec in permc_specs:
                o["permc_spec"] = permc_spec
                res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                              method=self.method, options=o)
                _assert_success(res, desired_fun=1.730550597)


class TestLinprogIPSparsePresolve(LinprogIPTests):
    options = {"sparse": True, "_sparse_presolve": True}

    @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level "
                                "perturbations in linear system solution in "
                                "_linprog_ip._sym_solve.")
    def test_bug_6139(self):
        super().test_bug_6139()

    def test_enzo_example_c_with_infeasibility(self):
        pytest.skip('_sparse_presolve=True incompatible with presolve=False')

    @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
    def test_bug_6690(self):
        # Test defined in base class, but can't mark as xfail there
        super().test_bug_6690()


@pytest.mark.filterwarnings("ignore::DeprecationWarning")
class TestLinprogIPSpecific:
    method = "interior-point"
    # the following tests don't need to be performed separately for
    # sparse presolve, sparse after presolve, and dense

    def test_solver_select(self):
        # check that default solver is selected as expected
        if has_cholmod:
            options = {'sparse': True, 'cholesky': True}
        elif has_umfpack:
            options = {'sparse': True, 'cholesky': False}
        else:
            options = {'sparse': True, 'cholesky': False, 'sym_pos': False}
        A, b, c = lpgen_2d(20, 20)
        res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options)
        res2 = linprog(c, A_ub=A, b_ub=b, method=self.method)  # default solver
        assert_allclose(res1.fun, res2.fun,
                        err_msg="linprog default solver unexpected result",
                        rtol=2e-15, atol=1e-15)

    def test_unbounded_below_no_presolve_original(self):
        # formerly caused segfault in TravisCI w/ "cholesky":True
        c = [-1]
        bounds = [(None, 1)]
        res = linprog(c=c, bounds=bounds,
                      method=self.method,
                      options={"presolve": False, "cholesky": True})
        _assert_success(res, desired_fun=-1)

    def test_cholesky(self):
        # use cholesky factorization and triangular solves
        A, b, c = lpgen_2d(20, 20)
        res = linprog(c, A_ub=A, b_ub=b, method=self.method,
                      options={"cholesky": True})  # only for dense
        _assert_success(res, desired_fun=-64.049494229)

    def test_alternate_initial_point(self):
        # use "improved" initial point
        A, b, c = lpgen_2d(20, 20)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
            sup.filter(OptimizeWarning, "Solving system with option...")
            sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
            res = linprog(c, A_ub=A, b_ub=b, method=self.method,
                          options={"ip": True, "disp": True})
            # ip code is independent of sparse/dense
        _assert_success(res, desired_fun=-64.049494229)

    def test_bug_8664(self):
        # interior-point has trouble with this when presolve is off
        c = [4]
        A_ub = [[2], [5]]
        b_ub = [4, 4]
        A_eq = [[0], [-8], [9]]
        b_eq = [3, 2, 10]
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            sup.filter(OptimizeWarning, "Solving system with option...")
            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
                          method=self.method, options={"presolve": False})
        assert_(not res.success, "Incorrectly reported success")


########################################
# Revised Simplex Option-Specific Tests#
########################################


class TestLinprogRSCommon(LinprogRSTests):
    options = {}

    def test_cyclic_bland(self):
        pytest.skip("Intermittent failure acceptable.")

    def test_nontrivial_problem_with_guess(self):
        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method,
                      options=self.options, x0=x_star)
        _assert_success(res, desired_fun=f_star, desired_x=x_star)
        assert_equal(res.nit, 0)

    def test_nontrivial_problem_with_unbounded_variables(self):
        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
        bounds = [(None, None), (None, None), (0, None), (None, None)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method,
                      options=self.options, x0=x_star)
        _assert_success(res, desired_fun=f_star, desired_x=x_star)
        assert_equal(res.nit, 0)

    def test_nontrivial_problem_with_bounded_variables(self):
        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
        bounds = [(None, 1), (1, None), (0, None), (.4, .6)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method,
                      options=self.options, x0=x_star)
        _assert_success(res, desired_fun=f_star, desired_x=x_star)
        assert_equal(res.nit, 0)

    def test_nontrivial_problem_with_negative_unbounded_variable(self):
        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
        b_eq = [4]
        x_star = np.array([-219/385, 582/385, 0, 4/10])
        f_star = 3951/385
        bounds = [(None, None), (1, None), (0, None), (.4, .6)]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method,
                      options=self.options, x0=x_star)
        _assert_success(res, desired_fun=f_star,
                        desired_x=x_star)
        assert_equal(res.nit, 0)

    def test_nontrivial_problem_with_bad_guess(self):
        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
        bad_guess = [1, 2, 3, .5]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method,
                      options=self.options, x0=bad_guess)
        assert_equal(res.status, 6)

    def test_redundant_constraints_with_guess(self):
        A, b, c, _, _ = magic_square(3)
        p = np.random.rand(*c.shape)
        with suppress_warnings() as sup:
            sup.filter(OptimizeWarning, "A_eq does not appear...")
            sup.filter(RuntimeWarning, "invalid value encountered")
            sup.filter(LinAlgWarning)
            res = linprog(c, A_eq=A, b_eq=b, method=self.method)
            res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x)
            res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x)
        _assert_success(res2, desired_fun=1.730550597)
        assert_equal(res2.nit, 0)
        _assert_success(res3)
        assert_(res3.nit < res.nit)  # hot start reduces iterations


class TestLinprogRSBland(LinprogRSTests):
    options = {"pivot": "bland"}


############################################
# HiGHS-Simplex-Dual Option-Specific Tests #
############################################


class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests):
    method = "highs-ds"
    options = {}

    def test_lad_regression(self):
        '''
        The scaled model should be optimal, i.e. not produce unscaled model
        infeasible. See https://github.com/ERGO-Code/HiGHS/issues/494.
        '''
        # Test to ensure gh-13610 is resolved (mismatch between HiGHS scaled
        # and unscaled model statuses)
        c, A_ub, b_ub, bnds = l1_regression_prob()
        res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds,
                      method=self.method, options=self.options)
        assert_equal(res.status, 0)
        assert_(res.x is not None)
        assert_(np.all(res.slack > -1e-6))
        assert_(np.all(res.x <= [np.inf if ub is None else ub
                                 for lb, ub in bnds]))
        assert_(np.all(res.x >= [-np.inf if lb is None else lb - 1e-7
                                 for lb, ub in bnds]))


###################################
# HiGHS-IPM Option-Specific Tests #
###################################


class TestLinprogHiGHSIPM(LinprogHiGHSTests):
    method = "highs-ipm"
    options = {}


###################################
# HiGHS-MIP Option-Specific Tests #
###################################


class TestLinprogHiGHSMIP():
    method = "highs"
    options = {}

    @pytest.mark.xfail(condition=(sys.maxsize < 2 ** 32 and
                                  platform.system() == "Linux"),
                       run=False,
                       reason="gh-16347")
    def test_mip1(self):
        # solve non-relaxed magic square problem (finally!)
        # also check that values are all integers - they don't always
        # come out of HiGHS that way
        n = 4
        A, b, c, numbers, M = magic_square(n)
        bounds = [(0, 1)] * len(c)
        integrality = [1] * len(c)

        res = linprog(c=c*0, A_eq=A, b_eq=b, bounds=bounds,
                      method=self.method, integrality=integrality)

        s = (numbers.flatten() * res.x).reshape(n**2, n, n)
        square = np.sum(s, axis=0)
        np.testing.assert_allclose(square.sum(axis=0), M)
        np.testing.assert_allclose(square.sum(axis=1), M)
        np.testing.assert_allclose(np.diag(square).sum(), M)
        np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M)

        np.testing.assert_allclose(res.x, np.round(res.x), atol=1e-12)

    def test_mip2(self):
        # solve MIP with inequality constraints and all integer constraints
        # source: slide 5,
        # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf

        # use all array inputs to test gh-16681 (integrality couldn't be array)
        A_ub = np.array([[2, -2], [-8, 10]])
        b_ub = np.array([-1, 13])
        c = -np.array([1, 1])
        bounds = np.array([(0, np.inf)] * len(c))
        integrality = np.ones_like(c)

        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
                      method=self.method, integrality=integrality)

        np.testing.assert_allclose(res.x, [1, 2])
        np.testing.assert_allclose(res.fun, -3)

    def test_mip3(self):
        # solve MIP with inequality constraints and all integer constraints
        # source: https://en.wikipedia.org/wiki/Integer_programming#Example
        A_ub = np.array([[-1, 1], [3, 2], [2, 3]])
        b_ub = np.array([1, 12, 12])
        c = -np.array([0, 1])
        bounds = [(0, np.inf)] * len(c)
        integrality = [1] * len(c)

        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
                      method=self.method, integrality=integrality)

        np.testing.assert_allclose(res.fun, -2)
        # two optimal solutions possible, just need one of them
        assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2])

    def test_mip4(self):
        # solve MIP with inequality constraints and only one integer constraint
        # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
        A_ub = np.array([[-1, -2], [-4, -1], [2, 1]])
        b_ub = np.array([14, -33, 20])
        c = np.array([8, 1])
        bounds = [(0, np.inf)] * len(c)
        integrality = [0, 1]

        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
                      method=self.method, integrality=integrality)

        np.testing.assert_allclose(res.x, [6.5, 7])
        np.testing.assert_allclose(res.fun, 59)

    def test_mip5(self):
        # solve MIP with inequality and equality constraints
        # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
        A_ub = np.array([[1, 1, 1]])
        b_ub = np.array([7])
        A_eq = np.array([[4, 2, 1]])
        b_eq = np.array([12])
        c = np.array([-3, -2, -1])
        bounds = [(0, np.inf), (0, np.inf), (0, 1)]
        integrality = [0, 1, 0]

        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                      bounds=bounds, method=self.method,
                      integrality=integrality)

        np.testing.assert_allclose(res.x, [0, 6, 0])
        np.testing.assert_allclose(res.fun, -12)

        # gh-16897: these fields were not present, ensure that they are now
        assert res.get("mip_node_count", None) is not None
        assert res.get("mip_dual_bound", None) is not None
        assert res.get("mip_gap", None) is not None

    @pytest.mark.slow
    @pytest.mark.timeout(120)  # prerelease_deps_coverage_64bit_blas job
    def test_mip6(self):
        # solve a larger MIP with only equality constraints
        # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
        A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26],
                         [39, 16, 22, 28, 26, 30, 23, 24],
                         [18, 14, 29, 27, 30, 38, 26, 26],
                         [41, 26, 28, 36, 18, 38, 16, 26]])
        b_eq = np.array([7872, 10466, 11322, 12058])
        c = np.array([2, 10, 13, 17, 7, 5, 7, 3])
        bounds = [(0, np.inf)]*8
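        # requiring every variable to be integer (integrality=1 for all eight
        # variables) makes this a pure integer program; HiGHS should recover
        # the reference objective value asserted below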
        integrality = [1]*8

        res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
                      method=self.method, integrality=integrality)

        np.testing.assert_allclose(res.fun, 1854)

    @pytest.mark.xslow
    def test_mip_rel_gap_passdown(self):
        # MIP taken from test_mip6, solved with different values of
        # mip_rel_gap
        # solve a larger MIP with only equality constraints
        # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
        A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26],
                         [39, 16, 22, 28, 26, 30, 23, 24],
                         [18, 14, 29, 27, 30, 38, 26, 26],
                         [41, 26, 28, 36, 18, 38, 16, 26]])
        b_eq = np.array([7872, 10466, 11322, 12058])
        c = np.array([2, 10, 13, 17, 7, 5, 7, 3])
        bounds = [(0, np.inf)]*8
        integrality = [1]*8

        mip_rel_gaps = [0.5, 0.25, 0.01, 0.001]
        sol_mip_gaps = []
        for mip_rel_gap in mip_rel_gaps:
            res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                          bounds=bounds, method=self.method,
                          integrality=integrality,
                          options={"mip_rel_gap": mip_rel_gap})
            final_mip_gap = res["mip_gap"]
            # assert that the solution actually has mip_gap lower than the
            # required mip_rel_gap supplied
            assert final_mip_gap <= mip_rel_gap
            sol_mip_gaps.append(final_mip_gap)

        # make sure that the mip_rel_gap parameter is actually doing something
        # check that differences between solution gaps are declining
        # monotonically with the mip_rel_gap parameter. np.diff does
        # x[i+1] - x[i], so flip the array before differencing to get
        # what should be a positive, monotone decreasing series of solution
        # gaps
        gap_diffs = np.diff(np.flip(sol_mip_gaps))
        assert np.all(gap_diffs >= 0)
        assert not np.all(gap_diffs == 0)

    def test_semi_continuous(self):
        # See issue #18106. This tests whether the solution is being
        # checked correctly (status is 0) when integrality > 1:
        # values are allowed to be 0 even if 0 is out of bounds.
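        # linprog's integrality codes: 0 = continuous, 1 = integer,
        # 2 = semi-continuous (either 0 or within bounds), 3 = semi-integer
        # (either 0 or an integer within bounds). With codes 2 and 3, the
        # first two variables may legitimately take the value 0 below even
        # though the lower bound is 0.5.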
        c = np.array([1., 1., -1, -1])
        bounds = np.array([[0.5, 1.5], [0.5, 1.5], [0.5, 1.5], [0.5, 1.5]])
        integrality = np.array([2, 3, 2, 3])

        res = linprog(c, bounds=bounds, integrality=integrality,
                      method='highs')

        np.testing.assert_allclose(res.x, [0, 0, 1.5, 1])
        assert res.status == 0


###########################
# Autoscale-Specific Tests#
###########################


@pytest.mark.filterwarnings("ignore::DeprecationWarning")
class AutoscaleTests:
    options = {"autoscale": True}

    test_bug_6139 = LinprogCommonTests.test_bug_6139
    test_bug_6690 = LinprogCommonTests.test_bug_6690
    test_bug_7237 = LinprogCommonTests.test_bug_7237


class TestAutoscaleIP(AutoscaleTests):
    method = "interior-point"

    def test_bug_6139(self):
        self.options['tol'] = 1e-10
        return AutoscaleTests.test_bug_6139(self)


class TestAutoscaleSimplex(AutoscaleTests):
    method = "simplex"


class TestAutoscaleRS(AutoscaleTests):
    method = "revised simplex"

    def test_nontrivial_problem_with_guess(self):
        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method,
                      options=self.options, x0=x_star)
        _assert_success(res, desired_fun=f_star, desired_x=x_star)
        assert_equal(res.nit, 0)

    def test_nontrivial_problem_with_bad_guess(self):
        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
        bad_guess = [1, 2, 3, .5]
        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method,
                      options=self.options, x0=bad_guess)
        assert_equal(res.status, 6)


###########################
# Redundancy Removal Tests#
###########################


@pytest.mark.filterwarnings("ignore::DeprecationWarning")
class RRTests:
    method = "interior-point"
    LCT = LinprogCommonTests
    # these are a few of the existing tests that have redundancy
    test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility
    test_bug_10349 = LCT.test_bug_10349
    test_bug_7044 = LCT.test_bug_7044
    test_NFLC = LCT.test_network_flow_limited_capacity
    test_enzo_example_b = LCT.test_enzo_example_b


class TestRRSVD(RRTests):
    options = {"rr_method": "SVD"}


class TestRRPivot(RRTests):
    options = {"rr_method": "pivot"}


class TestRRID(RRTests):
    options = {"rr_method": "ID"}
96628
38.232237
107
py