Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_differentiate.py +856 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py +475 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py +1126 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py +661 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_minimize.py +1116 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_nnls.py +164 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_numdiff.py +779 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_optimize.py +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so +0 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_spectral.py +260 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py +126 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/cobyla.py +19 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/minpack2.py +17 -0
- llava_next/lib/python3.10/site-packages/scipy/optimize/slsqp.py +23 -0
- parrot/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc +3 -0
- parrot/lib/python3.10/site-packages/torch/cpu/amp/__init__.py +2 -0
- parrot/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/grad_scaler.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py +50 -0
- parrot/lib/python3.10/site-packages/torch/cpu/amp/grad_scaler.py +34 -0
- parrot/lib/python3.10/site-packages/torch/distributed/__init__.py +146 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py +1147 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_functional_collectives_impl.py +116 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py +6 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py +28 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py +19 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py +62 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py +62 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py +98 -0
- parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py +27 -0
- parrot/lib/python3.10/site-packages/torch/distributed/argparse_util.py +104 -0
- parrot/lib/python3.10/site-packages/torch/distributed/autograd/__init__.py +53 -0
- parrot/lib/python3.10/site-packages/torch/distributed/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/constants.py +23 -0
- parrot/lib/python3.10/site-packages/torch/distributed/device_mesh.py +719 -0
- parrot/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/launch.py +208 -0
- parrot/lib/python3.10/site-packages/torch/distributed/optim/__init__.py +47 -0
- parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/apply_optimizer_in_backward.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adam.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamax.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rmsprop.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rprop.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/post_localSGD_optimizer.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -351,3 +351,5 @@ llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats
|
|
| 351 |
llava_next/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 352 |
llava_next/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 353 |
llava_next/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 351 |
llava_next/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 352 |
llava_next/lib/python3.10/site-packages/scipy/cluster/__pycache__/hierarchy.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 353 |
llava_next/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 354 |
+
parrot/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 355 |
+
parrot/lib/python3.10/site-packages/torchvision.libs/libjpeg.ceea7512.so.62 filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_differentiate.py
ADDED
|
@@ -0,0 +1,856 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: disable-error-code="attr-defined"
|
| 2 |
+
import numpy as np
|
| 3 |
+
import scipy._lib._elementwise_iterative_method as eim
|
| 4 |
+
from scipy._lib._util import _RichResult
|
| 5 |
+
|
| 6 |
+
_EERRORINCREASE = -1 # used in _differentiate
|
| 7 |
+
|
| 8 |
+
def _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
|
| 9 |
+
step_factor, step_direction, preserve_shape, callback):
|
| 10 |
+
# Input validation for `_differentiate`
|
| 11 |
+
|
| 12 |
+
if not callable(func):
|
| 13 |
+
raise ValueError('`func` must be callable.')
|
| 14 |
+
|
| 15 |
+
# x has more complex IV that is taken care of during initialization
|
| 16 |
+
x = np.asarray(x)
|
| 17 |
+
dtype = x.dtype if np.issubdtype(x.dtype, np.inexact) else np.float64
|
| 18 |
+
|
| 19 |
+
if not np.iterable(args):
|
| 20 |
+
args = (args,)
|
| 21 |
+
|
| 22 |
+
if atol is None:
|
| 23 |
+
atol = np.finfo(dtype).tiny
|
| 24 |
+
|
| 25 |
+
if rtol is None:
|
| 26 |
+
rtol = np.sqrt(np.finfo(dtype).eps)
|
| 27 |
+
|
| 28 |
+
message = 'Tolerances and step parameters must be non-negative scalars.'
|
| 29 |
+
tols = np.asarray([atol, rtol, initial_step, step_factor])
|
| 30 |
+
if (not np.issubdtype(tols.dtype, np.number)
|
| 31 |
+
or np.any(tols < 0)
|
| 32 |
+
or tols.shape != (4,)):
|
| 33 |
+
raise ValueError(message)
|
| 34 |
+
initial_step, step_factor = tols[2:].astype(dtype)
|
| 35 |
+
|
| 36 |
+
maxiter_int = int(maxiter)
|
| 37 |
+
if maxiter != maxiter_int or maxiter <= 0:
|
| 38 |
+
raise ValueError('`maxiter` must be a positive integer.')
|
| 39 |
+
|
| 40 |
+
order_int = int(order)
|
| 41 |
+
if order_int != order or order <= 0:
|
| 42 |
+
raise ValueError('`order` must be a positive integer.')
|
| 43 |
+
|
| 44 |
+
step_direction = np.sign(step_direction).astype(dtype)
|
| 45 |
+
x, step_direction = np.broadcast_arrays(x, step_direction)
|
| 46 |
+
x, step_direction = x[()], step_direction[()]
|
| 47 |
+
|
| 48 |
+
message = '`preserve_shape` must be True or False.'
|
| 49 |
+
if preserve_shape not in {True, False}:
|
| 50 |
+
raise ValueError(message)
|
| 51 |
+
|
| 52 |
+
if callback is not None and not callable(callback):
|
| 53 |
+
raise ValueError('`callback` must be callable.')
|
| 54 |
+
|
| 55 |
+
return (func, x, args, atol, rtol, maxiter_int, order_int, initial_step,
|
| 56 |
+
step_factor, step_direction, preserve_shape, callback)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _differentiate(func, x, *, args=(), atol=None, rtol=None, maxiter=10,
|
| 60 |
+
order=8, initial_step=0.5, step_factor=2.0,
|
| 61 |
+
step_direction=0, preserve_shape=False, callback=None):
|
| 62 |
+
"""Evaluate the derivative of an elementwise scalar function numerically.
|
| 63 |
+
|
| 64 |
+
Parameters
|
| 65 |
+
----------
|
| 66 |
+
func : callable
|
| 67 |
+
The function whose derivative is desired. The signature must be::
|
| 68 |
+
|
| 69 |
+
func(x: ndarray, *fargs) -> ndarray
|
| 70 |
+
|
| 71 |
+
where each element of ``x`` is a finite real number and ``fargs`` is a tuple,
|
| 72 |
+
which may contain an arbitrary number of arrays that are broadcastable
|
| 73 |
+
with `x`. ``func`` must be an elementwise function: each element
|
| 74 |
+
``func(x)[i]`` must equal ``func(x[i])`` for all indices ``i``.
|
| 75 |
+
x : array_like
|
| 76 |
+
Abscissae at which to evaluate the derivative.
|
| 77 |
+
args : tuple, optional
|
| 78 |
+
Additional positional arguments to be passed to `func`. Must be arrays
|
| 79 |
+
broadcastable with `x`. If the callable to be differentiated requires
|
| 80 |
+
arguments that are not broadcastable with `x`, wrap that callable with
|
| 81 |
+
`func`. See Examples.
|
| 82 |
+
atol, rtol : float, optional
|
| 83 |
+
Absolute and relative tolerances for the stopping condition: iteration
|
| 84 |
+
will stop when ``res.error < atol + rtol * abs(res.df)``. The default
|
| 85 |
+
`atol` is the smallest normal number of the appropriate dtype, and
|
| 86 |
+
the default `rtol` is the square root of the precision of the
|
| 87 |
+
appropriate dtype.
|
| 88 |
+
order : int, default: 8
|
| 89 |
+
The (positive integer) order of the finite difference formula to be
|
| 90 |
+
used. Odd integers will be rounded up to the next even integer.
|
| 91 |
+
initial_step : float, default: 0.5
|
| 92 |
+
The (absolute) initial step size for the finite difference derivative
|
| 93 |
+
approximation.
|
| 94 |
+
step_factor : float, default: 2.0
|
| 95 |
+
The factor by which the step size is *reduced* in each iteration; i.e.
|
| 96 |
+
the step size in iteration 1 is ``initial_step/step_factor``. If
|
| 97 |
+
``step_factor < 1``, subsequent steps will be greater than the initial
|
| 98 |
+
step; this may be useful if steps smaller than some threshold are
|
| 99 |
+
undesirable (e.g. due to subtractive cancellation error).
|
| 100 |
+
maxiter : int, default: 10
|
| 101 |
+
The maximum number of iterations of the algorithm to perform. See
|
| 102 |
+
notes.
|
| 103 |
+
step_direction : array_like
|
| 104 |
+
An array representing the direction of the finite difference steps (for
|
| 105 |
+
use when `x` lies near to the boundary of the domain of the function.)
|
| 106 |
+
Must be broadcastable with `x` and all `args`.
|
| 107 |
+
Where 0 (default), central differences are used; where negative (e.g.
|
| 108 |
+
-1), steps are non-positive; and where positive (e.g. 1), all steps are
|
| 109 |
+
non-negative.
|
| 110 |
+
preserve_shape : bool, default: False
|
| 111 |
+
In the following, "arguments of `func`" refers to the array ``x`` and
|
| 112 |
+
any arrays within ``fargs``. Let ``shape`` be the broadcasted shape
|
| 113 |
+
of `x` and all elements of `args` (which is conceptually
|
| 114 |
+
distinct from ``fargs`` passed into `f`).
|
| 115 |
+
|
| 116 |
+
- When ``preserve_shape=False`` (default), `f` must accept arguments
|
| 117 |
+
of *any* broadcastable shapes.
|
| 118 |
+
|
| 119 |
+
- When ``preserve_shape=True``, `f` must accept arguments of shape
|
| 120 |
+
``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of
|
| 121 |
+
abscissae at which the function is being evaluated.
|
| 122 |
+
|
| 123 |
+
In either case, for each scalar element ``xi`` within `x`, the array
|
| 124 |
+
returned by `f` must include the scalar ``f(xi)`` at the same index.
|
| 125 |
+
Consequently, the shape of the output is always the shape of the input
|
| 126 |
+
``x``.
|
| 127 |
+
|
| 128 |
+
See Examples.
|
| 129 |
+
callback : callable, optional
|
| 130 |
+
An optional user-supplied function to be called before the first
|
| 131 |
+
iteration and after each iteration.
|
| 132 |
+
Called as ``callback(res)``, where ``res`` is a ``_RichResult``
|
| 133 |
+
similar to that returned by `_differentiate` (but containing the
|
| 134 |
+
current iterate's values of all variables). If `callback` raises a
|
| 135 |
+
``StopIteration``, the algorithm will terminate immediately and
|
| 136 |
+
`_differentiate` will return a result.
|
| 137 |
+
|
| 138 |
+
Returns
|
| 139 |
+
-------
|
| 140 |
+
res : _RichResult
|
| 141 |
+
An instance of `scipy._lib._util._RichResult` with the following
|
| 142 |
+
attributes. (The descriptions are written as though the values will be
|
| 143 |
+
scalars; however, if `func` returns an array, the outputs will be
|
| 144 |
+
arrays of the same shape.)
|
| 145 |
+
|
| 146 |
+
success : bool
|
| 147 |
+
``True`` when the algorithm terminated successfully (status ``0``).
|
| 148 |
+
status : int
|
| 149 |
+
An integer representing the exit status of the algorithm.
|
| 150 |
+
``0`` : The algorithm converged to the specified tolerances.
|
| 151 |
+
``-1`` : The error estimate increased, so iteration was terminated.
|
| 152 |
+
``-2`` : The maximum number of iterations was reached.
|
| 153 |
+
``-3`` : A non-finite value was encountered.
|
| 154 |
+
``-4`` : Iteration was terminated by `callback`.
|
| 155 |
+
``1`` : The algorithm is proceeding normally (in `callback` only).
|
| 156 |
+
df : float
|
| 157 |
+
The derivative of `func` at `x`, if the algorithm terminated
|
| 158 |
+
successfully.
|
| 159 |
+
error : float
|
| 160 |
+
An estimate of the error: the magnitude of the difference between
|
| 161 |
+
the current estimate of the derivative and the estimate in the
|
| 162 |
+
previous iteration.
|
| 163 |
+
nit : int
|
| 164 |
+
The number of iterations performed.
|
| 165 |
+
nfev : int
|
| 166 |
+
The number of points at which `func` was evaluated.
|
| 167 |
+
x : float
|
| 168 |
+
The value at which the derivative of `func` was evaluated
|
| 169 |
+
(after broadcasting with `args` and `step_direction`).
|
| 170 |
+
|
| 171 |
+
Notes
|
| 172 |
+
-----
|
| 173 |
+
The implementation was inspired by jacobi [1]_, numdifftools [2]_, and
|
| 174 |
+
DERIVEST [3]_, but the implementation follows the theory of Taylor series
|
| 175 |
+
more straightforwardly (and arguably naively so).
|
| 176 |
+
In the first iteration, the derivative is estimated using a finite
|
| 177 |
+
difference formula of order `order` with maximum step size `initial_step`.
|
| 178 |
+
Each subsequent iteration, the maximum step size is reduced by
|
| 179 |
+
`step_factor`, and the derivative is estimated again until a termination
|
| 180 |
+
condition is reached. The error estimate is the magnitude of the difference
|
| 181 |
+
between the current derivative approximation and that of the previous
|
| 182 |
+
iteration.
|
| 183 |
+
|
| 184 |
+
The stencils of the finite difference formulae are designed such that
|
| 185 |
+
abscissae are "nested": after `func` is evaluated at ``order + 1``
|
| 186 |
+
points in the first iteration, `func` is evaluated at only two new points
|
| 187 |
+
in each subsequent iteration; ``order - 1`` previously evaluated function
|
| 188 |
+
values required by the finite difference formula are reused, and two
|
| 189 |
+
function values (evaluations at the points furthest from `x`) are unused.
|
| 190 |
+
|
| 191 |
+
Step sizes are absolute. When the step size is small relative to the
|
| 192 |
+
magnitude of `x`, precision is lost; for example, if `x` is ``1e20``, the
|
| 193 |
+
default initial step size of ``0.5`` cannot be resolved. Accordingly,
|
| 194 |
+
consider using larger initial step sizes for large magnitudes of `x`.
|
| 195 |
+
|
| 196 |
+
The default tolerances are challenging to satisfy at points where the
|
| 197 |
+
true derivative is exactly zero. If the derivative may be exactly zero,
|
| 198 |
+
consider specifying an absolute tolerance (e.g. ``atol=1e-16``) to
|
| 199 |
+
improve convergence.
|
| 200 |
+
|
| 201 |
+
References
|
| 202 |
+
----------
|
| 203 |
+
[1]_ Hans Dembinski (@HDembinski). jacobi.
|
| 204 |
+
https://github.com/HDembinski/jacobi
|
| 205 |
+
[2]_ Per A. Brodtkorb and John D'Errico. numdifftools.
|
| 206 |
+
https://numdifftools.readthedocs.io/en/latest/
|
| 207 |
+
[3]_ John D'Errico. DERIVEST: Adaptive Robust Numerical Differentiation.
|
| 208 |
+
https://www.mathworks.com/matlabcentral/fileexchange/13490-adaptive-robust-numerical-differentiation
|
| 209 |
+
[4]_ Numerical Differentition. Wikipedia.
|
| 210 |
+
https://en.wikipedia.org/wiki/Numerical_differentiation
|
| 211 |
+
|
| 212 |
+
Examples
|
| 213 |
+
--------
|
| 214 |
+
Evaluate the derivative of ``np.exp`` at several points ``x``.
|
| 215 |
+
|
| 216 |
+
>>> import numpy as np
|
| 217 |
+
>>> from scipy.optimize._differentiate import _differentiate
|
| 218 |
+
>>> f = np.exp
|
| 219 |
+
>>> df = np.exp # true derivative
|
| 220 |
+
>>> x = np.linspace(1, 2, 5)
|
| 221 |
+
>>> res = _differentiate(f, x)
|
| 222 |
+
>>> res.df # approximation of the derivative
|
| 223 |
+
array([2.71828183, 3.49034296, 4.48168907, 5.75460268, 7.3890561 ])
|
| 224 |
+
>>> res.error # estimate of the error
|
| 225 |
+
array(
|
| 226 |
+
[7.12940817e-12, 9.16688947e-12, 1.17594823e-11, 1.50972568e-11, 1.93942640e-11]
|
| 227 |
+
)
|
| 228 |
+
>>> abs(res.df - df(x)) # true error
|
| 229 |
+
array(
|
| 230 |
+
[3.06421555e-14, 3.01980663e-14, 5.06261699e-14, 6.30606678e-14, 8.34887715e-14]
|
| 231 |
+
)
|
| 232 |
+
|
| 233 |
+
Show the convergence of the approximation as the step size is reduced.
|
| 234 |
+
Each iteration, the step size is reduced by `step_factor`, so for
|
| 235 |
+
sufficiently small initial step, each iteration reduces the error by a
|
| 236 |
+
factor of ``1/step_factor**order`` until finite precision arithmetic
|
| 237 |
+
inhibits further improvement.
|
| 238 |
+
|
| 239 |
+
>>> iter = list(range(1, 12)) # maximum iterations
|
| 240 |
+
>>> hfac = 2 # step size reduction per iteration
|
| 241 |
+
>>> hdir = [-1, 0, 1] # compare left-, central-, and right- steps
|
| 242 |
+
>>> order = 4 # order of differentiation formula
|
| 243 |
+
>>> x = 1
|
| 244 |
+
>>> ref = df(x)
|
| 245 |
+
>>> errors = [] # true error
|
| 246 |
+
>>> for i in iter:
|
| 247 |
+
... res = _differentiate(f, x, maxiter=i, step_factor=hfac,
|
| 248 |
+
... step_direction=hdir, order=order,
|
| 249 |
+
... atol=0, rtol=0) # prevent early termination
|
| 250 |
+
... errors.append(abs(res.df - ref))
|
| 251 |
+
>>> errors = np.array(errors)
|
| 252 |
+
>>> plt.semilogy(iter, errors[:, 0], label='left differences')
|
| 253 |
+
>>> plt.semilogy(iter, errors[:, 1], label='central differences')
|
| 254 |
+
>>> plt.semilogy(iter, errors[:, 2], label='right differences')
|
| 255 |
+
>>> plt.xlabel('iteration')
|
| 256 |
+
>>> plt.ylabel('error')
|
| 257 |
+
>>> plt.legend()
|
| 258 |
+
>>> plt.show()
|
| 259 |
+
>>> (errors[1, 1] / errors[0, 1], 1 / hfac**order)
|
| 260 |
+
(0.06215223140159822, 0.0625)
|
| 261 |
+
|
| 262 |
+
The implementation is vectorized over `x`, `step_direction`, and `args`.
|
| 263 |
+
The function is evaluated once before the first iteration to perform input
|
| 264 |
+
validation and standardization, and once per iteration thereafter.
|
| 265 |
+
|
| 266 |
+
>>> def f(x, p):
|
| 267 |
+
... print('here')
|
| 268 |
+
... f.nit += 1
|
| 269 |
+
... return x**p
|
| 270 |
+
>>> f.nit = 0
|
| 271 |
+
>>> def df(x, p):
|
| 272 |
+
... return p*x**(p-1)
|
| 273 |
+
>>> x = np.arange(1, 5)
|
| 274 |
+
>>> p = np.arange(1, 6).reshape((-1, 1))
|
| 275 |
+
>>> hdir = np.arange(-1, 2).reshape((-1, 1, 1))
|
| 276 |
+
>>> res = _differentiate(f, x, args=(p,), step_direction=hdir, maxiter=1)
|
| 277 |
+
>>> np.allclose(res.df, df(x, p))
|
| 278 |
+
True
|
| 279 |
+
>>> res.df.shape
|
| 280 |
+
(3, 5, 4)
|
| 281 |
+
>>> f.nit
|
| 282 |
+
2
|
| 283 |
+
|
| 284 |
+
By default, `preserve_shape` is False, and therefore the callable
|
| 285 |
+
`f` may be called with arrays of any broadcastable shapes.
|
| 286 |
+
For example:
|
| 287 |
+
|
| 288 |
+
>>> shapes = []
|
| 289 |
+
>>> def f(x, c):
|
| 290 |
+
... shape = np.broadcast_shapes(x.shape, c.shape)
|
| 291 |
+
... shapes.append(shape)
|
| 292 |
+
... return np.sin(c*x)
|
| 293 |
+
>>>
|
| 294 |
+
>>> c = [1, 5, 10, 20]
|
| 295 |
+
>>> res = _differentiate(f, 0, args=(c,))
|
| 296 |
+
>>> shapes
|
| 297 |
+
[(4,), (4, 8), (4, 2), (3, 2), (2, 2), (1, 2)]
|
| 298 |
+
|
| 299 |
+
To understand where these shapes are coming from - and to better
|
| 300 |
+
understand how `_differentiate` computes accurate results - note that
|
| 301 |
+
higher values of ``c`` correspond with higher frequency sinusoids.
|
| 302 |
+
The higher frequency sinusoids make the function's derivative change
|
| 303 |
+
faster, so more function evaluations are required to achieve the target
|
| 304 |
+
accuracy:
|
| 305 |
+
|
| 306 |
+
>>> res.nfev
|
| 307 |
+
array([11, 13, 15, 17])
|
| 308 |
+
|
| 309 |
+
The initial ``shape``, ``(4,)``, corresponds with evaluating the
|
| 310 |
+
function at a single abscissa and all four frequencies; this is used
|
| 311 |
+
for input validation and to determine the size and dtype of the arrays
|
| 312 |
+
that store results. The next shape corresponds with evaluating the
|
| 313 |
+
function at an initial grid of abscissae and all four frequencies.
|
| 314 |
+
Successive calls to the function evaluate the function at two more
|
| 315 |
+
abscissae, increasing the effective order of the approximation by two.
|
| 316 |
+
However, in later function evaluations, the function is evaluated at
|
| 317 |
+
fewer frequencies because the corresponding derivative has already
|
| 318 |
+
converged to the required tolerance. This saves function evaluations to
|
| 319 |
+
improve performance, but it requires the function to accept arguments of
|
| 320 |
+
any shape.
|
| 321 |
+
|
| 322 |
+
"Vector-valued" functions are unlikely to satisfy this requirement.
|
| 323 |
+
For example, consider
|
| 324 |
+
|
| 325 |
+
>>> def f(x):
|
| 326 |
+
... return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]
|
| 327 |
+
|
| 328 |
+
This integrand is not compatible with `_differentiate` as written; for instance,
|
| 329 |
+
the shape of the output will not be the same as the shape of ``x``. Such a
|
| 330 |
+
function *could* be converted to a compatible form with the introduction of
|
| 331 |
+
additional parameters, but this would be inconvenient. In such cases,
|
| 332 |
+
a simpler solution would be to use `preserve_shape`.
|
| 333 |
+
|
| 334 |
+
>>> shapes = []
|
| 335 |
+
>>> def f(x):
|
| 336 |
+
... shapes.append(x.shape)
|
| 337 |
+
... x0, x1, x2, x3 = x
|
| 338 |
+
... return [x0, np.sin(3*x1), x2+np.sin(10*x2), np.sin(20*x3)*(x3-1)**2]
|
| 339 |
+
>>>
|
| 340 |
+
>>> x = np.zeros(4)
|
| 341 |
+
>>> res = _differentiate(f, x, preserve_shape=True)
|
| 342 |
+
>>> shapes
|
| 343 |
+
[(4,), (4, 8), (4, 2), (4, 2), (4, 2), (4, 2)]
|
| 344 |
+
|
| 345 |
+
Here, the shape of ``x`` is ``(4,)``. With ``preserve_shape=True``, the
|
| 346 |
+
function may be called with argument ``x`` of shape ``(4,)`` or ``(4, n)``,
|
| 347 |
+
and this is what we observe.
|
| 348 |
+
|
| 349 |
+
"""
|
| 350 |
+
# TODO (followup):
|
| 351 |
+
# - investigate behavior at saddle points
|
| 352 |
+
# - array initial_step / step_factor?
|
| 353 |
+
# - multivariate functions?
|
| 354 |
+
|
| 355 |
+
res = _differentiate_iv(func, x, args, atol, rtol, maxiter, order, initial_step,
|
| 356 |
+
step_factor, step_direction, preserve_shape, callback)
|
| 357 |
+
(func, x, args, atol, rtol, maxiter, order,
|
| 358 |
+
h0, fac, hdir, preserve_shape, callback) = res
|
| 359 |
+
|
| 360 |
+
# Initialization
|
| 361 |
+
# Since f(x) (no step) is not needed for central differences, it may be
|
| 362 |
+
# possible to eliminate this function evaluation. However, it's useful for
|
| 363 |
+
# input validation and standardization, and everything else is designed to
|
| 364 |
+
# reduce function calls, so let's keep it simple.
|
| 365 |
+
temp = eim._initialize(func, (x,), args, preserve_shape=preserve_shape)
|
| 366 |
+
func, xs, fs, args, shape, dtype, xp = temp
|
| 367 |
+
x, f = xs[0], fs[0]
|
| 368 |
+
df = np.full_like(f, np.nan)
|
| 369 |
+
# Ideally we'd broadcast the shape of `hdir` in `_elementwise_algo_init`, but
|
| 370 |
+
# it's simpler to do it here than to generalize `_elementwise_algo_init` further.
|
| 371 |
+
# `hdir` and `x` are already broadcasted in `_differentiate_iv`, so we know
|
| 372 |
+
# that `hdir` can be broadcasted to the final shape.
|
| 373 |
+
hdir = np.broadcast_to(hdir, shape).flatten()
|
| 374 |
+
|
| 375 |
+
status = np.full_like(x, eim._EINPROGRESS, dtype=int) # in progress
|
| 376 |
+
nit, nfev = 0, 1 # one function evaluations performed above
|
| 377 |
+
# Boolean indices of left, central, right, and (all) one-sided steps
|
| 378 |
+
il = hdir < 0
|
| 379 |
+
ic = hdir == 0
|
| 380 |
+
ir = hdir > 0
|
| 381 |
+
io = il | ir
|
| 382 |
+
|
| 383 |
+
# Most of these attributes are reasonably obvious, but:
|
| 384 |
+
# - `fs` holds all the function values of all active `x`. The zeroth
|
| 385 |
+
# axis corresponds with active points `x`, the first axis corresponds
|
| 386 |
+
# with the different steps (in the order described in
|
| 387 |
+
# `_differentiate_weights`).
|
| 388 |
+
# - `terms` (which could probably use a better name) is half the `order`,
|
| 389 |
+
# which is always even.
|
| 390 |
+
work = _RichResult(x=x, df=df, fs=f[:, np.newaxis], error=np.nan, h=h0,
|
| 391 |
+
df_last=np.nan, error_last=np.nan, h0=h0, fac=fac,
|
| 392 |
+
atol=atol, rtol=rtol, nit=nit, nfev=nfev,
|
| 393 |
+
status=status, dtype=dtype, terms=(order+1)//2,
|
| 394 |
+
hdir=hdir, il=il, ic=ic, ir=ir, io=io)
|
| 395 |
+
# This is the correspondence between terms in the `work` object and the
|
| 396 |
+
# final result. In this case, the mapping is trivial. Note that `success`
|
| 397 |
+
# is prepended automatically.
|
| 398 |
+
res_work_pairs = [('status', 'status'), ('df', 'df'), ('error', 'error'),
|
| 399 |
+
('nit', 'nit'), ('nfev', 'nfev'), ('x', 'x')]
|
| 400 |
+
|
| 401 |
+
def pre_func_eval(work):
|
| 402 |
+
"""Determine the abscissae at which the function needs to be evaluated.
|
| 403 |
+
|
| 404 |
+
See `_differentiate_weights` for a description of the stencil (pattern
|
| 405 |
+
of the abscissae).
|
| 406 |
+
|
| 407 |
+
In the first iteration, there is only one stored function value in
|
| 408 |
+
`work.fs`, `f(x)`, so we need to evaluate at `order` new points. In
|
| 409 |
+
subsequent iterations, we evaluate at two new points. Note that
|
| 410 |
+
`work.x` is always flattened into a 1D array after broadcasting with
|
| 411 |
+
all `args`, so we add a new axis at the end and evaluate all point
|
| 412 |
+
in one call to the function.
|
| 413 |
+
|
| 414 |
+
For improvement:
|
| 415 |
+
- Consider measuring the step size actually taken, since `(x + h) - x`
|
| 416 |
+
is not identically equal to `h` with floating point arithmetic.
|
| 417 |
+
- Adjust the step size automatically if `x` is too big to resolve the
|
| 418 |
+
step.
|
| 419 |
+
- We could probably save some work if there are no central difference
|
| 420 |
+
steps or no one-sided steps.
|
| 421 |
+
"""
|
| 422 |
+
n = work.terms # half the order
|
| 423 |
+
h = work.h # step size
|
| 424 |
+
c = work.fac # step reduction factor
|
| 425 |
+
d = c**0.5 # square root of step reduction factor (one-sided stencil)
|
| 426 |
+
# Note - no need to be careful about dtypes until we allocate `x_eval`
|
| 427 |
+
|
| 428 |
+
if work.nit == 0:
|
| 429 |
+
hc = h / c**np.arange(n)
|
| 430 |
+
hc = np.concatenate((-hc[::-1], hc))
|
| 431 |
+
else:
|
| 432 |
+
hc = np.asarray([-h, h]) / c**(n-1)
|
| 433 |
+
|
| 434 |
+
if work.nit == 0:
|
| 435 |
+
hr = h / d**np.arange(2*n)
|
| 436 |
+
else:
|
| 437 |
+
hr = np.asarray([h, h/d]) / c**(n-1)
|
| 438 |
+
|
| 439 |
+
n_new = 2*n if work.nit == 0 else 2 # number of new abscissae
|
| 440 |
+
x_eval = np.zeros((len(work.hdir), n_new), dtype=work.dtype)
|
| 441 |
+
il, ic, ir = work.il, work.ic, work.ir
|
| 442 |
+
x_eval[ir] = work.x[ir, np.newaxis] + hr
|
| 443 |
+
x_eval[ic] = work.x[ic, np.newaxis] + hc
|
| 444 |
+
x_eval[il] = work.x[il, np.newaxis] - hr
|
| 445 |
+
return x_eval
|
| 446 |
+
|
| 447 |
+
def post_func_eval(x, f, work):
|
| 448 |
+
""" Estimate the derivative and error from the function evaluations
|
| 449 |
+
|
| 450 |
+
As in `pre_func_eval`: in the first iteration, there is only one stored
|
| 451 |
+
function value in `work.fs`, `f(x)`, so we need to add the `order` new
|
| 452 |
+
points. In subsequent iterations, we add two new points. The tricky
|
| 453 |
+
part is getting the order to match that of the weights, which is
|
| 454 |
+
described in `_differentiate_weights`.
|
| 455 |
+
|
| 456 |
+
For improvement:
|
| 457 |
+
- Change the order of the weights (and steps in `pre_func_eval`) to
|
| 458 |
+
simplify `work_fc` concatenation and eliminate `fc` concatenation.
|
| 459 |
+
- It would be simple to do one-step Richardson extrapolation with `df`
|
| 460 |
+
and `df_last` to increase the order of the estimate and/or improve
|
| 461 |
+
the error estimate.
|
| 462 |
+
- Process the function evaluations in a more numerically favorable
|
| 463 |
+
way. For instance, combining the pairs of central difference evals
|
| 464 |
+
into a second-order approximation and using Richardson extrapolation
|
| 465 |
+
to produce a higher order approximation seemed to retain accuracy up
|
| 466 |
+
to very high order.
|
| 467 |
+
- Alternatively, we could use `polyfit` like Jacobi. An advantage of
|
| 468 |
+
fitting polynomial to more points than necessary is improved noise
|
| 469 |
+
tolerance.
|
| 470 |
+
"""
|
| 471 |
+
n = work.terms
|
| 472 |
+
n_new = n if work.nit == 0 else 1
|
| 473 |
+
il, ic, io = work.il, work.ic, work.io
|
| 474 |
+
|
| 475 |
+
# Central difference
|
| 476 |
+
# `work_fc` is *all* the points at which the function has been evaluated
|
| 477 |
+
# `fc` is the points we're using *this iteration* to produce the estimate
|
| 478 |
+
work_fc = (f[ic, :n_new], work.fs[ic, :], f[ic, -n_new:])
|
| 479 |
+
work_fc = np.concatenate(work_fc, axis=-1)
|
| 480 |
+
if work.nit == 0:
|
| 481 |
+
fc = work_fc
|
| 482 |
+
else:
|
| 483 |
+
fc = (work_fc[:, :n], work_fc[:, n:n+1], work_fc[:, -n:])
|
| 484 |
+
fc = np.concatenate(fc, axis=-1)
|
| 485 |
+
|
| 486 |
+
# One-sided difference
|
| 487 |
+
work_fo = np.concatenate((work.fs[io, :], f[io, :]), axis=-1)
|
| 488 |
+
if work.nit == 0:
|
| 489 |
+
fo = work_fo
|
| 490 |
+
else:
|
| 491 |
+
fo = np.concatenate((work_fo[:, 0:1], work_fo[:, -2*n:]), axis=-1)
|
| 492 |
+
|
| 493 |
+
work.fs = np.zeros((len(ic), work.fs.shape[-1] + 2*n_new))
|
| 494 |
+
work.fs[ic] = work_fc
|
| 495 |
+
work.fs[io] = work_fo
|
| 496 |
+
|
| 497 |
+
wc, wo = _differentiate_weights(work, n)
|
| 498 |
+
work.df_last = work.df.copy()
|
| 499 |
+
work.df[ic] = fc @ wc / work.h
|
| 500 |
+
work.df[io] = fo @ wo / work.h
|
| 501 |
+
work.df[il] *= -1
|
| 502 |
+
|
| 503 |
+
work.h /= work.fac
|
| 504 |
+
work.error_last = work.error
|
| 505 |
+
# Simple error estimate - the difference in derivative estimates between
|
| 506 |
+
# this iteration and the last. This is typically conservative because if
|
| 507 |
+
# convergence has begin, the true error is much closer to the difference
|
| 508 |
+
# between the current estimate and the *next* error estimate. However,
|
| 509 |
+
# we could use Richarson extrapolation to produce an error estimate that
|
| 510 |
+
# is one order higher, and take the difference between that and
|
| 511 |
+
# `work.df` (which would just be constant factor that depends on `fac`.)
|
| 512 |
+
work.error = abs(work.df - work.df_last)
|
| 513 |
+
|
| 514 |
+
def check_termination(work):
|
| 515 |
+
"""Terminate due to convergence, non-finite values, or error increase"""
|
| 516 |
+
stop = np.zeros_like(work.df).astype(bool)
|
| 517 |
+
|
| 518 |
+
i = work.error < work.atol + work.rtol*abs(work.df)
|
| 519 |
+
work.status[i] = eim._ECONVERGED
|
| 520 |
+
stop[i] = True
|
| 521 |
+
|
| 522 |
+
if work.nit > 0:
|
| 523 |
+
i = ~((np.isfinite(work.x) & np.isfinite(work.df)) | stop)
|
| 524 |
+
work.df[i], work.status[i] = np.nan, eim._EVALUEERR
|
| 525 |
+
stop[i] = True
|
| 526 |
+
|
| 527 |
+
# With infinite precision, there is a step size below which
|
| 528 |
+
# all smaller step sizes will reduce the error. But in floating point
|
| 529 |
+
# arithmetic, catastrophic cancellation will begin to cause the error
|
| 530 |
+
# to increase again. This heuristic tries to avoid step sizes that are
|
| 531 |
+
# too small. There may be more theoretically sound approaches for
|
| 532 |
+
# detecting a step size that minimizes the total error, but this
|
| 533 |
+
# heuristic seems simple and effective.
|
| 534 |
+
i = (work.error > work.error_last*10) & ~stop
|
| 535 |
+
work.status[i] = _EERRORINCREASE
|
| 536 |
+
stop[i] = True
|
| 537 |
+
|
| 538 |
+
return stop
|
| 539 |
+
|
| 540 |
+
def post_termination_check(work):
|
| 541 |
+
return
|
| 542 |
+
|
| 543 |
+
def customize_result(res, shape):
|
| 544 |
+
return shape
|
| 545 |
+
|
| 546 |
+
return eim._loop(work, callback, shape, maxiter, func, args, dtype,
|
| 547 |
+
pre_func_eval, post_func_eval, check_termination,
|
| 548 |
+
post_termination_check, customize_result, res_work_pairs,
|
| 549 |
+
xp, preserve_shape)
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
def _differentiate_weights(work, n):
|
| 553 |
+
# This produces the weights of the finite difference formula for a given
|
| 554 |
+
# stencil. In experiments, use of a second-order central difference formula
|
| 555 |
+
# with Richardson extrapolation was more accurate numerically, but it was
|
| 556 |
+
# more complicated, and it would have become even more complicated when
|
| 557 |
+
# adding support for one-sided differences. However, now that all the
|
| 558 |
+
# function evaluation values are stored, they can be processed in whatever
|
| 559 |
+
# way is desired to produce the derivative estimate. We leave alternative
|
| 560 |
+
# approaches to future work. To be more self-contained, here is the theory
|
| 561 |
+
# for deriving the weights below.
|
| 562 |
+
#
|
| 563 |
+
# Recall that the Taylor expansion of a univariate, scalar-values function
|
| 564 |
+
# about a point `x` may be expressed as:
|
| 565 |
+
# f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
|
| 566 |
+
# Suppose we evaluate f(x), f(x+h), and f(x-h). We have:
|
| 567 |
+
# f(x) = f(x)
|
| 568 |
+
# f(x + h) = f(x) + f'(x)*h + f''(x)/2!*h**2 + O(h**3)
|
| 569 |
+
# f(x - h) = f(x) - f'(x)*h + f''(x)/2!*h**2 + O(h**3)
|
| 570 |
+
# We can solve for weights `wi` such that:
|
| 571 |
+
# w1*f(x) = w1*(f(x))
|
| 572 |
+
# + w2*f(x + h) = w2*(f(x) + f'(x)*h + f''(x)/2!*h**2) + O(h**3)
|
| 573 |
+
# + w3*f(x - h) = w3*(f(x) - f'(x)*h + f''(x)/2!*h**2) + O(h**3)
|
| 574 |
+
# = 0 + f'(x)*h + 0 + O(h**3)
|
| 575 |
+
# Then
|
| 576 |
+
# f'(x) ~ (w1*f(x) + w2*f(x+h) + w3*f(x-h))/h
|
| 577 |
+
# is a finite difference derivative approximation with error O(h**2),
|
| 578 |
+
# and so it is said to be a "second-order" approximation. Under certain
|
| 579 |
+
# conditions (e.g. well-behaved function, `h` sufficiently small), the
|
| 580 |
+
# error in the approximation will decrease with h**2; that is, if `h` is
|
| 581 |
+
# reduced by a factor of 2, the error is reduced by a factor of 4.
|
| 582 |
+
#
|
| 583 |
+
# By default, we use eighth-order formulae. Our central-difference formula
|
| 584 |
+
# uses abscissae:
|
| 585 |
+
# x-h/c**3, x-h/c**2, x-h/c, x-h, x, x+h, x+h/c, x+h/c**2, x+h/c**3
|
| 586 |
+
# where `c` is the step factor. (Typically, the step factor is greater than
|
| 587 |
+
# one, so the outermost points - as written above - are actually closest to
|
| 588 |
+
# `x`.) This "stencil" is chosen so that each iteration, the step can be
|
| 589 |
+
# reduced by the factor `c`, and most of the function evaluations can be
|
| 590 |
+
# reused with the new step size. For example, in the next iteration, we
|
| 591 |
+
# will have:
|
| 592 |
+
# x-h/c**4, x-h/c**3, x-h/c**2, x-h/c, x, x+h/c, x+h/c**2, x+h/c**3, x+h/c**4
|
| 593 |
+
# We do not reuse `x-h` and `x+h` for the new derivative estimate.
|
| 594 |
+
# While this would increase the order of the formula and thus the
|
| 595 |
+
# theoretical convergence rate, it is also less stable numerically.
|
| 596 |
+
# (As noted above, there are other ways of processing the values that are
|
| 597 |
+
# more stable. Thus, even now we store `f(x-h)` and `f(x+h)` in `work.fs`
|
| 598 |
+
# to simplify future development of this sort of improvement.)
|
| 599 |
+
#
|
| 600 |
+
# The (right) one-sided formula is produced similarly using abscissae
|
| 601 |
+
# x, x+h, x+h/d, x+h/d**2, ..., x+h/d**6, x+h/d**7, x+h/d**7
|
| 602 |
+
# where `d` is the square root of `c`. (The left one-sided formula simply
|
| 603 |
+
# uses -h.) When the step size is reduced by factor `c = d**2`, we have
|
| 604 |
+
# abscissae:
|
| 605 |
+
# x, x+h/d**2, x+h/d**3..., x+h/d**8, x+h/d**9, x+h/d**9
|
| 606 |
+
# `d` is chosen as the square root of `c` so that the rate of the step-size
|
| 607 |
+
# reduction is the same per iteration as in the central difference case.
|
| 608 |
+
# Note that because the central difference formulas are inherently of even
|
| 609 |
+
# order, for simplicity, we use only even-order formulas for one-sided
|
| 610 |
+
# differences, too.
|
| 611 |
+
|
| 612 |
+
# It's possible for the user to specify `fac` in, say, double precision but
|
| 613 |
+
# `x` and `args` in single precision. `fac` gets converted to single
|
| 614 |
+
# precision, but we should always use double precision for the intermediate
|
| 615 |
+
# calculations here to avoid additional error in the weights.
|
| 616 |
+
fac = work.fac.astype(np.float64)
|
| 617 |
+
|
| 618 |
+
# Note that if the user switches back to floating point precision with
|
| 619 |
+
# `x` and `args`, then `fac` will not necessarily equal the (lower
|
| 620 |
+
# precision) cached `_differentiate_weights.fac`, and the weights will
|
| 621 |
+
# need to be recalculated. This could be fixed, but it's late, and of
|
| 622 |
+
# low consequence.
|
| 623 |
+
if fac != _differentiate_weights.fac:
|
| 624 |
+
_differentiate_weights.central = []
|
| 625 |
+
_differentiate_weights.right = []
|
| 626 |
+
_differentiate_weights.fac = fac
|
| 627 |
+
|
| 628 |
+
if len(_differentiate_weights.central) != 2*n + 1:
|
| 629 |
+
# Central difference weights. Consider refactoring this; it could
|
| 630 |
+
# probably be more compact.
|
| 631 |
+
i = np.arange(-n, n + 1)
|
| 632 |
+
p = np.abs(i) - 1. # center point has power `p` -1, but sign `s` is 0
|
| 633 |
+
s = np.sign(i)
|
| 634 |
+
|
| 635 |
+
h = s / fac ** p
|
| 636 |
+
A = np.vander(h, increasing=True).T
|
| 637 |
+
b = np.zeros(2*n + 1)
|
| 638 |
+
b[1] = 1
|
| 639 |
+
weights = np.linalg.solve(A, b)
|
| 640 |
+
|
| 641 |
+
# Enforce identities to improve accuracy
|
| 642 |
+
weights[n] = 0
|
| 643 |
+
for i in range(n):
|
| 644 |
+
weights[-i-1] = -weights[i]
|
| 645 |
+
|
| 646 |
+
# Cache the weights. We only need to calculate them once unless
|
| 647 |
+
# the step factor changes.
|
| 648 |
+
_differentiate_weights.central = weights
|
| 649 |
+
|
| 650 |
+
# One-sided difference weights. The left one-sided weights (with
|
| 651 |
+
# negative steps) are simply the negative of the right one-sided
|
| 652 |
+
# weights, so no need to compute them separately.
|
| 653 |
+
i = np.arange(2*n + 1)
|
| 654 |
+
p = i - 1.
|
| 655 |
+
s = np.sign(i)
|
| 656 |
+
|
| 657 |
+
h = s / np.sqrt(fac) ** p
|
| 658 |
+
A = np.vander(h, increasing=True).T
|
| 659 |
+
b = np.zeros(2 * n + 1)
|
| 660 |
+
b[1] = 1
|
| 661 |
+
weights = np.linalg.solve(A, b)
|
| 662 |
+
|
| 663 |
+
_differentiate_weights.right = weights
|
| 664 |
+
|
| 665 |
+
return (_differentiate_weights.central.astype(work.dtype, copy=False),
|
| 666 |
+
_differentiate_weights.right.astype(work.dtype, copy=False))
|
| 667 |
+
_differentiate_weights.central = []
|
| 668 |
+
_differentiate_weights.right = []
|
| 669 |
+
_differentiate_weights.fac = None
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
def _jacobian(func, x, *, atol=None, rtol=None, maxiter=10,
|
| 673 |
+
order=8, initial_step=0.5, step_factor=2.0):
|
| 674 |
+
r"""Evaluate the Jacobian of a function numerically.
|
| 675 |
+
|
| 676 |
+
Parameters
|
| 677 |
+
----------
|
| 678 |
+
func : callable
|
| 679 |
+
The function whose Jacobian is desired. The signature must be::
|
| 680 |
+
|
| 681 |
+
func(x: ndarray) -> ndarray
|
| 682 |
+
|
| 683 |
+
where each element of ``x`` is a finite real. If the function to be
|
| 684 |
+
differentiated accepts additional, arguments wrap it (e.g. using
|
| 685 |
+
`functools.partial` or ``lambda``) and pass the wrapped callable
|
| 686 |
+
into `_jacobian`. See Notes regarding vectorization and the dimensionality
|
| 687 |
+
of the input and output.
|
| 688 |
+
x : array_like
|
| 689 |
+
Points at which to evaluate the Jacobian. Must have at least one dimension.
|
| 690 |
+
See Notes regarding the dimensionality and vectorization.
|
| 691 |
+
atol, rtol : float, optional
|
| 692 |
+
Absolute and relative tolerances for the stopping condition: iteration
|
| 693 |
+
will stop for each element of the Jacobian when
|
| 694 |
+
``res.error < atol + rtol * abs(res.df)``. The default `atol` is the
|
| 695 |
+
smallest normal number of the appropriate dtype, and the default `rtol`
|
| 696 |
+
is the square root of the precision of the appropriate dtype.
|
| 697 |
+
order : int, default: 8
|
| 698 |
+
The (positive integer) order of the finite difference formula to be
|
| 699 |
+
used. Odd integers will be rounded up to the next even integer.
|
| 700 |
+
initial_step : float, default: 0.5
|
| 701 |
+
The (absolute) initial step size for the finite difference derivative
|
| 702 |
+
approximation.
|
| 703 |
+
step_factor : float, default: 2.0
|
| 704 |
+
The factor by which the step size is *reduced* in each iteration; i.e.
|
| 705 |
+
the step size in iteration 1 is ``initial_step/step_factor``. If
|
| 706 |
+
``step_factor < 1``, subsequent steps will be greater than the initial
|
| 707 |
+
step; this may be useful if steps smaller than some threshold are
|
| 708 |
+
undesirable (e.g. due to subtractive cancellation error).
|
| 709 |
+
maxiter : int, default: 10
|
| 710 |
+
The maximum number of iterations of the algorithm to perform.
|
| 711 |
+
|
| 712 |
+
Returns
|
| 713 |
+
-------
|
| 714 |
+
res : _RichResult
|
| 715 |
+
An instance of `scipy._lib._util._RichResult` with the following
|
| 716 |
+
attributes.
|
| 717 |
+
|
| 718 |
+
success : bool array
|
| 719 |
+
``True`` when the algorithm terminated successfully (status ``0``).
|
| 720 |
+
status : int array
|
| 721 |
+
An integer representing the exit status of the algorithm.
|
| 722 |
+
``0`` : The algorithm converged to the specified tolerances.
|
| 723 |
+
``-1`` : The error estimate increased, so iteration was terminated.
|
| 724 |
+
``-2`` : The maximum number of iterations was reached.
|
| 725 |
+
``-3`` : A non-finite value was encountered.
|
| 726 |
+
``-4`` : Iteration was terminated by `callback`.
|
| 727 |
+
``1`` : The algorithm is proceeding normally (in `callback` only).
|
| 728 |
+
df : float array
|
| 729 |
+
The Jacobian of `func` at `x`, if the algorithm terminated
|
| 730 |
+
successfully.
|
| 731 |
+
error : float array
|
| 732 |
+
An estimate of the error: the magnitude of the difference between
|
| 733 |
+
the current estimate of the derivative and the estimate in the
|
| 734 |
+
previous iteration.
|
| 735 |
+
nit : int array
|
| 736 |
+
The number of iterations performed.
|
| 737 |
+
nfev : int array
|
| 738 |
+
The number of points at which `func` was evaluated.
|
| 739 |
+
x : float array
|
| 740 |
+
The value at which the derivative of `func` was evaluated.
|
| 741 |
+
|
| 742 |
+
See Also
|
| 743 |
+
--------
|
| 744 |
+
_differentiate
|
| 745 |
+
|
| 746 |
+
Notes
|
| 747 |
+
-----
|
| 748 |
+
Suppose we wish to evaluate the Jacobian of a function
|
| 749 |
+
:math:`f: \mathbf{R^m} \rightarrow \mathbf{R^n}`, and assign to variables
|
| 750 |
+
``m`` and ``n`` the positive integer values of :math:`m` and :math:`n`,
|
| 751 |
+
respectively. If we wish to evaluate the Jacobian at a single point,
|
| 752 |
+
then:
|
| 753 |
+
|
| 754 |
+
- argument `x` must be an array of shape ``(m,)``
|
| 755 |
+
- argument `func` must be vectorized to accept an array of shape ``(m, p)``.
|
| 756 |
+
The first axis represents the :math:`m` inputs of :math:`f`; the second
|
| 757 |
+
is for evaluating the function at multiple points in a single call.
|
| 758 |
+
- argument `func` must return an array of shape ``(n, p)``. The first
|
| 759 |
+
axis represents the :math:`n` outputs of :math:`f`; the second
|
| 760 |
+
is for the result of evaluating the function at multiple points.
|
| 761 |
+
- attribute ``df`` of the result object will be an array of shape ``(n, m)``,
|
| 762 |
+
the Jacobian.
|
| 763 |
+
|
| 764 |
+
This function is also vectorized in the sense that the Jacobian can be
|
| 765 |
+
evaluated at ``k`` points in a single call. In this case, `x` would be an
|
| 766 |
+
array of shape ``(m, k)``, `func` would accept an array of shape
|
| 767 |
+
``(m, k, p)`` and return an array of shape ``(n, k, p)``, and the ``df``
|
| 768 |
+
attribute of the result would have shape ``(n, m, k)``.
|
| 769 |
+
|
| 770 |
+
References
|
| 771 |
+
----------
|
| 772 |
+
.. [1] Jacobian matrix and determinant, *Wikipedia*,
|
| 773 |
+
https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant
|
| 774 |
+
|
| 775 |
+
Examples
|
| 776 |
+
--------
|
| 777 |
+
The Rosenbrock function maps from :math:`\mathbf{R}^m \righarrow \mathbf{R}`;
|
| 778 |
+
the SciPy implementation `scipy.optimize.rosen` is vectorized to accept an
|
| 779 |
+
array of shape ``(m, p)`` and return an array of shape ``m``. Suppose we wish
|
| 780 |
+
to evaluate the Jacobian (AKA the gradient because the function returns a scalar)
|
| 781 |
+
at ``[0.5, 0.5, 0.5]``.
|
| 782 |
+
|
| 783 |
+
>>> import numpy as np
|
| 784 |
+
>>> from scipy.optimize._differentiate import _jacobian as jacobian
|
| 785 |
+
>>> from scipy.optimize import rosen, rosen_der
|
| 786 |
+
>>> m = 3
|
| 787 |
+
>>> x = np.full(m, 0.5)
|
| 788 |
+
>>> res = jacobian(rosen, x)
|
| 789 |
+
>>> ref = rosen_der(x) # reference value of the gradient
|
| 790 |
+
>>> res.df, ref
|
| 791 |
+
(array([-51., -1., 50.]), array([-51., -1., 50.]))
|
| 792 |
+
|
| 793 |
+
As an example of a function with multiple outputs, consider Example 4
|
| 794 |
+
from [1]_.
|
| 795 |
+
|
| 796 |
+
>>> def f(x):
|
| 797 |
+
... x1, x2, x3 = x ...
|
| 798 |
+
... return [x1, 5*x3, 4*x2**2 - 2*x3, x3*np.sin(x1)]
|
| 799 |
+
|
| 800 |
+
The true Jacobian is given by:
|
| 801 |
+
|
| 802 |
+
>>> def df(x):
|
| 803 |
+
... x1, x2, x3 = x
|
| 804 |
+
... one = np.ones_like(x1)
|
| 805 |
+
... return [[one, 0*one, 0*one],
|
| 806 |
+
... [0*one, 0*one, 5*one],
|
| 807 |
+
... [0*one, 8*x2, -2*one],
|
| 808 |
+
... [x3*np.cos(x1), 0*one, np.sin(x1)]]
|
| 809 |
+
|
| 810 |
+
Evaluate the Jacobian at an arbitrary point.
|
| 811 |
+
|
| 812 |
+
>>> rng = np.random.default_rng(389252938452)
|
| 813 |
+
>>> x = rng.random(size=3)
|
| 814 |
+
>>> res = jacobian(f, x)
|
| 815 |
+
>>> ref = df(x)
|
| 816 |
+
>>> res.df.shape == (4, 3)
|
| 817 |
+
True
|
| 818 |
+
>>> np.allclose(res.df, ref)
|
| 819 |
+
True
|
| 820 |
+
|
| 821 |
+
Evaluate the Jacobian at 10 arbitrary points in a single call.
|
| 822 |
+
|
| 823 |
+
>>> x = rng.random(size=(3, 10))
|
| 824 |
+
>>> res = jacobian(f, x)
|
| 825 |
+
>>> ref = df(x)
|
| 826 |
+
>>> res.df.shape == (4, 3, 10)
|
| 827 |
+
True
|
| 828 |
+
>>> np.allclose(res.df, ref)
|
| 829 |
+
True
|
| 830 |
+
|
| 831 |
+
"""
|
| 832 |
+
x = np.asarray(x)
|
| 833 |
+
int_dtype = np.issubdtype(x.dtype, np.integer)
|
| 834 |
+
x0 = np.asarray(x, dtype=float) if int_dtype else x
|
| 835 |
+
|
| 836 |
+
if x0.ndim < 1:
|
| 837 |
+
message = "Argument `x` must be at least 1-D."
|
| 838 |
+
raise ValueError(message)
|
| 839 |
+
|
| 840 |
+
m = x0.shape[0]
|
| 841 |
+
i = np.arange(m)
|
| 842 |
+
|
| 843 |
+
def wrapped(x):
|
| 844 |
+
p = () if x.ndim == x0.ndim else (x.shape[-1],) # number of abscissae
|
| 845 |
+
new_dims = (1,) if x.ndim == x0.ndim else (1, -1)
|
| 846 |
+
new_shape = (m, m) + x0.shape[1:] + p
|
| 847 |
+
xph = np.expand_dims(x0, new_dims)
|
| 848 |
+
xph = np.broadcast_to(xph, new_shape).copy()
|
| 849 |
+
xph[i, i] = x
|
| 850 |
+
return func(xph)
|
| 851 |
+
|
| 852 |
+
res = _differentiate(wrapped, x, atol=atol, rtol=rtol,
|
| 853 |
+
maxiter=maxiter, order=order, initial_step=initial_step,
|
| 854 |
+
step_factor=step_factor, preserve_shape=True)
|
| 855 |
+
del res.x # the user knows `x`, and the way it gets broadcasted is meaningless here
|
| 856 |
+
return res
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_hessian_update_strategy.py
ADDED
|
@@ -0,0 +1,475 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Hessian update strategies for quasi-Newton optimization methods."""
|
| 2 |
+
import numpy as np
|
| 3 |
+
from numpy.linalg import norm
|
| 4 |
+
from scipy.linalg import get_blas_funcs, issymmetric
|
| 5 |
+
from warnings import warn
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1']
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class HessianUpdateStrategy:
|
| 12 |
+
"""Interface for implementing Hessian update strategies.
|
| 13 |
+
|
| 14 |
+
Many optimization methods make use of Hessian (or inverse Hessian)
|
| 15 |
+
approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS.
|
| 16 |
+
Some of these approximations, however, do not actually need to store
|
| 17 |
+
the entire matrix or can compute the internal matrix product with a
|
| 18 |
+
given vector in a very efficiently manner. This class serves as an
|
| 19 |
+
abstract interface between the optimization algorithm and the
|
| 20 |
+
quasi-Newton update strategies, giving freedom of implementation
|
| 21 |
+
to store and update the internal matrix as efficiently as possible.
|
| 22 |
+
Different choices of initialization and update procedure will result
|
| 23 |
+
in different quasi-Newton strategies.
|
| 24 |
+
|
| 25 |
+
Four methods should be implemented in derived classes: ``initialize``,
|
| 26 |
+
``update``, ``dot`` and ``get_matrix``.
|
| 27 |
+
|
| 28 |
+
Notes
|
| 29 |
+
-----
|
| 30 |
+
Any instance of a class that implements this interface,
|
| 31 |
+
can be accepted by the method ``minimize`` and used by
|
| 32 |
+
the compatible solvers to approximate the Hessian (or
|
| 33 |
+
inverse Hessian) used by the optimization algorithms.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def initialize(self, n, approx_type):
|
| 37 |
+
"""Initialize internal matrix.
|
| 38 |
+
|
| 39 |
+
Allocate internal memory for storing and updating
|
| 40 |
+
the Hessian or its inverse.
|
| 41 |
+
|
| 42 |
+
Parameters
|
| 43 |
+
----------
|
| 44 |
+
n : int
|
| 45 |
+
Problem dimension.
|
| 46 |
+
approx_type : {'hess', 'inv_hess'}
|
| 47 |
+
Selects either the Hessian or the inverse Hessian.
|
| 48 |
+
When set to 'hess' the Hessian will be stored and updated.
|
| 49 |
+
When set to 'inv_hess' its inverse will be used instead.
|
| 50 |
+
"""
|
| 51 |
+
raise NotImplementedError("The method ``initialize(n, approx_type)``"
|
| 52 |
+
" is not implemented.")
|
| 53 |
+
|
| 54 |
+
def update(self, delta_x, delta_grad):
|
| 55 |
+
"""Update internal matrix.
|
| 56 |
+
|
| 57 |
+
Update Hessian matrix or its inverse (depending on how 'approx_type'
|
| 58 |
+
is defined) using information about the last evaluated points.
|
| 59 |
+
|
| 60 |
+
Parameters
|
| 61 |
+
----------
|
| 62 |
+
delta_x : ndarray
|
| 63 |
+
The difference between two points the gradient
|
| 64 |
+
function have been evaluated at: ``delta_x = x2 - x1``.
|
| 65 |
+
delta_grad : ndarray
|
| 66 |
+
The difference between the gradients:
|
| 67 |
+
``delta_grad = grad(x2) - grad(x1)``.
|
| 68 |
+
"""
|
| 69 |
+
raise NotImplementedError("The method ``update(delta_x, delta_grad)``"
|
| 70 |
+
" is not implemented.")
|
| 71 |
+
|
| 72 |
+
def dot(self, p):
|
| 73 |
+
"""Compute the product of the internal matrix with the given vector.
|
| 74 |
+
|
| 75 |
+
Parameters
|
| 76 |
+
----------
|
| 77 |
+
p : array_like
|
| 78 |
+
1-D array representing a vector.
|
| 79 |
+
|
| 80 |
+
Returns
|
| 81 |
+
-------
|
| 82 |
+
Hp : array
|
| 83 |
+
1-D represents the result of multiplying the approximation matrix
|
| 84 |
+
by vector p.
|
| 85 |
+
"""
|
| 86 |
+
raise NotImplementedError("The method ``dot(p)``"
|
| 87 |
+
" is not implemented.")
|
| 88 |
+
|
| 89 |
+
def get_matrix(self):
|
| 90 |
+
"""Return current internal matrix.
|
| 91 |
+
|
| 92 |
+
Returns
|
| 93 |
+
-------
|
| 94 |
+
H : ndarray, shape (n, n)
|
| 95 |
+
Dense matrix containing either the Hessian
|
| 96 |
+
or its inverse (depending on how 'approx_type'
|
| 97 |
+
is defined).
|
| 98 |
+
"""
|
| 99 |
+
raise NotImplementedError("The method ``get_matrix(p)``"
|
| 100 |
+
" is not implemented.")
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class FullHessianUpdateStrategy(HessianUpdateStrategy):
|
| 104 |
+
"""Hessian update strategy with full dimensional internal representation.
|
| 105 |
+
"""
|
| 106 |
+
_syr = get_blas_funcs('syr', dtype='d') # Symmetric rank 1 update
|
| 107 |
+
_syr2 = get_blas_funcs('syr2', dtype='d') # Symmetric rank 2 update
|
| 108 |
+
# Symmetric matrix-vector product
|
| 109 |
+
_symv = get_blas_funcs('symv', dtype='d')
|
| 110 |
+
|
| 111 |
+
def __init__(self, init_scale='auto'):
|
| 112 |
+
self.init_scale = init_scale
|
| 113 |
+
# Until initialize is called we can't really use the class,
|
| 114 |
+
# so it makes sense to set everything to None.
|
| 115 |
+
self.first_iteration = None
|
| 116 |
+
self.approx_type = None
|
| 117 |
+
self.B = None
|
| 118 |
+
self.H = None
|
| 119 |
+
|
| 120 |
+
def initialize(self, n, approx_type):
|
| 121 |
+
"""Initialize internal matrix.
|
| 122 |
+
|
| 123 |
+
Allocate internal memory for storing and updating
|
| 124 |
+
the Hessian or its inverse.
|
| 125 |
+
|
| 126 |
+
Parameters
|
| 127 |
+
----------
|
| 128 |
+
n : int
|
| 129 |
+
Problem dimension.
|
| 130 |
+
approx_type : {'hess', 'inv_hess'}
|
| 131 |
+
Selects either the Hessian or the inverse Hessian.
|
| 132 |
+
When set to 'hess' the Hessian will be stored and updated.
|
| 133 |
+
When set to 'inv_hess' its inverse will be used instead.
|
| 134 |
+
"""
|
| 135 |
+
self.first_iteration = True
|
| 136 |
+
self.n = n
|
| 137 |
+
self.approx_type = approx_type
|
| 138 |
+
if approx_type not in ('hess', 'inv_hess'):
|
| 139 |
+
raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.")
|
| 140 |
+
# Create matrix
|
| 141 |
+
if self.approx_type == 'hess':
|
| 142 |
+
self.B = np.eye(n, dtype=float)
|
| 143 |
+
else:
|
| 144 |
+
self.H = np.eye(n, dtype=float)
|
| 145 |
+
|
| 146 |
+
def _auto_scale(self, delta_x, delta_grad):
|
| 147 |
+
# Heuristic to scale matrix at first iteration.
|
| 148 |
+
# Described in Nocedal and Wright "Numerical Optimization"
|
| 149 |
+
# p.143 formula (6.20).
|
| 150 |
+
s_norm2 = np.dot(delta_x, delta_x)
|
| 151 |
+
y_norm2 = np.dot(delta_grad, delta_grad)
|
| 152 |
+
ys = np.abs(np.dot(delta_grad, delta_x))
|
| 153 |
+
if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0:
|
| 154 |
+
return 1
|
| 155 |
+
if self.approx_type == 'hess':
|
| 156 |
+
return y_norm2 / ys
|
| 157 |
+
else:
|
| 158 |
+
return ys / y_norm2
|
| 159 |
+
|
| 160 |
+
def _update_implementation(self, delta_x, delta_grad):
|
| 161 |
+
raise NotImplementedError("The method ``_update_implementation``"
|
| 162 |
+
" is not implemented.")
|
| 163 |
+
|
| 164 |
+
def update(self, delta_x, delta_grad):
|
| 165 |
+
"""Update internal matrix.
|
| 166 |
+
|
| 167 |
+
Update Hessian matrix or its inverse (depending on how 'approx_type'
|
| 168 |
+
is defined) using information about the last evaluated points.
|
| 169 |
+
|
| 170 |
+
Parameters
|
| 171 |
+
----------
|
| 172 |
+
delta_x : ndarray
|
| 173 |
+
The difference between two points the gradient
|
| 174 |
+
function have been evaluated at: ``delta_x = x2 - x1``.
|
| 175 |
+
delta_grad : ndarray
|
| 176 |
+
The difference between the gradients:
|
| 177 |
+
``delta_grad = grad(x2) - grad(x1)``.
|
| 178 |
+
"""
|
| 179 |
+
if np.all(delta_x == 0.0):
|
| 180 |
+
return
|
| 181 |
+
if np.all(delta_grad == 0.0):
|
| 182 |
+
warn('delta_grad == 0.0. Check if the approximated '
|
| 183 |
+
'function is linear. If the function is linear '
|
| 184 |
+
'better results can be obtained by defining the '
|
| 185 |
+
'Hessian as zero instead of using quasi-Newton '
|
| 186 |
+
'approximations.',
|
| 187 |
+
UserWarning, stacklevel=2)
|
| 188 |
+
return
|
| 189 |
+
if self.first_iteration:
|
| 190 |
+
# Get user specific scale
|
| 191 |
+
if isinstance(self.init_scale, str) and self.init_scale == "auto":
|
| 192 |
+
scale = self._auto_scale(delta_x, delta_grad)
|
| 193 |
+
else:
|
| 194 |
+
scale = self.init_scale
|
| 195 |
+
|
| 196 |
+
# Check for complex: numpy will silently cast a complex array to
|
| 197 |
+
# a real one but not so for scalar as it raises a TypeError.
|
| 198 |
+
# Checking here brings a consistent behavior.
|
| 199 |
+
replace = False
|
| 200 |
+
if np.size(scale) == 1:
|
| 201 |
+
# to account for the legacy behavior having the exact same cast
|
| 202 |
+
scale = float(scale)
|
| 203 |
+
elif np.iscomplexobj(scale):
|
| 204 |
+
raise TypeError("init_scale contains complex elements, "
|
| 205 |
+
"must be real.")
|
| 206 |
+
else: # test explicitly for allowed shapes and values
|
| 207 |
+
replace = True
|
| 208 |
+
if self.approx_type == 'hess':
|
| 209 |
+
shape = np.shape(self.B)
|
| 210 |
+
dtype = self.B.dtype
|
| 211 |
+
else:
|
| 212 |
+
shape = np.shape(self.H)
|
| 213 |
+
dtype = self.H.dtype
|
| 214 |
+
# copy, will replace the original
|
| 215 |
+
scale = np.array(scale, dtype=dtype, copy=True)
|
| 216 |
+
|
| 217 |
+
# it has to match the shape of the matrix for the multiplication,
|
| 218 |
+
# no implicit broadcasting is allowed
|
| 219 |
+
if shape != (init_shape := np.shape(scale)):
|
| 220 |
+
raise ValueError("If init_scale is an array, it must have the "
|
| 221 |
+
f"dimensions of the hess/inv_hess: {shape}."
|
| 222 |
+
f" Got {init_shape}.")
|
| 223 |
+
if not issymmetric(scale):
|
| 224 |
+
raise ValueError("If init_scale is an array, it must be"
|
| 225 |
+
" symmetric (passing scipy.linalg.issymmetric)"
|
| 226 |
+
" to be an approximation of a hess/inv_hess.")
|
| 227 |
+
|
| 228 |
+
# Scale initial matrix with ``scale * np.eye(n)`` or replace
|
| 229 |
+
# This is not ideal, we could assign the scale directly in
|
| 230 |
+
# initialize, but we would need to
|
| 231 |
+
if self.approx_type == 'hess':
|
| 232 |
+
if replace:
|
| 233 |
+
self.B = scale
|
| 234 |
+
else:
|
| 235 |
+
self.B *= scale
|
| 236 |
+
else:
|
| 237 |
+
if replace:
|
| 238 |
+
self.H = scale
|
| 239 |
+
else:
|
| 240 |
+
self.H *= scale
|
| 241 |
+
self.first_iteration = False
|
| 242 |
+
self._update_implementation(delta_x, delta_grad)
|
| 243 |
+
|
| 244 |
+
def dot(self, p):
|
| 245 |
+
"""Compute the product of the internal matrix with the given vector.
|
| 246 |
+
|
| 247 |
+
Parameters
|
| 248 |
+
----------
|
| 249 |
+
p : array_like
|
| 250 |
+
1-D array representing a vector.
|
| 251 |
+
|
| 252 |
+
Returns
|
| 253 |
+
-------
|
| 254 |
+
Hp : array
|
| 255 |
+
1-D represents the result of multiplying the approximation matrix
|
| 256 |
+
by vector p.
|
| 257 |
+
"""
|
| 258 |
+
if self.approx_type == 'hess':
|
| 259 |
+
return self._symv(1, self.B, p)
|
| 260 |
+
else:
|
| 261 |
+
return self._symv(1, self.H, p)
|
| 262 |
+
|
| 263 |
+
def get_matrix(self):
|
| 264 |
+
"""Return the current internal matrix.
|
| 265 |
+
|
| 266 |
+
Returns
|
| 267 |
+
-------
|
| 268 |
+
M : ndarray, shape (n, n)
|
| 269 |
+
Dense matrix containing either the Hessian or its inverse
|
| 270 |
+
(depending on how `approx_type` was defined).
|
| 271 |
+
"""
|
| 272 |
+
if self.approx_type == 'hess':
|
| 273 |
+
M = np.copy(self.B)
|
| 274 |
+
else:
|
| 275 |
+
M = np.copy(self.H)
|
| 276 |
+
li = np.tril_indices_from(M, k=-1)
|
| 277 |
+
M[li] = M.T[li]
|
| 278 |
+
return M
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class BFGS(FullHessianUpdateStrategy):
|
| 282 |
+
"""Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
|
| 283 |
+
|
| 284 |
+
Parameters
|
| 285 |
+
----------
|
| 286 |
+
exception_strategy : {'skip_update', 'damp_update'}, optional
|
| 287 |
+
Define how to proceed when the curvature condition is violated.
|
| 288 |
+
Set it to 'skip_update' to just skip the update. Or, alternatively,
|
| 289 |
+
set it to 'damp_update' to interpolate between the actual BFGS
|
| 290 |
+
result and the unmodified matrix. Both exceptions strategies
|
| 291 |
+
are explained in [1]_, p.536-537.
|
| 292 |
+
min_curvature : float
|
| 293 |
+
This number, scaled by a normalization factor, defines the
|
| 294 |
+
minimum curvature ``dot(delta_grad, delta_x)`` allowed to go
|
| 295 |
+
unaffected by the exception strategy. By default is equal to
|
| 296 |
+
1e-8 when ``exception_strategy = 'skip_update'`` and equal
|
| 297 |
+
to 0.2 when ``exception_strategy = 'damp_update'``.
|
| 298 |
+
init_scale : {float, np.array, 'auto'}
|
| 299 |
+
This parameter can be used to initialize the Hessian or its
|
| 300 |
+
inverse. When a float is given, the relevant array is initialized
|
| 301 |
+
to ``np.eye(n) * init_scale``, where ``n`` is the problem dimension.
|
| 302 |
+
Alternatively, if a precisely ``(n, n)`` shaped, symmetric array is given,
|
| 303 |
+
this array will be used. Otherwise an error is generated.
|
| 304 |
+
Set it to 'auto' in order to use an automatic heuristic for choosing
|
| 305 |
+
the initial scale. The heuristic is described in [1]_, p.143.
|
| 306 |
+
The default is 'auto'.
|
| 307 |
+
|
| 308 |
+
Notes
|
| 309 |
+
-----
|
| 310 |
+
The update is based on the description in [1]_, p.140.
|
| 311 |
+
|
| 312 |
+
References
|
| 313 |
+
----------
|
| 314 |
+
.. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
|
| 315 |
+
Second Edition (2006).
|
| 316 |
+
"""
|
| 317 |
+
|
| 318 |
+
def __init__(self, exception_strategy='skip_update', min_curvature=None,
|
| 319 |
+
init_scale='auto'):
|
| 320 |
+
if exception_strategy == 'skip_update':
|
| 321 |
+
if min_curvature is not None:
|
| 322 |
+
self.min_curvature = min_curvature
|
| 323 |
+
else:
|
| 324 |
+
self.min_curvature = 1e-8
|
| 325 |
+
elif exception_strategy == 'damp_update':
|
| 326 |
+
if min_curvature is not None:
|
| 327 |
+
self.min_curvature = min_curvature
|
| 328 |
+
else:
|
| 329 |
+
self.min_curvature = 0.2
|
| 330 |
+
else:
|
| 331 |
+
raise ValueError("`exception_strategy` must be 'skip_update' "
|
| 332 |
+
"or 'damp_update'.")
|
| 333 |
+
|
| 334 |
+
super().__init__(init_scale)
|
| 335 |
+
self.exception_strategy = exception_strategy
|
| 336 |
+
|
| 337 |
+
def _update_inverse_hessian(self, ys, Hy, yHy, s):
|
| 338 |
+
"""Update the inverse Hessian matrix.
|
| 339 |
+
|
| 340 |
+
BFGS update using the formula:
|
| 341 |
+
|
| 342 |
+
``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T)
|
| 343 |
+
- 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)``
|
| 344 |
+
|
| 345 |
+
where ``s = delta_x`` and ``y = delta_grad``. This formula is
|
| 346 |
+
equivalent to (6.17) in [1]_ written in a more efficient way
|
| 347 |
+
for implementation.
|
| 348 |
+
|
| 349 |
+
References
|
| 350 |
+
----------
|
| 351 |
+
.. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
|
| 352 |
+
Second Edition (2006).
|
| 353 |
+
"""
|
| 354 |
+
self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H)
|
| 355 |
+
self.H = self._syr((ys + yHy) / ys ** 2, s, a=self.H)
|
| 356 |
+
|
| 357 |
+
def _update_hessian(self, ys, Bs, sBs, y):
|
| 358 |
+
"""Update the Hessian matrix.
|
| 359 |
+
|
| 360 |
+
BFGS update using the formula:
|
| 361 |
+
|
| 362 |
+
``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y``
|
| 363 |
+
|
| 364 |
+
where ``s`` is short for ``delta_x`` and ``y`` is short
|
| 365 |
+
for ``delta_grad``. Formula (6.19) in [1]_.
|
| 366 |
+
|
| 367 |
+
References
|
| 368 |
+
----------
|
| 369 |
+
.. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
|
| 370 |
+
Second Edition (2006).
|
| 371 |
+
"""
|
| 372 |
+
self.B = self._syr(1.0 / ys, y, a=self.B)
|
| 373 |
+
self.B = self._syr(-1.0 / sBs, Bs, a=self.B)
|
| 374 |
+
|
| 375 |
+
def _update_implementation(self, delta_x, delta_grad):
|
| 376 |
+
# Auxiliary variables w and z
|
| 377 |
+
if self.approx_type == 'hess':
|
| 378 |
+
w = delta_x
|
| 379 |
+
z = delta_grad
|
| 380 |
+
else:
|
| 381 |
+
w = delta_grad
|
| 382 |
+
z = delta_x
|
| 383 |
+
# Do some common operations
|
| 384 |
+
wz = np.dot(w, z)
|
| 385 |
+
Mw = self.dot(w)
|
| 386 |
+
wMw = Mw.dot(w)
|
| 387 |
+
# Guarantee that wMw > 0 by reinitializing matrix.
|
| 388 |
+
# While this is always true in exact arithmetic,
|
| 389 |
+
# indefinite matrix may appear due to roundoff errors.
|
| 390 |
+
if wMw <= 0.0:
|
| 391 |
+
scale = self._auto_scale(delta_x, delta_grad)
|
| 392 |
+
# Reinitialize matrix
|
| 393 |
+
if self.approx_type == 'hess':
|
| 394 |
+
self.B = scale * np.eye(self.n, dtype=float)
|
| 395 |
+
else:
|
| 396 |
+
self.H = scale * np.eye(self.n, dtype=float)
|
| 397 |
+
# Do common operations for new matrix
|
| 398 |
+
Mw = self.dot(w)
|
| 399 |
+
wMw = Mw.dot(w)
|
| 400 |
+
# Check if curvature condition is violated
|
| 401 |
+
if wz <= self.min_curvature * wMw:
|
| 402 |
+
# If the option 'skip_update' is set
|
| 403 |
+
# we just skip the update when the condition
|
| 404 |
+
# is violated.
|
| 405 |
+
if self.exception_strategy == 'skip_update':
|
| 406 |
+
return
|
| 407 |
+
# If the option 'damp_update' is set we
|
| 408 |
+
# interpolate between the actual BFGS
|
| 409 |
+
# result and the unmodified matrix.
|
| 410 |
+
elif self.exception_strategy == 'damp_update':
|
| 411 |
+
update_factor = (1-self.min_curvature) / (1 - wz/wMw)
|
| 412 |
+
z = update_factor*z + (1-update_factor)*Mw
|
| 413 |
+
wz = np.dot(w, z)
|
| 414 |
+
# Update matrix
|
| 415 |
+
if self.approx_type == 'hess':
|
| 416 |
+
self._update_hessian(wz, Mw, wMw, z)
|
| 417 |
+
else:
|
| 418 |
+
self._update_inverse_hessian(wz, Mw, wMw, z)
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
class SR1(FullHessianUpdateStrategy):
|
| 422 |
+
"""Symmetric-rank-1 Hessian update strategy.
|
| 423 |
+
|
| 424 |
+
Parameters
|
| 425 |
+
----------
|
| 426 |
+
min_denominator : float
|
| 427 |
+
This number, scaled by a normalization factor,
|
| 428 |
+
defines the minimum denominator magnitude allowed
|
| 429 |
+
in the update. When the condition is violated we skip
|
| 430 |
+
the update. By default uses ``1e-8``.
|
| 431 |
+
init_scale : {float, np.array, 'auto'}, optional
|
| 432 |
+
This parameter can be used to initialize the Hessian or its
|
| 433 |
+
inverse. When a float is given, the relevant array is initialized
|
| 434 |
+
to ``np.eye(n) * init_scale``, where ``n`` is the problem dimension.
|
| 435 |
+
Alternatively, if a precisely ``(n, n)`` shaped, symmetric array is given,
|
| 436 |
+
this array will be used. Otherwise an error is generated.
|
| 437 |
+
Set it to 'auto' in order to use an automatic heuristic for choosing
|
| 438 |
+
the initial scale. The heuristic is described in [1]_, p.143.
|
| 439 |
+
The default is 'auto'.
|
| 440 |
+
|
| 441 |
+
Notes
|
| 442 |
+
-----
|
| 443 |
+
The update is based on the description in [1]_, p.144-146.
|
| 444 |
+
|
| 445 |
+
References
|
| 446 |
+
----------
|
| 447 |
+
.. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
|
| 448 |
+
Second Edition (2006).
|
| 449 |
+
"""
|
| 450 |
+
|
| 451 |
+
def __init__(self, min_denominator=1e-8, init_scale='auto'):
|
| 452 |
+
self.min_denominator = min_denominator
|
| 453 |
+
super().__init__(init_scale)
|
| 454 |
+
|
| 455 |
+
def _update_implementation(self, delta_x, delta_grad):
|
| 456 |
+
# Auxiliary variables w and z
|
| 457 |
+
if self.approx_type == 'hess':
|
| 458 |
+
w = delta_x
|
| 459 |
+
z = delta_grad
|
| 460 |
+
else:
|
| 461 |
+
w = delta_grad
|
| 462 |
+
z = delta_x
|
| 463 |
+
# Do some common operations
|
| 464 |
+
Mw = self.dot(w)
|
| 465 |
+
z_minus_Mw = z - Mw
|
| 466 |
+
denominator = np.dot(w, z_minus_Mw)
|
| 467 |
+
# If the denominator is too small
|
| 468 |
+
# we just skip the update.
|
| 469 |
+
if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw):
|
| 470 |
+
return
|
| 471 |
+
# Update matrix
|
| 472 |
+
if self.approx_type == 'hess':
|
| 473 |
+
self.B = self._syr(1/denominator, z_minus_Mw, a=self.B)
|
| 474 |
+
else:
|
| 475 |
+
self.H = self._syr(1/denominator, z_minus_Mw, a=self.H)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_ip.py
ADDED
|
@@ -0,0 +1,1126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Interior-point method for linear programming
|
| 2 |
+
|
| 3 |
+
The *interior-point* method uses the primal-dual path following algorithm
|
| 4 |
+
outlined in [1]_. This algorithm supports sparse constraint matrices and
|
| 5 |
+
is typically faster than the simplex methods, especially for large, sparse
|
| 6 |
+
problems. Note, however, that the solution returned may be slightly less
|
| 7 |
+
accurate than those of the simplex methods and will not, in general,
|
| 8 |
+
correspond with a vertex of the polytope defined by the constraints.
|
| 9 |
+
|
| 10 |
+
.. versionadded:: 1.0.0
|
| 11 |
+
|
| 12 |
+
References
|
| 13 |
+
----------
|
| 14 |
+
.. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 15 |
+
optimizer for linear programming: an implementation of the
|
| 16 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 17 |
+
2000. 197-232.
|
| 18 |
+
"""
|
| 19 |
+
# Author: Matt Haberland
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
import scipy as sp
|
| 23 |
+
import scipy.sparse as sps
|
| 24 |
+
from warnings import warn
|
| 25 |
+
from scipy.linalg import LinAlgError
|
| 26 |
+
from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options
|
| 27 |
+
from ._linprog_util import _postsolve
|
| 28 |
+
has_umfpack = True
|
| 29 |
+
has_cholmod = True
|
| 30 |
+
try:
|
| 31 |
+
import sksparse # noqa: F401
|
| 32 |
+
from sksparse.cholmod import cholesky as cholmod # noqa: F401
|
| 33 |
+
from sksparse.cholmod import analyze as cholmod_analyze
|
| 34 |
+
except ImportError:
|
| 35 |
+
has_cholmod = False
|
| 36 |
+
try:
|
| 37 |
+
import scikits.umfpack # test whether to use factorized # noqa: F401
|
| 38 |
+
except ImportError:
|
| 39 |
+
has_umfpack = False
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _get_solver(M, sparse=False, lstsq=False, sym_pos=True,
|
| 43 |
+
cholesky=True, permc_spec='MMD_AT_PLUS_A'):
|
| 44 |
+
"""
|
| 45 |
+
Given solver options, return a handle to the appropriate linear system
|
| 46 |
+
solver.
|
| 47 |
+
|
| 48 |
+
Parameters
|
| 49 |
+
----------
|
| 50 |
+
M : 2-D array
|
| 51 |
+
As defined in [4] Equation 8.31
|
| 52 |
+
sparse : bool (default = False)
|
| 53 |
+
True if the system to be solved is sparse. This is typically set
|
| 54 |
+
True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
|
| 55 |
+
lstsq : bool (default = False)
|
| 56 |
+
True if the system is ill-conditioned and/or (nearly) singular and
|
| 57 |
+
thus a more robust least-squares solver is desired. This is sometimes
|
| 58 |
+
needed as the solution is approached.
|
| 59 |
+
sym_pos : bool (default = True)
|
| 60 |
+
True if the system matrix is symmetric positive definite
|
| 61 |
+
Sometimes this needs to be set false as the solution is approached,
|
| 62 |
+
even when the system should be symmetric positive definite, due to
|
| 63 |
+
numerical difficulties.
|
| 64 |
+
cholesky : bool (default = True)
|
| 65 |
+
True if the system is to be solved by Cholesky, rather than LU,
|
| 66 |
+
decomposition. This is typically faster unless the problem is very
|
| 67 |
+
small or prone to numerical difficulties.
|
| 68 |
+
permc_spec : str (default = 'MMD_AT_PLUS_A')
|
| 69 |
+
Sparsity preservation strategy used by SuperLU. Acceptable values are:
|
| 70 |
+
|
| 71 |
+
- ``NATURAL``: natural ordering.
|
| 72 |
+
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
|
| 73 |
+
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
|
| 74 |
+
- ``COLAMD``: approximate minimum degree column ordering.
|
| 75 |
+
|
| 76 |
+
See SuperLU documentation.
|
| 77 |
+
|
| 78 |
+
Returns
|
| 79 |
+
-------
|
| 80 |
+
solve : function
|
| 81 |
+
Handle to the appropriate solver function
|
| 82 |
+
|
| 83 |
+
"""
|
| 84 |
+
try:
|
| 85 |
+
if sparse:
|
| 86 |
+
if lstsq:
|
| 87 |
+
def solve(r, sym_pos=False):
|
| 88 |
+
return sps.linalg.lsqr(M, r)[0]
|
| 89 |
+
elif cholesky:
|
| 90 |
+
try:
|
| 91 |
+
# Will raise an exception in the first call,
|
| 92 |
+
# or when the matrix changes due to a new problem
|
| 93 |
+
_get_solver.cholmod_factor.cholesky_inplace(M)
|
| 94 |
+
except Exception:
|
| 95 |
+
_get_solver.cholmod_factor = cholmod_analyze(M)
|
| 96 |
+
_get_solver.cholmod_factor.cholesky_inplace(M)
|
| 97 |
+
solve = _get_solver.cholmod_factor
|
| 98 |
+
else:
|
| 99 |
+
if has_umfpack and sym_pos:
|
| 100 |
+
solve = sps.linalg.factorized(M)
|
| 101 |
+
else: # factorized doesn't pass permc_spec
|
| 102 |
+
solve = sps.linalg.splu(M, permc_spec=permc_spec).solve
|
| 103 |
+
|
| 104 |
+
else:
|
| 105 |
+
if lstsq: # sometimes necessary as solution is approached
|
| 106 |
+
def solve(r):
|
| 107 |
+
return sp.linalg.lstsq(M, r)[0]
|
| 108 |
+
elif cholesky:
|
| 109 |
+
L = sp.linalg.cho_factor(M)
|
| 110 |
+
|
| 111 |
+
def solve(r):
|
| 112 |
+
return sp.linalg.cho_solve(L, r)
|
| 113 |
+
else:
|
| 114 |
+
# this seems to cache the matrix factorization, so solving
|
| 115 |
+
# with multiple right hand sides is much faster
|
| 116 |
+
def solve(r, sym_pos=sym_pos):
|
| 117 |
+
if sym_pos:
|
| 118 |
+
return sp.linalg.solve(M, r, assume_a="pos")
|
| 119 |
+
else:
|
| 120 |
+
return sp.linalg.solve(M, r)
|
| 121 |
+
# There are many things that can go wrong here, and it's hard to say
|
| 122 |
+
# what all of them are. It doesn't really matter: if the matrix can't be
|
| 123 |
+
# factorized, return None. get_solver will be called again with different
|
| 124 |
+
# inputs, and a new routine will try to factorize the matrix.
|
| 125 |
+
except KeyboardInterrupt:
|
| 126 |
+
raise
|
| 127 |
+
except Exception:
|
| 128 |
+
return None
|
| 129 |
+
return solve
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False,
|
| 133 |
+
lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False,
|
| 134 |
+
permc_spec='MMD_AT_PLUS_A'):
|
| 135 |
+
"""
|
| 136 |
+
Given standard form problem defined by ``A``, ``b``, and ``c``;
|
| 137 |
+
current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``;
|
| 138 |
+
algorithmic parameters ``gamma and ``eta;
|
| 139 |
+
and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc``
|
| 140 |
+
(predictor-corrector), and ``ip`` (initial point improvement),
|
| 141 |
+
get the search direction for increments to the variable estimates.
|
| 142 |
+
|
| 143 |
+
Parameters
|
| 144 |
+
----------
|
| 145 |
+
As defined in [4], except:
|
| 146 |
+
sparse : bool
|
| 147 |
+
True if the system to be solved is sparse. This is typically set
|
| 148 |
+
True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
|
| 149 |
+
lstsq : bool
|
| 150 |
+
True if the system is ill-conditioned and/or (nearly) singular and
|
| 151 |
+
thus a more robust least-squares solver is desired. This is sometimes
|
| 152 |
+
needed as the solution is approached.
|
| 153 |
+
sym_pos : bool
|
| 154 |
+
True if the system matrix is symmetric positive definite
|
| 155 |
+
Sometimes this needs to be set false as the solution is approached,
|
| 156 |
+
even when the system should be symmetric positive definite, due to
|
| 157 |
+
numerical difficulties.
|
| 158 |
+
cholesky : bool
|
| 159 |
+
True if the system is to be solved by Cholesky, rather than LU,
|
| 160 |
+
decomposition. This is typically faster unless the problem is very
|
| 161 |
+
small or prone to numerical difficulties.
|
| 162 |
+
pc : bool
|
| 163 |
+
True if the predictor-corrector method of Mehrota is to be used. This
|
| 164 |
+
is almost always (if not always) beneficial. Even though it requires
|
| 165 |
+
the solution of an additional linear system, the factorization
|
| 166 |
+
is typically (implicitly) reused so solution is efficient, and the
|
| 167 |
+
number of algorithm iterations is typically reduced.
|
| 168 |
+
ip : bool
|
| 169 |
+
True if the improved initial point suggestion due to [4] section 4.3
|
| 170 |
+
is desired. It's unclear whether this is beneficial.
|
| 171 |
+
permc_spec : str (default = 'MMD_AT_PLUS_A')
|
| 172 |
+
(Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
|
| 173 |
+
True``.) A matrix is factorized in each iteration of the algorithm.
|
| 174 |
+
This option specifies how to permute the columns of the matrix for
|
| 175 |
+
sparsity preservation. Acceptable values are:
|
| 176 |
+
|
| 177 |
+
- ``NATURAL``: natural ordering.
|
| 178 |
+
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
|
| 179 |
+
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
|
| 180 |
+
- ``COLAMD``: approximate minimum degree column ordering.
|
| 181 |
+
|
| 182 |
+
This option can impact the convergence of the
|
| 183 |
+
interior point algorithm; test different values to determine which
|
| 184 |
+
performs best for your problem. For more information, refer to
|
| 185 |
+
``scipy.sparse.linalg.splu``.
|
| 186 |
+
|
| 187 |
+
Returns
|
| 188 |
+
-------
|
| 189 |
+
Search directions as defined in [4]
|
| 190 |
+
|
| 191 |
+
References
|
| 192 |
+
----------
|
| 193 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 194 |
+
optimizer for linear programming: an implementation of the
|
| 195 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 196 |
+
2000. 197-232.
|
| 197 |
+
|
| 198 |
+
"""
|
| 199 |
+
if A.shape[0] == 0:
|
| 200 |
+
# If there are no constraints, some solvers fail (understandably)
|
| 201 |
+
# rather than returning empty solution. This gets the job done.
|
| 202 |
+
sparse, lstsq, sym_pos, cholesky = False, False, True, False
|
| 203 |
+
n_x = len(x)
|
| 204 |
+
|
| 205 |
+
# [4] Equation 8.8
|
| 206 |
+
r_P = b * tau - A.dot(x)
|
| 207 |
+
r_D = c * tau - A.T.dot(y) - z
|
| 208 |
+
r_G = c.dot(x) - b.transpose().dot(y) + kappa
|
| 209 |
+
mu = (x.dot(z) + tau * kappa) / (n_x + 1)
|
| 210 |
+
|
| 211 |
+
# Assemble M from [4] Equation 8.31
|
| 212 |
+
Dinv = x / z
|
| 213 |
+
|
| 214 |
+
if sparse:
|
| 215 |
+
M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T))
|
| 216 |
+
else:
|
| 217 |
+
M = A.dot(Dinv.reshape(-1, 1) * A.T)
|
| 218 |
+
solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec)
|
| 219 |
+
|
| 220 |
+
# pc: "predictor-corrector" [4] Section 4.1
|
| 221 |
+
# In development this option could be turned off
|
| 222 |
+
# but it always seems to improve performance substantially
|
| 223 |
+
n_corrections = 1 if pc else 0
|
| 224 |
+
|
| 225 |
+
i = 0
|
| 226 |
+
alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0
|
| 227 |
+
while i <= n_corrections:
|
| 228 |
+
# Reference [4] Eq. 8.6
|
| 229 |
+
rhatp = eta(gamma) * r_P
|
| 230 |
+
rhatd = eta(gamma) * r_D
|
| 231 |
+
rhatg = eta(gamma) * r_G
|
| 232 |
+
|
| 233 |
+
# Reference [4] Eq. 8.7
|
| 234 |
+
rhatxs = gamma * mu - x * z
|
| 235 |
+
rhattk = gamma * mu - tau * kappa
|
| 236 |
+
|
| 237 |
+
if i == 1:
|
| 238 |
+
if ip: # if the correction is to get "initial point"
|
| 239 |
+
# Reference [4] Eq. 8.23
|
| 240 |
+
rhatxs = ((1 - alpha) * gamma * mu -
|
| 241 |
+
x * z - alpha**2 * d_x * d_z)
|
| 242 |
+
rhattk = ((1 - alpha) * gamma * mu -
|
| 243 |
+
tau * kappa -
|
| 244 |
+
alpha**2 * d_tau * d_kappa)
|
| 245 |
+
else: # if the correction is for "predictor-corrector"
|
| 246 |
+
# Reference [4] Eq. 8.13
|
| 247 |
+
rhatxs -= d_x * d_z
|
| 248 |
+
rhattk -= d_tau * d_kappa
|
| 249 |
+
|
| 250 |
+
# sometimes numerical difficulties arise as the solution is approached
|
| 251 |
+
# this loop tries to solve the equations using a sequence of functions
|
| 252 |
+
# for solve. For dense systems, the order is:
|
| 253 |
+
# 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve,
|
| 254 |
+
# 2. scipy.linalg.solve w/ sym_pos = True,
|
| 255 |
+
# 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails
|
| 256 |
+
# 4. scipy.linalg.lstsq
|
| 257 |
+
# For sparse systems, the order is:
|
| 258 |
+
# 1. sksparse.cholmod.cholesky (if available)
|
| 259 |
+
# 2. scipy.sparse.linalg.factorized (if umfpack available)
|
| 260 |
+
# 3. scipy.sparse.linalg.splu
|
| 261 |
+
# 4. scipy.sparse.linalg.lsqr
|
| 262 |
+
solved = False
|
| 263 |
+
while not solved:
|
| 264 |
+
try:
|
| 265 |
+
# [4] Equation 8.28
|
| 266 |
+
p, q = _sym_solve(Dinv, A, c, b, solve)
|
| 267 |
+
# [4] Equation 8.29
|
| 268 |
+
u, v = _sym_solve(Dinv, A, rhatd -
|
| 269 |
+
(1 / x) * rhatxs, rhatp, solve)
|
| 270 |
+
if np.any(np.isnan(p)) or np.any(np.isnan(q)):
|
| 271 |
+
raise LinAlgError
|
| 272 |
+
solved = True
|
| 273 |
+
except (LinAlgError, ValueError, TypeError) as e:
|
| 274 |
+
# Usually this doesn't happen. If it does, it happens when
|
| 275 |
+
# there are redundant constraints or when approaching the
|
| 276 |
+
# solution. If so, change solver.
|
| 277 |
+
if cholesky:
|
| 278 |
+
cholesky = False
|
| 279 |
+
warn(
|
| 280 |
+
"Solving system with option 'cholesky':True "
|
| 281 |
+
"failed. It is normal for this to happen "
|
| 282 |
+
"occasionally, especially as the solution is "
|
| 283 |
+
"approached. However, if you see this frequently, "
|
| 284 |
+
"consider setting option 'cholesky' to False.",
|
| 285 |
+
OptimizeWarning, stacklevel=5)
|
| 286 |
+
elif sym_pos:
|
| 287 |
+
sym_pos = False
|
| 288 |
+
warn(
|
| 289 |
+
"Solving system with option 'sym_pos':True "
|
| 290 |
+
"failed. It is normal for this to happen "
|
| 291 |
+
"occasionally, especially as the solution is "
|
| 292 |
+
"approached. However, if you see this frequently, "
|
| 293 |
+
"consider setting option 'sym_pos' to False.",
|
| 294 |
+
OptimizeWarning, stacklevel=5)
|
| 295 |
+
elif not lstsq:
|
| 296 |
+
lstsq = True
|
| 297 |
+
warn(
|
| 298 |
+
"Solving system with option 'sym_pos':False "
|
| 299 |
+
"failed. This may happen occasionally, "
|
| 300 |
+
"especially as the solution is "
|
| 301 |
+
"approached. However, if you see this frequently, "
|
| 302 |
+
"your problem may be numerically challenging. "
|
| 303 |
+
"If you cannot improve the formulation, consider "
|
| 304 |
+
"setting 'lstsq' to True. Consider also setting "
|
| 305 |
+
"`presolve` to True, if it is not already.",
|
| 306 |
+
OptimizeWarning, stacklevel=5)
|
| 307 |
+
else:
|
| 308 |
+
raise e
|
| 309 |
+
solve = _get_solver(M, sparse, lstsq, sym_pos,
|
| 310 |
+
cholesky, permc_spec)
|
| 311 |
+
# [4] Results after 8.29
|
| 312 |
+
d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) /
|
| 313 |
+
(1 / tau * kappa + (-c.dot(p) + b.dot(q))))
|
| 314 |
+
d_x = u + p * d_tau
|
| 315 |
+
d_y = v + q * d_tau
|
| 316 |
+
|
| 317 |
+
# [4] Relations between after 8.25 and 8.26
|
| 318 |
+
d_z = (1 / x) * (rhatxs - z * d_x)
|
| 319 |
+
d_kappa = 1 / tau * (rhattk - kappa * d_tau)
|
| 320 |
+
|
| 321 |
+
# [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23
|
| 322 |
+
alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1)
|
| 323 |
+
if ip: # initial point - see [4] 4.4
|
| 324 |
+
gamma = 10
|
| 325 |
+
else: # predictor-corrector, [4] definition after 8.12
|
| 326 |
+
beta1 = 0.1 # [4] pg. 220 (Table 8.1)
|
| 327 |
+
gamma = (1 - alpha)**2 * min(beta1, (1 - alpha))
|
| 328 |
+
i += 1
|
| 329 |
+
|
| 330 |
+
return d_x, d_y, d_z, d_tau, d_kappa
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def _sym_solve(Dinv, A, r1, r2, solve):
|
| 334 |
+
"""
|
| 335 |
+
An implementation of [4] equation 8.31 and 8.32
|
| 336 |
+
|
| 337 |
+
References
|
| 338 |
+
----------
|
| 339 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 340 |
+
optimizer for linear programming: an implementation of the
|
| 341 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 342 |
+
2000. 197-232.
|
| 343 |
+
|
| 344 |
+
"""
|
| 345 |
+
# [4] 8.31
|
| 346 |
+
r = r2 + A.dot(Dinv * r1)
|
| 347 |
+
v = solve(r)
|
| 348 |
+
# [4] 8.32
|
| 349 |
+
u = Dinv * (A.T.dot(v) - r1)
|
| 350 |
+
return u, v
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0):
|
| 354 |
+
"""
|
| 355 |
+
An implementation of [4] equation 8.21
|
| 356 |
+
|
| 357 |
+
References
|
| 358 |
+
----------
|
| 359 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 360 |
+
optimizer for linear programming: an implementation of the
|
| 361 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 362 |
+
2000. 197-232.
|
| 363 |
+
|
| 364 |
+
"""
|
| 365 |
+
# [4] 4.3 Equation 8.21, ignoring 8.20 requirement
|
| 366 |
+
# same step is taken in primal and dual spaces
|
| 367 |
+
# alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3
|
| 368 |
+
# the value 1 is used in Mehrota corrector and initial point correction
|
| 369 |
+
i_x = d_x < 0
|
| 370 |
+
i_z = d_z < 0
|
| 371 |
+
alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1
|
| 372 |
+
alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1
|
| 373 |
+
alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1
|
| 374 |
+
alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1
|
| 375 |
+
alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa])
|
| 376 |
+
return alpha
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def _get_message(status):
|
| 380 |
+
"""
|
| 381 |
+
Given problem status code, return a more detailed message.
|
| 382 |
+
|
| 383 |
+
Parameters
|
| 384 |
+
----------
|
| 385 |
+
status : int
|
| 386 |
+
An integer representing the exit status of the optimization::
|
| 387 |
+
|
| 388 |
+
0 : Optimization terminated successfully
|
| 389 |
+
1 : Iteration limit reached
|
| 390 |
+
2 : Problem appears to be infeasible
|
| 391 |
+
3 : Problem appears to be unbounded
|
| 392 |
+
4 : Serious numerical difficulties encountered
|
| 393 |
+
|
| 394 |
+
Returns
|
| 395 |
+
-------
|
| 396 |
+
message : str
|
| 397 |
+
A string descriptor of the exit status of the optimization.
|
| 398 |
+
|
| 399 |
+
"""
|
| 400 |
+
messages = (
|
| 401 |
+
["Optimization terminated successfully.",
|
| 402 |
+
"The iteration limit was reached before the algorithm converged.",
|
| 403 |
+
"The algorithm terminated successfully and determined that the "
|
| 404 |
+
"problem is infeasible.",
|
| 405 |
+
"The algorithm terminated successfully and determined that the "
|
| 406 |
+
"problem is unbounded.",
|
| 407 |
+
"Numerical difficulties were encountered before the problem "
|
| 408 |
+
"converged. Please check your problem formulation for errors, "
|
| 409 |
+
"independence of linear equality constraints, and reasonable "
|
| 410 |
+
"scaling and matrix condition numbers. If you continue to "
|
| 411 |
+
"encounter this error, please submit a bug report."
|
| 412 |
+
])
|
| 413 |
+
return messages[status]
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha):
|
| 417 |
+
"""
|
| 418 |
+
An implementation of [4] Equation 8.9
|
| 419 |
+
|
| 420 |
+
References
|
| 421 |
+
----------
|
| 422 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 423 |
+
optimizer for linear programming: an implementation of the
|
| 424 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 425 |
+
2000. 197-232.
|
| 426 |
+
|
| 427 |
+
"""
|
| 428 |
+
x = x + alpha * d_x
|
| 429 |
+
tau = tau + alpha * d_tau
|
| 430 |
+
z = z + alpha * d_z
|
| 431 |
+
kappa = kappa + alpha * d_kappa
|
| 432 |
+
y = y + alpha * d_y
|
| 433 |
+
return x, y, z, tau, kappa
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
def _get_blind_start(shape):
|
| 437 |
+
"""
|
| 438 |
+
Return the starting point from [4] 4.4
|
| 439 |
+
|
| 440 |
+
References
|
| 441 |
+
----------
|
| 442 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 443 |
+
optimizer for linear programming: an implementation of the
|
| 444 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 445 |
+
2000. 197-232.
|
| 446 |
+
|
| 447 |
+
"""
|
| 448 |
+
m, n = shape
|
| 449 |
+
x0 = np.ones(n)
|
| 450 |
+
y0 = np.zeros(m)
|
| 451 |
+
z0 = np.ones(n)
|
| 452 |
+
tau0 = 1
|
| 453 |
+
kappa0 = 1
|
| 454 |
+
return x0, y0, z0, tau0, kappa0
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def _indicators(A, b, c, c0, x, y, z, tau, kappa):
    """
    Implementation of several equations from [4] used as indicators of
    the status of optimization.

    References
    ----------
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.

    """
    norm = np.linalg.norm

    # Residuals of [4], Section 4 - The Homogeneous Algorithm, Equation 8.8
    def r_p(x, tau):
        return b * tau - A.dot(x)

    def r_d(y, z, tau):
        return c * tau - A.T.dot(y) - z

    def r_g(x, y, kappa):
        return kappa + c.dot(x) - b.dot(y)

    def mu(x, tau, z, kappa):
        # np.dot unpacks if they are arrays of size one
        return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1)

    # Residuals for termination are measured relative to their values at
    # the blind starting point.
    x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape)

    # See [4], Section 4.5 - The Stopping Criteria
    rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y)))
    rho_p = norm(r_p(x, tau)) / max(1, norm(r_p(x0, tau0)))
    rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d(y0, z0, tau0)))
    rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g(x0, y0, kappa0)))
    rho_mu = mu(x, tau, z, kappa) / mu(x0, tau0, z0, kappa0)
    obj = c.dot(x / tau) + c0
    return rho_p, rho_d, rho_A, rho_g, rho_mu, obj
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False):
    """
    Print indicators of optimization status to the console.

    Parameters
    ----------
    rho_p : float
        The (normalized) primal feasibility, see [4] 4.5
    rho_d : float
        The (normalized) dual feasibility, see [4] 4.5
    rho_g : float
        The (normalized) duality gap, see [4] 4.5
    alpha : float
        The step size, see [4] 4.3
    rho_mu : float
        The (normalized) path parameter, see [4] 4.5
    obj : float
        The objective function value of the current iterate
    header : bool
        True if a header is to be printed

    References
    ----------
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.

    """
    if header:
        # Column titles; trailing padding keeps them roughly aligned with
        # the fixed-width fields printed below.
        print("Primal Feasibility ",
              "Dual Feasibility ",
              "Duality Gap ",
              "Step ",
              "Path Parameter ",
              "Objective ")

    # no clue why this works
    # Fixed-width, left-aligned fields with 13 significant digits.
    fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}'
    # ``alpha`` may be the placeholder string "-" on the first display
    # (before any step has been taken), so only coerce numeric values.
    print(fmt.format(
        float(rho_p),
        float(rho_d),
        float(rho_g),
        alpha if isinstance(alpha, str) else float(alpha),
        float(rho_mu),
        float(obj)))
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq,
            sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args):
    r"""
    Solve a linear programming problem in standard form:

    Minimize::

        c @ x

    Subject to::

        A @ x == b
        x >= 0

    using the interior point method of [4].

    Parameters
    ----------
    A : 2-D array
        2-D array such that ``A @ x``, gives the values of the equality
        constraints at ``x``.
    b : 1-D array
        1-D array of values representing the RHS of each equality constraint
        (row) in ``A`` (for standard form problem).
    c : 1-D array
        Coefficients of the linear objective function to be minimized (for
        standard form problem).
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables. (Purely for display.)
    alpha0 : float
        The maximal step size for Mehrota's predictor-corrector search
        direction; see :math:`\beta_3`of [4] Table 8.1
    beta : float
        The desired reduction of the path parameter :math:`\mu` (see [6]_)
    maxiter : int
        The maximum number of iterations of the algorithm.
    disp : bool
        Set to ``True`` if indicators of optimization status are to be printed
        to the console each iteration.
    tol : float
        Termination tolerance; see [4]_ Section 4.5.
    sparse : bool
        Set to ``True`` if the problem is to be treated as sparse. However,
        the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as
        (dense) arrays rather than sparse matrices.
    lstsq : bool
        Set to ``True`` if the problem is expected to be very poorly
        conditioned. This should always be left as ``False`` unless severe
        numerical difficulties are frequently encountered, and a better option
        would be to improve the formulation of the problem.
    sym_pos : bool
        Leave ``True`` if the problem is expected to yield a well conditioned
        symmetric positive definite normal equation matrix (almost always).
    cholesky : bool
        Set to ``True`` if the normal equations are to be solved by explicit
        Cholesky decomposition followed by explicit forward/backward
        substitution. This is typically faster for moderate, dense problems
        that are numerically well-behaved.
    pc : bool
        Leave ``True`` if the predictor-corrector method of Mehrota is to be
        used. This is almost always (if not always) beneficial.
    ip : bool
        Set to ``True`` if the improved initial point suggestion due to [4]_
        Section 4.3 is desired. It's unclear whether this is beneficial.
    permc_spec : str (default = 'MMD_AT_PLUS_A')
        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
        True``.) A matrix is factorized in each iteration of the algorithm.
        This option specifies how to permute the columns of the matrix for
        sparsity preservation. Acceptable values are:

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering.

        This option can impact the convergence of the
        interior point algorithm; test different values to determine which
        performs best for your problem. For more information, refer to
        ``scipy.sparse.linalg.splu``.
    callback : callable, optional
        If a callback function is provided, it will be called within each
        iteration of the algorithm. The callback function must accept a single
        `scipy.optimize.OptimizeResult` consisting of the following fields:

            x : 1-D array
                Current solution vector
            fun : float
                Current value of the objective function
            success : bool
                True only when an algorithm has completed successfully,
                so this is always False as the callback function is called
                only while the algorithm is still iterating.
            slack : 1-D array
                The values of the slack variables. Each slack variable
                corresponds to an inequality constraint. If the slack is zero,
                the corresponding constraint is active.
            con : 1-D array
                The (nominally zero) residuals of the equality constraints,
                that is, ``b - A_eq @ x``
            phase : int
                The phase of the algorithm being executed. This is always
                1 for the interior-point method because it has only one phase.
            status : int
                For revised simplex, this is always 0 because if a different
                status is detected, the algorithm terminates.
            nit : int
                The number of iterations performed.
            message : str
                A string descriptor of the exit status of the optimization.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the standard-form
        problem into the solution to the original problem.

    Returns
    -------
    x_hat : float
        Solution vector (for standard form problem).
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    message : str
        A string descriptor of the exit status of the optimization.
    iteration : int
        The number of iterations taken to solve the problem

    References
    ----------
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.
    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
           Programming based on Newton's Method." Unpublished Course Notes,
           March 2004. Available 2/25/2017 at:
           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf

    """

    iteration = 0

    # default initial point
    x, y, z, tau, kappa = _get_blind_start(A.shape)

    # first iteration is special improvement of initial point
    # (the improved initial point only applies with the predictor-corrector)
    ip = ip if pc else False

    # [4] 4.5
    rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
        A, b, c, c0, x, y, z, tau, kappa)
    go = rho_p > tol or rho_d > tol or rho_A > tol  # we might get lucky : )

    if disp:
        _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True)
    if callback is not None:
        x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
        res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
                              'con': con, 'nit': iteration, 'phase': 1,
                              'complete': False, 'status': 0,
                              'message': "", 'success': False})
        callback(res)

    status = 0
    message = "Optimization terminated successfully."

    if sparse:
        A = sps.csc_matrix(A)

    while go:

        iteration += 1

        if ip:  # initial point
            # [4] Section 4.4
            gamma = 1

            def eta(g):
                return 1
        else:
            # gamma = 0 in predictor step according to [4] 4.1
            # if predictor/corrector is off, use mean of complementarity [6]
            # 5.1 / [4] Below Figure 10-4
            gamma = 0 if pc else beta * np.mean(z * x)
            # [4] Section 4.1
            # NOTE: the default argument binds the *current* value of gamma,
            # avoiding Python's late-binding closure behavior.

            def eta(g=gamma):
                return 1 - g

        try:
            # Solve [4] 8.6 and 8.7/8.13/8.23
            d_x, d_y, d_z, d_tau, d_kappa = _get_delta(
                A, b, c, x, y, z, tau, kappa, gamma, eta,
                sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec)

            if ip:  # initial point
                # [4] 4.4
                # Formula after 8.23 takes a full step regardless if this will
                # take it negative
                alpha = 1.0
                x, y, z, tau, kappa = _do_step(
                    x, y, z, tau, kappa, d_x, d_y,
                    d_z, d_tau, d_kappa, alpha)
                # clip the iterate back into the interior per [4] 4.4
                x[x < 1] = 1
                z[z < 1] = 1
                tau = max(1, tau)
                kappa = max(1, kappa)
                ip = False  # done with initial point
            else:
                # [4] Section 4.3
                alpha = _get_step(x, d_x, z, d_z, tau,
                                  d_tau, kappa, d_kappa, alpha0)
                # [4] Equation 8.9
                x, y, z, tau, kappa = _do_step(
                    x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha)

        except (LinAlgError, FloatingPointError,
                ValueError, ZeroDivisionError):
            # this can happen when sparse solver is used and presolve
            # is turned off. Also observed ValueError in AppVeyor Python 3.6
            # Win32 build (PR #8676). I've never seen it otherwise.
            status = 4
            message = _get_message(status)
            break

        # [4] 4.5
        rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
            A, b, c, c0, x, y, z, tau, kappa)
        go = rho_p > tol or rho_d > tol or rho_A > tol

        if disp:
            _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj)
        if callback is not None:
            x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
            res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
                                  'con': con, 'nit': iteration, 'phase': 1,
                                  'complete': False, 'status': 0,
                                  'message': "", 'success': False})
            callback(res)

        # [4] 4.5: infeasibility / unboundedness detection via tau -> 0
        inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol *
                max(1, kappa))
        inf2 = rho_mu < tol and tau < tol * min(1, kappa)
        if inf1 or inf2:
            # [4] Lemma 8.4 / Theorem 8.3
            if b.transpose().dot(y) > tol:
                status = 2
            else:  # elif c.T.dot(x) < tol: ? Probably not necessary.
                status = 3
            message = _get_message(status)
            break
        elif iteration >= maxiter:
            status = 1
            message = _get_message(status)
            break

    # recover the solution to the original (non-homogeneous) problem
    x_hat = x / tau
    # [4] Statement after Theorem 8.2
    return x_hat, status, message, iteration
|
| 819 |
+
|
| 820 |
+
|
| 821 |
+
def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8,
|
| 822 |
+
disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False,
|
| 823 |
+
sym_pos=True, cholesky=None, pc=True, ip=False,
|
| 824 |
+
permc_spec='MMD_AT_PLUS_A', **unknown_options):
|
| 825 |
+
r"""
|
| 826 |
+
Minimize a linear objective function subject to linear
|
| 827 |
+
equality and non-negativity constraints using the interior point method
|
| 828 |
+
of [4]_. Linear programming is intended to solve problems
|
| 829 |
+
of the following form:
|
| 830 |
+
|
| 831 |
+
Minimize::
|
| 832 |
+
|
| 833 |
+
c @ x
|
| 834 |
+
|
| 835 |
+
Subject to::
|
| 836 |
+
|
| 837 |
+
A @ x == b
|
| 838 |
+
x >= 0
|
| 839 |
+
|
| 840 |
+
User-facing documentation is in _linprog_doc.py.
|
| 841 |
+
|
| 842 |
+
Parameters
|
| 843 |
+
----------
|
| 844 |
+
c : 1-D array
|
| 845 |
+
Coefficients of the linear objective function to be minimized.
|
| 846 |
+
c0 : float
|
| 847 |
+
Constant term in objective function due to fixed (and eliminated)
|
| 848 |
+
variables. (Purely for display.)
|
| 849 |
+
A : 2-D array
|
| 850 |
+
2-D array such that ``A @ x``, gives the values of the equality
|
| 851 |
+
constraints at ``x``.
|
| 852 |
+
b : 1-D array
|
| 853 |
+
1-D array of values representing the right hand side of each equality
|
| 854 |
+
constraint (row) in ``A``.
|
| 855 |
+
callback : callable, optional
|
| 856 |
+
Callback function to be executed once per iteration.
|
| 857 |
+
postsolve_args : tuple
|
| 858 |
+
Data needed by _postsolve to convert the solution to the standard-form
|
| 859 |
+
problem into the solution to the original problem.
|
| 860 |
+
|
| 861 |
+
Options
|
| 862 |
+
-------
|
| 863 |
+
maxiter : int (default = 1000)
|
| 864 |
+
The maximum number of iterations of the algorithm.
|
| 865 |
+
tol : float (default = 1e-8)
|
| 866 |
+
Termination tolerance to be used for all termination criteria;
|
| 867 |
+
see [4]_ Section 4.5.
|
| 868 |
+
disp : bool (default = False)
|
| 869 |
+
Set to ``True`` if indicators of optimization status are to be printed
|
| 870 |
+
to the console each iteration.
|
| 871 |
+
alpha0 : float (default = 0.99995)
|
| 872 |
+
The maximal step size for Mehrota's predictor-corrector search
|
| 873 |
+
direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
|
| 874 |
+
beta : float (default = 0.1)
|
| 875 |
+
The desired reduction of the path parameter :math:`\mu` (see [6]_)
|
| 876 |
+
when Mehrota's predictor-corrector is not in use (uncommon).
|
| 877 |
+
sparse : bool (default = False)
|
| 878 |
+
Set to ``True`` if the problem is to be treated as sparse after
|
| 879 |
+
presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
|
| 880 |
+
this option will automatically be set ``True``, and the problem
|
| 881 |
+
will be treated as sparse even during presolve. If your constraint
|
| 882 |
+
matrices contain mostly zeros and the problem is not very small (less
|
| 883 |
+
than about 100 constraints or variables), consider setting ``True``
|
| 884 |
+
or providing ``A_eq`` and ``A_ub`` as sparse matrices.
|
| 885 |
+
lstsq : bool (default = False)
|
| 886 |
+
Set to ``True`` if the problem is expected to be very poorly
|
| 887 |
+
conditioned. This should always be left ``False`` unless severe
|
| 888 |
+
numerical difficulties are encountered. Leave this at the default
|
| 889 |
+
unless you receive a warning message suggesting otherwise.
|
| 890 |
+
sym_pos : bool (default = True)
|
| 891 |
+
Leave ``True`` if the problem is expected to yield a well conditioned
|
| 892 |
+
symmetric positive definite normal equation matrix
|
| 893 |
+
(almost always). Leave this at the default unless you receive
|
| 894 |
+
a warning message suggesting otherwise.
|
| 895 |
+
cholesky : bool (default = True)
|
| 896 |
+
Set to ``True`` if the normal equations are to be solved by explicit
|
| 897 |
+
Cholesky decomposition followed by explicit forward/backward
|
| 898 |
+
substitution. This is typically faster for problems
|
| 899 |
+
that are numerically well-behaved.
|
| 900 |
+
pc : bool (default = True)
|
| 901 |
+
Leave ``True`` if the predictor-corrector method of Mehrota is to be
|
| 902 |
+
used. This is almost always (if not always) beneficial.
|
| 903 |
+
ip : bool (default = False)
|
| 904 |
+
Set to ``True`` if the improved initial point suggestion due to [4]_
|
| 905 |
+
Section 4.3 is desired. Whether this is beneficial or not
|
| 906 |
+
depends on the problem.
|
| 907 |
+
permc_spec : str (default = 'MMD_AT_PLUS_A')
|
| 908 |
+
(Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
|
| 909 |
+
True``, and no SuiteSparse.)
|
| 910 |
+
A matrix is factorized in each iteration of the algorithm.
|
| 911 |
+
This option specifies how to permute the columns of the matrix for
|
| 912 |
+
sparsity preservation. Acceptable values are:
|
| 913 |
+
|
| 914 |
+
- ``NATURAL``: natural ordering.
|
| 915 |
+
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
|
| 916 |
+
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
|
| 917 |
+
- ``COLAMD``: approximate minimum degree column ordering.
|
| 918 |
+
|
| 919 |
+
This option can impact the convergence of the
|
| 920 |
+
interior point algorithm; test different values to determine which
|
| 921 |
+
performs best for your problem. For more information, refer to
|
| 922 |
+
``scipy.sparse.linalg.splu``.
|
| 923 |
+
unknown_options : dict
|
| 924 |
+
Optional arguments not used by this particular solver. If
|
| 925 |
+
`unknown_options` is non-empty a warning is issued listing all
|
| 926 |
+
unused options.
|
| 927 |
+
|
| 928 |
+
Returns
|
| 929 |
+
-------
|
| 930 |
+
x : 1-D array
|
| 931 |
+
Solution vector.
|
| 932 |
+
status : int
|
| 933 |
+
An integer representing the exit status of the optimization::
|
| 934 |
+
|
| 935 |
+
0 : Optimization terminated successfully
|
| 936 |
+
1 : Iteration limit reached
|
| 937 |
+
2 : Problem appears to be infeasible
|
| 938 |
+
3 : Problem appears to be unbounded
|
| 939 |
+
4 : Serious numerical difficulties encountered
|
| 940 |
+
|
| 941 |
+
message : str
|
| 942 |
+
A string descriptor of the exit status of the optimization.
|
| 943 |
+
iteration : int
|
| 944 |
+
The number of iterations taken to solve the problem.
|
| 945 |
+
|
| 946 |
+
Notes
|
| 947 |
+
-----
|
| 948 |
+
This method implements the algorithm outlined in [4]_ with ideas from [8]_
|
| 949 |
+
and a structure inspired by the simpler methods of [6]_.
|
| 950 |
+
|
| 951 |
+
The primal-dual path following method begins with initial 'guesses' of
|
| 952 |
+
the primal and dual variables of the standard form problem and iteratively
|
| 953 |
+
attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
|
| 954 |
+
problem with a gradually reduced logarithmic barrier term added to the
|
| 955 |
+
objective. This particular implementation uses a homogeneous self-dual
|
| 956 |
+
formulation, which provides certificates of infeasibility or unboundedness
|
| 957 |
+
where applicable.
|
| 958 |
+
|
| 959 |
+
The default initial point for the primal and dual variables is that
|
| 960 |
+
defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
|
| 961 |
+
point option ``ip=True``), an alternate (potentially improved) starting
|
| 962 |
+
point can be calculated according to the additional recommendations of
|
| 963 |
+
[4]_ Section 4.4.
|
| 964 |
+
|
| 965 |
+
A search direction is calculated using the predictor-corrector method
|
| 966 |
+
(single correction) proposed by Mehrota and detailed in [4]_ Section 4.1.
|
| 967 |
+
(A potential improvement would be to implement the method of multiple
|
| 968 |
+
corrections described in [4]_ Section 4.2.) In practice, this is
|
| 969 |
+
accomplished by solving the normal equations, [4]_ Section 5.1 Equations
|
| 970 |
+
8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
|
| 971 |
+
8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
|
| 972 |
+
solving the normal equations rather than 8.25 directly is that the
|
| 973 |
+
matrices involved are symmetric positive definite, so Cholesky
|
| 974 |
+
decomposition can be used rather than the more expensive LU factorization.
|
| 975 |
+
|
| 976 |
+
With default options, the solver used to perform the factorization depends
|
| 977 |
+
on third-party software availability and the conditioning of the problem.
|
| 978 |
+
|
| 979 |
+
For dense problems, solvers are tried in the following order:
|
| 980 |
+
|
| 981 |
+
1. ``scipy.linalg.cho_factor``
|
| 982 |
+
|
| 983 |
+
2. ``scipy.linalg.solve`` with option ``sym_pos=True``
|
| 984 |
+
|
| 985 |
+
3. ``scipy.linalg.solve`` with option ``sym_pos=False``
|
| 986 |
+
|
| 987 |
+
4. ``scipy.linalg.lstsq``
|
| 988 |
+
|
| 989 |
+
For sparse problems:
|
| 990 |
+
|
| 991 |
+
1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed)
|
| 992 |
+
|
| 993 |
+
2. ``scipy.sparse.linalg.factorized``
|
| 994 |
+
(if scikit-umfpack and SuiteSparse are installed)
|
| 995 |
+
|
| 996 |
+
3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
|
| 997 |
+
|
| 998 |
+
4. ``scipy.sparse.linalg.lsqr``
|
| 999 |
+
|
| 1000 |
+
If the solver fails for any reason, successively more robust (but slower)
|
| 1001 |
+
solvers are attempted in the order indicated. Attempting, failing, and
|
| 1002 |
+
re-starting factorization can be time consuming, so if the problem is
|
| 1003 |
+
numerically challenging, options can be set to bypass solvers that are
|
| 1004 |
+
failing. Setting ``cholesky=False`` skips to solver 2,
|
| 1005 |
+
``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
|
| 1006 |
+
to solver 4 for both sparse and dense problems.
|
| 1007 |
+
|
| 1008 |
+
Potential improvements for combatting issues associated with dense
|
| 1009 |
+
columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
|
| 1010 |
+
[10]_ Section 4.1-4.2; the latter also discusses the alleviation of
|
| 1011 |
+
accuracy issues associated with the substitution approach to free
|
| 1012 |
+
variables.
|
| 1013 |
+
|
| 1014 |
+
After calculating the search direction, the maximum possible step size
|
| 1015 |
+
that does not activate the non-negativity constraints is calculated, and
|
| 1016 |
+
the smaller of this step size and unity is applied (as in [4]_ Section
|
| 1017 |
+
4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size.
|
| 1018 |
+
|
| 1019 |
+
The new point is tested according to the termination conditions of [4]_
|
| 1020 |
+
Section 4.5. The same tolerance, which can be set using the ``tol`` option,
|
| 1021 |
+
is used for all checks. (A potential improvement would be to expose
|
| 1022 |
+
the different tolerances to be set independently.) If optimality,
|
| 1023 |
+
unboundedness, or infeasibility is detected, the solve procedure
|
| 1024 |
+
terminates; otherwise it repeats.
|
| 1025 |
+
|
| 1026 |
+
The expected problem formulation differs between the top level ``linprog``
|
| 1027 |
+
module and the method specific solvers. The method specific solvers expect a
|
| 1028 |
+
problem in standard form:
|
| 1029 |
+
|
| 1030 |
+
Minimize::
|
| 1031 |
+
|
| 1032 |
+
c @ x
|
| 1033 |
+
|
| 1034 |
+
Subject to::
|
| 1035 |
+
|
| 1036 |
+
A @ x == b
|
| 1037 |
+
x >= 0
|
| 1038 |
+
|
| 1039 |
+
Whereas the top level ``linprog`` module expects a problem of form:
|
| 1040 |
+
|
| 1041 |
+
Minimize::
|
| 1042 |
+
|
| 1043 |
+
c @ x
|
| 1044 |
+
|
| 1045 |
+
Subject to::
|
| 1046 |
+
|
| 1047 |
+
A_ub @ x <= b_ub
|
| 1048 |
+
A_eq @ x == b_eq
|
| 1049 |
+
lb <= x <= ub
|
| 1050 |
+
|
| 1051 |
+
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
|
| 1052 |
+
|
| 1053 |
+
The original problem contains equality, upper-bound and variable constraints
|
| 1054 |
+
whereas the method specific solver requires equality constraints and
|
| 1055 |
+
variable non-negativity.
|
| 1056 |
+
|
| 1057 |
+
``linprog`` module converts the original problem to standard form by
|
| 1058 |
+
converting the simple bounds to upper bound constraints, introducing
|
| 1059 |
+
non-negative slack variables for inequality constraints, and expressing
|
| 1060 |
+
unbounded variables as the difference between two non-negative variables.
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
References
|
| 1064 |
+
----------
|
| 1065 |
+
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
|
| 1066 |
+
optimizer for linear programming: an implementation of the
|
| 1067 |
+
homogeneous algorithm." High performance optimization. Springer US,
|
| 1068 |
+
2000. 197-232.
|
| 1069 |
+
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
|
| 1070 |
+
Programming based on Newton's Method." Unpublished Course Notes,
|
| 1071 |
+
March 2004. Available 2/25/2017 at
|
| 1072 |
+
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
|
| 1073 |
+
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
|
| 1074 |
+
programming." Mathematical Programming 71.2 (1995): 221-245.
|
| 1075 |
+
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
|
| 1076 |
+
programming." Athena Scientific 1 (1997): 997.
|
| 1077 |
+
.. [10] Andersen, Erling D., et al. Implementation of interior point methods
|
| 1078 |
+
for large scale linear programming. HEC/Universite de Geneve, 1996.
|
| 1079 |
+
|
| 1080 |
+
"""
|
| 1081 |
+
|
| 1082 |
+
_check_unknown_options(unknown_options)
|
| 1083 |
+
|
| 1084 |
+
# These should be warnings, not errors
|
| 1085 |
+
if (cholesky or cholesky is None) and sparse and not has_cholmod:
|
| 1086 |
+
if cholesky:
|
| 1087 |
+
warn("Sparse cholesky is only available with scikit-sparse. "
|
| 1088 |
+
"Setting `cholesky = False`",
|
| 1089 |
+
OptimizeWarning, stacklevel=3)
|
| 1090 |
+
cholesky = False
|
| 1091 |
+
|
| 1092 |
+
if sparse and lstsq:
|
| 1093 |
+
warn("Option combination 'sparse':True and 'lstsq':True "
|
| 1094 |
+
"is not recommended.",
|
| 1095 |
+
OptimizeWarning, stacklevel=3)
|
| 1096 |
+
|
| 1097 |
+
if lstsq and cholesky:
|
| 1098 |
+
warn("Invalid option combination 'lstsq':True "
|
| 1099 |
+
"and 'cholesky':True; option 'cholesky' has no effect when "
|
| 1100 |
+
"'lstsq' is set True.",
|
| 1101 |
+
OptimizeWarning, stacklevel=3)
|
| 1102 |
+
|
| 1103 |
+
valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD')
|
| 1104 |
+
if permc_spec.upper() not in valid_permc_spec:
|
| 1105 |
+
warn("Invalid permc_spec option: '" + str(permc_spec) + "'. "
|
| 1106 |
+
"Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', "
|
| 1107 |
+
"and 'COLAMD'. Reverting to default.",
|
| 1108 |
+
OptimizeWarning, stacklevel=3)
|
| 1109 |
+
permc_spec = 'MMD_AT_PLUS_A'
|
| 1110 |
+
|
| 1111 |
+
# This can be an error
|
| 1112 |
+
if not sym_pos and cholesky:
|
| 1113 |
+
raise ValueError(
|
| 1114 |
+
"Invalid option combination 'sym_pos':False "
|
| 1115 |
+
"and 'cholesky':True: Cholesky decomposition is only possible "
|
| 1116 |
+
"for symmetric positive definite matrices.")
|
| 1117 |
+
|
| 1118 |
+
cholesky = cholesky or (cholesky is None and sym_pos and not lstsq)
|
| 1119 |
+
|
| 1120 |
+
x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta,
|
| 1121 |
+
maxiter, disp, tol, sparse,
|
| 1122 |
+
lstsq, sym_pos, cholesky,
|
| 1123 |
+
pc, ip, permc_spec, callback,
|
| 1124 |
+
postsolve_args)
|
| 1125 |
+
|
| 1126 |
+
return x, status, message, iteration
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_linprog_simplex.py
ADDED
|
@@ -0,0 +1,661 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Simplex method for linear programming
|
| 2 |
+
|
| 3 |
+
The *simplex* method uses a traditional, full-tableau implementation of
|
| 4 |
+
Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex).
|
| 5 |
+
This algorithm is included for backwards compatibility and educational
|
| 6 |
+
purposes.
|
| 7 |
+
|
| 8 |
+
.. versionadded:: 0.15.0
|
| 9 |
+
|
| 10 |
+
Warnings
|
| 11 |
+
--------
|
| 12 |
+
|
| 13 |
+
The simplex method may encounter numerical difficulties when pivot
values are close to the specified tolerance. If encountered, try
removing any redundant constraints, changing the pivot strategy to
Bland's rule, or increasing the tolerance value.
|
| 17 |
+
|
| 18 |
+
Alternatively, more robust methods may be used. See
|
| 19 |
+
:ref:`'interior-point' <optimize.linprog-interior-point>` and
|
| 20 |
+
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`.
|
| 21 |
+
|
| 22 |
+
References
|
| 23 |
+
----------
|
| 24 |
+
.. [1] Dantzig, George B., Linear programming and extensions. Rand
|
| 25 |
+
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
|
| 26 |
+
1963
|
| 27 |
+
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
|
| 28 |
+
Mathematical Programming", McGraw-Hill, Chapter 4.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
import numpy as np
|
| 32 |
+
from warnings import warn
|
| 33 |
+
from ._optimize import OptimizeResult, OptimizeWarning, _check_unknown_options
|
| 34 |
+
from ._linprog_util import _postsolve
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _pivot_col(T, tol=1e-9, bland=False):
|
| 38 |
+
"""
|
| 39 |
+
Given a linear programming simplex tableau, determine the column
|
| 40 |
+
of the variable to enter the basis.
|
| 41 |
+
|
| 42 |
+
Parameters
|
| 43 |
+
----------
|
| 44 |
+
T : 2-D array
|
| 45 |
+
A 2-D array representing the simplex tableau, T, corresponding to the
|
| 46 |
+
linear programming problem. It should have the form:
|
| 47 |
+
|
| 48 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 49 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 50 |
+
.
|
| 51 |
+
.
|
| 52 |
+
.
|
| 53 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 54 |
+
[c[0], c[1], ..., c[n_total], 0]]
|
| 55 |
+
|
| 56 |
+
for a Phase 2 problem, or the form:
|
| 57 |
+
|
| 58 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 59 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 60 |
+
.
|
| 61 |
+
.
|
| 62 |
+
.
|
| 63 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 64 |
+
[c[0], c[1], ..., c[n_total], 0],
|
| 65 |
+
[c'[0], c'[1], ..., c'[n_total], 0]]
|
| 66 |
+
|
| 67 |
+
for a Phase 1 problem (a problem in which a basic feasible solution is
|
| 68 |
+
sought prior to maximizing the actual objective. ``T`` is modified in
|
| 69 |
+
place by ``_solve_simplex``.
|
| 70 |
+
tol : float
|
| 71 |
+
Elements in the objective row larger than -tol will not be considered
|
| 72 |
+
for pivoting. Nominally this value is zero, but numerical issues
|
| 73 |
+
cause a tolerance about zero to be necessary.
|
| 74 |
+
bland : bool
|
| 75 |
+
If True, use Bland's rule for selection of the column (select the
|
| 76 |
+
first column with a negative coefficient in the objective row,
|
| 77 |
+
regardless of magnitude).
|
| 78 |
+
|
| 79 |
+
Returns
|
| 80 |
+
-------
|
| 81 |
+
status: bool
|
| 82 |
+
True if a suitable pivot column was found, otherwise False.
|
| 83 |
+
A return of False indicates that the linear programming simplex
|
| 84 |
+
algorithm is complete.
|
| 85 |
+
col: int
|
| 86 |
+
The index of the column of the pivot element.
|
| 87 |
+
If status is False, col will be returned as nan.
|
| 88 |
+
"""
|
| 89 |
+
ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
|
| 90 |
+
if ma.count() == 0:
|
| 91 |
+
return False, np.nan
|
| 92 |
+
if bland:
|
| 93 |
+
# ma.mask is sometimes 0d
|
| 94 |
+
return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0]
|
| 95 |
+
return True, np.ma.nonzero(ma == ma.min())[0][0]
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False):
|
| 99 |
+
"""
|
| 100 |
+
Given a linear programming simplex tableau, determine the row for the
|
| 101 |
+
pivot operation.
|
| 102 |
+
|
| 103 |
+
Parameters
|
| 104 |
+
----------
|
| 105 |
+
T : 2-D array
|
| 106 |
+
A 2-D array representing the simplex tableau, T, corresponding to the
|
| 107 |
+
linear programming problem. It should have the form:
|
| 108 |
+
|
| 109 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 110 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 111 |
+
.
|
| 112 |
+
.
|
| 113 |
+
.
|
| 114 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 115 |
+
[c[0], c[1], ..., c[n_total], 0]]
|
| 116 |
+
|
| 117 |
+
for a Phase 2 problem, or the form:
|
| 118 |
+
|
| 119 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 120 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 121 |
+
.
|
| 122 |
+
.
|
| 123 |
+
.
|
| 124 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 125 |
+
[c[0], c[1], ..., c[n_total], 0],
|
| 126 |
+
[c'[0], c'[1], ..., c'[n_total], 0]]
|
| 127 |
+
|
| 128 |
+
for a Phase 1 problem (a Problem in which a basic feasible solution is
|
| 129 |
+
sought prior to maximizing the actual objective. ``T`` is modified in
|
| 130 |
+
place by ``_solve_simplex``.
|
| 131 |
+
basis : array
|
| 132 |
+
A list of the current basic variables.
|
| 133 |
+
pivcol : int
|
| 134 |
+
The index of the pivot column.
|
| 135 |
+
phase : int
|
| 136 |
+
The phase of the simplex algorithm (1 or 2).
|
| 137 |
+
tol : float
|
| 138 |
+
Elements in the pivot column smaller than tol will not be considered
|
| 139 |
+
for pivoting. Nominally this value is zero, but numerical issues
|
| 140 |
+
cause a tolerance about zero to be necessary.
|
| 141 |
+
bland : bool
|
| 142 |
+
If True, use Bland's rule for selection of the row (if more than one
|
| 143 |
+
row can be used, choose the one with the lowest variable index).
|
| 144 |
+
|
| 145 |
+
Returns
|
| 146 |
+
-------
|
| 147 |
+
status: bool
|
| 148 |
+
True if a suitable pivot row was found, otherwise False. A return
|
| 149 |
+
of False indicates that the linear programming problem is unbounded.
|
| 150 |
+
row: int
|
| 151 |
+
The index of the row of the pivot element. If status is False, row
|
| 152 |
+
will be returned as nan.
|
| 153 |
+
"""
|
| 154 |
+
if phase == 1:
|
| 155 |
+
k = 2
|
| 156 |
+
else:
|
| 157 |
+
k = 1
|
| 158 |
+
ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
|
| 159 |
+
if ma.count() == 0:
|
| 160 |
+
return False, np.nan
|
| 161 |
+
mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
|
| 162 |
+
q = mb / ma
|
| 163 |
+
min_rows = np.ma.nonzero(q == q.min())[0]
|
| 164 |
+
if bland:
|
| 165 |
+
return True, min_rows[np.argmin(np.take(basis, min_rows))]
|
| 166 |
+
return True, min_rows[0]
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9):
|
| 170 |
+
"""
|
| 171 |
+
Pivot the simplex tableau inplace on the element given by (pivrow, pivol).
|
| 172 |
+
The entering variable corresponds to the column given by pivcol forcing
|
| 173 |
+
the variable basis[pivrow] to leave the basis.
|
| 174 |
+
|
| 175 |
+
Parameters
|
| 176 |
+
----------
|
| 177 |
+
T : 2-D array
|
| 178 |
+
A 2-D array representing the simplex tableau, T, corresponding to the
|
| 179 |
+
linear programming problem. It should have the form:
|
| 180 |
+
|
| 181 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 182 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 183 |
+
.
|
| 184 |
+
.
|
| 185 |
+
.
|
| 186 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 187 |
+
[c[0], c[1], ..., c[n_total], 0]]
|
| 188 |
+
|
| 189 |
+
for a Phase 2 problem, or the form:
|
| 190 |
+
|
| 191 |
+
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
|
| 192 |
+
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
|
| 193 |
+
.
|
| 194 |
+
.
|
| 195 |
+
.
|
| 196 |
+
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
|
| 197 |
+
[c[0], c[1], ..., c[n_total], 0],
|
| 198 |
+
[c'[0], c'[1], ..., c'[n_total], 0]]
|
| 199 |
+
|
| 200 |
+
for a Phase 1 problem (a problem in which a basic feasible solution is
|
| 201 |
+
sought prior to maximizing the actual objective. ``T`` is modified in
|
| 202 |
+
place by ``_solve_simplex``.
|
| 203 |
+
basis : 1-D array
|
| 204 |
+
An array of the indices of the basic variables, such that basis[i]
|
| 205 |
+
contains the column corresponding to the basic variable for row i.
|
| 206 |
+
Basis is modified in place by _apply_pivot.
|
| 207 |
+
pivrow : int
|
| 208 |
+
Row index of the pivot.
|
| 209 |
+
pivcol : int
|
| 210 |
+
Column index of the pivot.
|
| 211 |
+
"""
|
| 212 |
+
basis[pivrow] = pivcol
|
| 213 |
+
pivval = T[pivrow, pivcol]
|
| 214 |
+
T[pivrow] = T[pivrow] / pivval
|
| 215 |
+
for irow in range(T.shape[0]):
|
| 216 |
+
if irow != pivrow:
|
| 217 |
+
T[irow] = T[irow] - T[pivrow] * T[irow, pivcol]
|
| 218 |
+
|
| 219 |
+
# The selected pivot should never lead to a pivot value less than the tol.
|
| 220 |
+
if np.isclose(pivval, tol, atol=0, rtol=1e4):
|
| 221 |
+
message = (
|
| 222 |
+
f"The pivot operation produces a pivot value of:{pivval: .1e}, "
|
| 223 |
+
"which is only slightly greater than the specified "
|
| 224 |
+
f"tolerance{tol: .1e}. This may lead to issues regarding the "
|
| 225 |
+
"numerical stability of the simplex method. "
|
| 226 |
+
"Removing redundant constraints, changing the pivot strategy "
|
| 227 |
+
"via Bland's rule or increasing the tolerance may "
|
| 228 |
+
"help reduce the issue.")
|
| 229 |
+
warn(message, OptimizeWarning, stacklevel=5)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def _solve_simplex(T, n, basis, callback, postsolve_args,
                   maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0,
                   ):
    """
    Solve a linear programming problem in "standard form" using the Simplex
    Method. Linear Programming is intended to solve the following problem form:

    Minimize::

        c @ x

    Subject to::

        A @ x == b
        x >= 0

    Parameters
    ----------
    T : 2-D array
        A 2-D array representing the simplex tableau, T, corresponding to the
        linear programming problem. It should have the form:

        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
         .
         .
         .
         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
         [c[0], c[1], ..., c[n_total], 0]]

        for a Phase 2 problem, or the form:

        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
         .
         .
         .
         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
         [c[0], c[1], ..., c[n_total], 0],
         [c'[0], c'[1], ..., c'[n_total], 0]]

        for a Phase 1 problem (a problem in which a basic feasible solution is
        sought prior to maximizing the actual objective. ``T`` is modified in
        place by ``_solve_simplex``.
    n : int
        The number of true variables in the problem.
    basis : 1-D array
        An array of the indices of the basic variables, such that basis[i]
        contains the column corresponding to the basic variable for row i.
        Basis is modified in place by _solve_simplex
    callback : callable, optional
        If a callback function is provided, it will be called within each
        iteration of the algorithm. The callback must accept a
        `scipy.optimize.OptimizeResult` consisting of the following fields:

            x : 1-D array
                Current solution vector
            fun : float
                Current value of the objective function
            success : bool
                True only when a phase has completed successfully. This
                will be False for most iterations.
            slack : 1-D array
                The values of the slack variables. Each slack variable
                corresponds to an inequality constraint. If the slack is zero,
                the corresponding constraint is active.
            con : 1-D array
                The (nominally zero) residuals of the equality constraints,
                that is, ``b - A_eq @ x``
            phase : int
                The phase of the optimization being executed. In phase 1 a basic
                feasible solution is sought and the T has an additional row
                representing an alternate objective function.
            status : int
                An integer representing the exit status of the optimization::

                     0 : Optimization terminated successfully
                     1 : Iteration limit reached
                     2 : Problem appears to be infeasible
                     3 : Problem appears to be unbounded
                     4 : Serious numerical difficulties encountered

            nit : int
                The number of iterations performed.
            message : str
                A string descriptor of the exit status of the optimization.
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the standard-form
        problem into the solution to the original problem.
    maxiter : int
        The maximum number of iterations to perform before aborting the
        optimization.
    tol : float
        The tolerance which determines when a solution is "close enough" to
        zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.
    phase : int
        The phase of the optimization being executed. In phase 1 a basic
        feasible solution is sought and the T has an additional row
        representing an alternate objective function.
    bland : bool
        If True, choose pivots using Bland's rule [3]_. In problems which
        fail to converge due to cycling, using Bland's rule can provide
        convergence at the expense of a less optimal path about the simplex.
    nit0 : int
        The initial iteration number used to keep an accurate iteration total
        in a two-phase problem.

    Returns
    -------
    nit : int
        The number of iterations. Used to keep an accurate iteration total
        in the two-phase problem.
    status : int
        An integer representing the exit status of the optimization::

         0 : Optimization terminated successfully
         1 : Iteration limit reached
         2 : Problem appears to be infeasible
         3 : Problem appears to be unbounded
         4 : Serious numerical difficulties encountered

    """
    nit = nit0
    status = 0
    message = ''
    complete = False

    # m counts the tableau columns reported back through the callback;
    # the phase-1 tableau is expected to carry one extra column compared
    # to phase 2 (hence -2 vs -1) -- NOTE(review): determined by the
    # tableau construction in the caller; confirm against _linprog_simplex.
    if phase == 1:
        m = T.shape[1]-2
    elif phase == 2:
        m = T.shape[1]-1
    else:
        raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")

    if phase == 2:
        # Check if any artificial variables are still in the basis.
        # If yes, check if any coefficients from this row and a column
        # corresponding to one of the non-artificial variable is non-zero.
        # If found, pivot at this term. If not, start phase 2.
        # Do this for all artificial variables in the basis.
        # Ref: "An Introduction to Linear Programming and Game Theory"
        # by Paul R. Thie, Gerard E. Keough, 3rd Ed,
        # Chapter 3.7 Redundant Systems (page 102)
        for pivrow in [row for row in range(basis.size)
                       if basis[row] > T.shape[1] - 2]:
            non_zero_row = [col for col in range(T.shape[1] - 1)
                            if abs(T[pivrow, col]) > tol]
            if len(non_zero_row) > 0:
                pivcol = non_zero_row[0]
                _apply_pivot(T, basis, pivrow, pivcol, tol)
                nit += 1

    # Pre-allocate the buffer used to assemble the callback solution; it
    # must be large enough to index by any basic-variable column index.
    if len(basis[:m]) == 0:
        solution = np.empty(T.shape[1] - 1, dtype=np.float64)
    else:
        solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1),
                            dtype=np.float64)

    # Main simplex loop: pick an entering column, a leaving row, report to
    # the callback, then pivot -- until optimal, unbounded, or out of
    # iterations.
    while not complete:
        # Find the pivot column
        pivcol_found, pivcol = _pivot_col(T, tol, bland)
        if not pivcol_found:
            # No improving column: current basic solution is optimal.
            pivcol = np.nan
            pivrow = np.nan
            status = 0
            complete = True
        else:
            # Find the pivot row
            pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)
            if not pivrow_found:
                # Pivot column has no positive entry: problem is unbounded.
                status = 3
                complete = True

        # Report the current (pre-pivot) state to the callback, translated
        # back to the original problem via _postsolve.
        if callback is not None:
            solution[:] = 0
            solution[basis[:n]] = T[:n, -1]
            x = solution[:m]
            x, fun, slack, con = _postsolve(
                x, postsolve_args
            )
            res = OptimizeResult({
                'x': x,
                'fun': fun,
                'slack': slack,
                'con': con,
                'status': status,
                'message': message,
                'nit': nit,
                'success': status == 0 and complete,
                'phase': phase,
                'complete': complete,
            })
            callback(res)

        if not complete:
            if nit >= maxiter:
                # Iteration limit exceeded
                status = 1
                complete = True
            else:
                _apply_pivot(T, basis, pivrow, pivcol, tol)
                nit += 1
    return nit, status
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
def _linprog_simplex(c, c0, A, b, callback, postsolve_args,
|
| 439 |
+
maxiter=1000, tol=1e-9, disp=False, bland=False,
|
| 440 |
+
**unknown_options):
|
| 441 |
+
"""
|
| 442 |
+
Minimize a linear objective function subject to linear equality and
|
| 443 |
+
non-negativity constraints using the two phase simplex method.
|
| 444 |
+
Linear programming is intended to solve problems of the following form:
|
| 445 |
+
|
| 446 |
+
Minimize::
|
| 447 |
+
|
| 448 |
+
c @ x
|
| 449 |
+
|
| 450 |
+
Subject to::
|
| 451 |
+
|
| 452 |
+
A @ x == b
|
| 453 |
+
x >= 0
|
| 454 |
+
|
| 455 |
+
User-facing documentation is in _linprog_doc.py.
|
| 456 |
+
|
| 457 |
+
Parameters
|
| 458 |
+
----------
|
| 459 |
+
c : 1-D array
|
| 460 |
+
Coefficients of the linear objective function to be minimized.
|
| 461 |
+
c0 : float
|
| 462 |
+
Constant term in objective function due to fixed (and eliminated)
|
| 463 |
+
variables. (Purely for display.)
|
| 464 |
+
A : 2-D array
|
| 465 |
+
2-D array such that ``A @ x``, gives the values of the equality
|
| 466 |
+
constraints at ``x``.
|
| 467 |
+
b : 1-D array
|
| 468 |
+
1-D array of values representing the right hand side of each equality
|
| 469 |
+
constraint (row) in ``A``.
|
| 470 |
+
callback : callable, optional
|
| 471 |
+
If a callback function is provided, it will be called within each
|
| 472 |
+
iteration of the algorithm. The callback function must accept a single
|
| 473 |
+
`scipy.optimize.OptimizeResult` consisting of the following fields:
|
| 474 |
+
|
| 475 |
+
x : 1-D array
|
| 476 |
+
Current solution vector
|
| 477 |
+
fun : float
|
| 478 |
+
Current value of the objective function
|
| 479 |
+
success : bool
|
| 480 |
+
True when an algorithm has completed successfully.
|
| 481 |
+
slack : 1-D array
|
| 482 |
+
The values of the slack variables. Each slack variable
|
| 483 |
+
corresponds to an inequality constraint. If the slack is zero,
|
| 484 |
+
the corresponding constraint is active.
|
| 485 |
+
con : 1-D array
|
| 486 |
+
The (nominally zero) residuals of the equality constraints,
|
| 487 |
+
that is, ``b - A_eq @ x``
|
| 488 |
+
phase : int
|
| 489 |
+
The phase of the algorithm being executed.
|
| 490 |
+
status : int
|
| 491 |
+
An integer representing the status of the optimization::
|
| 492 |
+
|
| 493 |
+
0 : Algorithm proceeding nominally
|
| 494 |
+
1 : Iteration limit reached
|
| 495 |
+
2 : Problem appears to be infeasible
|
| 496 |
+
3 : Problem appears to be unbounded
|
| 497 |
+
4 : Serious numerical difficulties encountered
|
| 498 |
+
nit : int
|
| 499 |
+
The number of iterations performed.
|
| 500 |
+
message : str
|
| 501 |
+
A string descriptor of the exit status of the optimization.
|
| 502 |
+
postsolve_args : tuple
|
| 503 |
+
Data needed by _postsolve to convert the solution to the standard-form
|
| 504 |
+
problem into the solution to the original problem.
|
| 505 |
+
|
| 506 |
+
Options
|
| 507 |
+
-------
|
| 508 |
+
maxiter : int
|
| 509 |
+
The maximum number of iterations to perform.
|
| 510 |
+
disp : bool
|
| 511 |
+
If True, print exit status message to sys.stdout
|
| 512 |
+
tol : float
|
| 513 |
+
The tolerance which determines when a solution is "close enough" to
|
| 514 |
+
zero in Phase 1 to be considered a basic feasible solution or close
|
| 515 |
+
enough to positive to serve as an optimal solution.
|
| 516 |
+
bland : bool
|
| 517 |
+
If True, use Bland's anti-cycling rule [3]_ to choose pivots to
|
| 518 |
+
prevent cycling. If False, choose pivots which should lead to a
|
| 519 |
+
converged solution more quickly. The latter method is subject to
|
| 520 |
+
cycling (non-convergence) in rare instances.
|
| 521 |
+
unknown_options : dict
|
| 522 |
+
Optional arguments not used by this particular solver. If
|
| 523 |
+
`unknown_options` is non-empty a warning is issued listing all
|
| 524 |
+
unused options.
|
| 525 |
+
|
| 526 |
+
Returns
|
| 527 |
+
-------
|
| 528 |
+
x : 1-D array
|
| 529 |
+
Solution vector.
|
| 530 |
+
status : int
|
| 531 |
+
An integer representing the exit status of the optimization::
|
| 532 |
+
|
| 533 |
+
0 : Optimization terminated successfully
|
| 534 |
+
1 : Iteration limit reached
|
| 535 |
+
2 : Problem appears to be infeasible
|
| 536 |
+
3 : Problem appears to be unbounded
|
| 537 |
+
4 : Serious numerical difficulties encountered
|
| 538 |
+
|
| 539 |
+
message : str
|
| 540 |
+
A string descriptor of the exit status of the optimization.
|
| 541 |
+
iteration : int
|
| 542 |
+
The number of iterations taken to solve the problem.
|
| 543 |
+
|
| 544 |
+
References
|
| 545 |
+
----------
|
| 546 |
+
.. [1] Dantzig, George B., Linear programming and extensions. Rand
|
| 547 |
+
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
|
| 548 |
+
1963
|
| 549 |
+
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
|
| 550 |
+
Mathematical Programming", McGraw-Hill, Chapter 4.
|
| 551 |
+
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
|
| 552 |
+
Mathematics of Operations Research (2), 1977: pp. 103-107.
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
Notes
|
| 556 |
+
-----
|
| 557 |
+
The expected problem formulation differs between the top level ``linprog``
|
| 558 |
+
module and the method specific solvers. The method specific solvers expect a
|
| 559 |
+
problem in standard form:
|
| 560 |
+
|
| 561 |
+
Minimize::
|
| 562 |
+
|
| 563 |
+
c @ x
|
| 564 |
+
|
| 565 |
+
Subject to::
|
| 566 |
+
|
| 567 |
+
A @ x == b
|
| 568 |
+
x >= 0
|
| 569 |
+
|
| 570 |
+
Whereas the top level ``linprog`` module expects a problem of form:
|
| 571 |
+
|
| 572 |
+
Minimize::
|
| 573 |
+
|
| 574 |
+
c @ x
|
| 575 |
+
|
| 576 |
+
Subject to::
|
| 577 |
+
|
| 578 |
+
A_ub @ x <= b_ub
|
| 579 |
+
A_eq @ x == b_eq
|
| 580 |
+
lb <= x <= ub
|
| 581 |
+
|
| 582 |
+
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
|
| 583 |
+
|
| 584 |
+
The original problem contains equality, upper-bound and variable constraints
|
| 585 |
+
whereas the method specific solver requires equality constraints and
|
| 586 |
+
variable non-negativity.
|
| 587 |
+
|
| 588 |
+
``linprog`` module converts the original problem to standard form by
|
| 589 |
+
converting the simple bounds to upper bound constraints, introducing
|
| 590 |
+
non-negative slack variables for inequality constraints, and expressing
|
| 591 |
+
unbounded variables as the difference between two non-negative variables.
|
| 592 |
+
"""
|
| 593 |
+
_check_unknown_options(unknown_options)
|
| 594 |
+
|
| 595 |
+
status = 0
|
| 596 |
+
messages = {0: "Optimization terminated successfully.",
|
| 597 |
+
1: "Iteration limit reached.",
|
| 598 |
+
2: "Optimization failed. Unable to find a feasible"
|
| 599 |
+
" starting point.",
|
| 600 |
+
3: "Optimization failed. The problem appears to be unbounded.",
|
| 601 |
+
4: "Optimization failed. Singular matrix encountered."}
|
| 602 |
+
|
| 603 |
+
n, m = A.shape
|
| 604 |
+
|
| 605 |
+
# All constraints must have b >= 0.
|
| 606 |
+
is_negative_constraint = np.less(b, 0)
|
| 607 |
+
A[is_negative_constraint] *= -1
|
| 608 |
+
b[is_negative_constraint] *= -1
|
| 609 |
+
|
| 610 |
+
# As all constraints are equality constraints the artificial variables
|
| 611 |
+
# will also be basic variables.
|
| 612 |
+
av = np.arange(n) + m
|
| 613 |
+
basis = av.copy()
|
| 614 |
+
|
| 615 |
+
# Format the phase one tableau by adding artificial variables and stacking
|
| 616 |
+
# the constraints, the objective row and pseudo-objective row.
|
| 617 |
+
row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))
|
| 618 |
+
row_objective = np.hstack((c, np.zeros(n), c0))
|
| 619 |
+
row_pseudo_objective = -row_constraints.sum(axis=0)
|
| 620 |
+
row_pseudo_objective[av] = 0
|
| 621 |
+
T = np.vstack((row_constraints, row_objective, row_pseudo_objective))
|
| 622 |
+
|
| 623 |
+
nit1, status = _solve_simplex(T, n, basis, callback=callback,
|
| 624 |
+
postsolve_args=postsolve_args,
|
| 625 |
+
maxiter=maxiter, tol=tol, phase=1,
|
| 626 |
+
bland=bland
|
| 627 |
+
)
|
| 628 |
+
# if pseudo objective is zero, remove the last row from the tableau and
|
| 629 |
+
# proceed to phase 2
|
| 630 |
+
nit2 = nit1
|
| 631 |
+
if abs(T[-1, -1]) < tol:
|
| 632 |
+
# Remove the pseudo-objective row from the tableau
|
| 633 |
+
T = T[:-1, :]
|
| 634 |
+
# Remove the artificial variable columns from the tableau
|
| 635 |
+
T = np.delete(T, av, 1)
|
| 636 |
+
else:
|
| 637 |
+
# Failure to find a feasible starting point
|
| 638 |
+
status = 2
|
| 639 |
+
messages[status] = (
|
| 640 |
+
"Phase 1 of the simplex method failed to find a feasible "
|
| 641 |
+
"solution. The pseudo-objective function evaluates to {0:.1e} "
|
| 642 |
+
"which exceeds the required tolerance of {1} for a solution to be "
|
| 643 |
+
"considered 'close enough' to zero to be a basic solution. "
|
| 644 |
+
"Consider increasing the tolerance to be greater than {0:.1e}. "
|
| 645 |
+
"If this tolerance is unacceptably large the problem may be "
|
| 646 |
+
"infeasible.".format(abs(T[-1, -1]), tol)
|
| 647 |
+
)
|
| 648 |
+
|
| 649 |
+
if status == 0:
|
| 650 |
+
# Phase 2
|
| 651 |
+
nit2, status = _solve_simplex(T, n, basis, callback=callback,
|
| 652 |
+
postsolve_args=postsolve_args,
|
| 653 |
+
maxiter=maxiter, tol=tol, phase=2,
|
| 654 |
+
bland=bland, nit0=nit1
|
| 655 |
+
)
|
| 656 |
+
|
| 657 |
+
solution = np.zeros(n + m)
|
| 658 |
+
solution[basis[:n]] = T[:n, -1]
|
| 659 |
+
x = solution[:m]
|
| 660 |
+
|
| 661 |
+
return x, status, messages[status], int(nit2)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_minimize.py
ADDED
|
@@ -0,0 +1,1116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Unified interfaces to minimization algorithms.
|
| 3 |
+
|
| 4 |
+
Functions
|
| 5 |
+
---------
|
| 6 |
+
- minimize : minimization of a function of several variables.
|
| 7 |
+
- minimize_scalar : minimization of a function of one variable.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
__all__ = ['minimize', 'minimize_scalar']
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
from warnings import warn
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
# unconstrained minimization
|
| 18 |
+
from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
|
| 19 |
+
_minimize_bfgs, _minimize_newtoncg,
|
| 20 |
+
_minimize_scalar_brent, _minimize_scalar_bounded,
|
| 21 |
+
_minimize_scalar_golden, MemoizeJac, OptimizeResult,
|
| 22 |
+
_wrap_callback, _recover_from_bracket_error)
|
| 23 |
+
from ._trustregion_dogleg import _minimize_dogleg
|
| 24 |
+
from ._trustregion_ncg import _minimize_trust_ncg
|
| 25 |
+
from ._trustregion_krylov import _minimize_trust_krylov
|
| 26 |
+
from ._trustregion_exact import _minimize_trustregion_exact
|
| 27 |
+
from ._trustregion_constr import _minimize_trustregion_constr
|
| 28 |
+
|
| 29 |
+
# constrained minimization
|
| 30 |
+
from ._lbfgsb_py import _minimize_lbfgsb
|
| 31 |
+
from ._tnc import _minimize_tnc
|
| 32 |
+
from ._cobyla_py import _minimize_cobyla
|
| 33 |
+
from ._cobyqa_py import _minimize_cobyqa
|
| 34 |
+
from ._slsqp_py import _minimize_slsqp
|
| 35 |
+
from ._constraints import (old_bound_to_new, new_bounds_to_old,
|
| 36 |
+
old_constraint_to_new, new_constraint_to_old,
|
| 37 |
+
NonlinearConstraint, LinearConstraint, Bounds,
|
| 38 |
+
PreparedConstraint)
|
| 39 |
+
from ._differentiable_functions import FD_METHODS
|
| 40 |
+
|
| 41 |
+
MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
|
| 42 |
+
'l-bfgs-b', 'tnc', 'cobyla', 'cobyqa', 'slsqp',
|
| 43 |
+
'trust-constr', 'dogleg', 'trust-ncg', 'trust-exact',
|
| 44 |
+
'trust-krylov']
|
| 45 |
+
|
| 46 |
+
# These methods support the new callback interface (passed an OptimizeResult)
|
| 47 |
+
MINIMIZE_METHODS_NEW_CB = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
|
| 48 |
+
'l-bfgs-b', 'trust-constr', 'dogleg', 'trust-ncg',
|
| 49 |
+
'trust-exact', 'trust-krylov', 'cobyqa']
|
| 50 |
+
|
| 51 |
+
MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden']
|
| 52 |
+
|
| 53 |
+
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
|
| 54 |
+
hessp=None, bounds=None, constraints=(), tol=None,
|
| 55 |
+
callback=None, options=None):
|
| 56 |
+
"""Minimization of scalar function of one or more variables.
|
| 57 |
+
|
| 58 |
+
Parameters
|
| 59 |
+
----------
|
| 60 |
+
fun : callable
|
| 61 |
+
The objective function to be minimized.
|
| 62 |
+
|
| 63 |
+
``fun(x, *args) -> float``
|
| 64 |
+
|
| 65 |
+
where ``x`` is a 1-D array with shape (n,) and ``args``
|
| 66 |
+
is a tuple of the fixed parameters needed to completely
|
| 67 |
+
specify the function.
|
| 68 |
+
x0 : ndarray, shape (n,)
|
| 69 |
+
Initial guess. Array of real elements of size (n,),
|
| 70 |
+
where ``n`` is the number of independent variables.
|
| 71 |
+
args : tuple, optional
|
| 72 |
+
Extra arguments passed to the objective function and its
|
| 73 |
+
derivatives (`fun`, `jac` and `hess` functions).
|
| 74 |
+
method : str or callable, optional
|
| 75 |
+
Type of solver. Should be one of
|
| 76 |
+
|
| 77 |
+
- 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
|
| 78 |
+
- 'Powell' :ref:`(see here) <optimize.minimize-powell>`
|
| 79 |
+
- 'CG' :ref:`(see here) <optimize.minimize-cg>`
|
| 80 |
+
- 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
|
| 81 |
+
- 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
|
| 82 |
+
- 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
|
| 83 |
+
- 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
|
| 84 |
+
- 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
|
| 85 |
+
- 'COBYQA' :ref:`(see here) <optimize.minimize-cobyqa>`
|
| 86 |
+
- 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
|
| 87 |
+
- 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>`
|
| 88 |
+
- 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
|
| 89 |
+
- 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
|
| 90 |
+
- 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
|
| 91 |
+
- 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
|
| 92 |
+
- custom - a callable object, see below for description.
|
| 93 |
+
|
| 94 |
+
If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
|
| 95 |
+
depending on whether or not the problem has constraints or bounds.
|
| 96 |
+
jac : {callable, '2-point', '3-point', 'cs', bool}, optional
|
| 97 |
+
Method for computing the gradient vector. Only for CG, BFGS,
|
| 98 |
+
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
|
| 99 |
+
trust-exact and trust-constr.
|
| 100 |
+
If it is a callable, it should be a function that returns the gradient
|
| 101 |
+
vector:
|
| 102 |
+
|
| 103 |
+
``jac(x, *args) -> array_like, shape (n,)``
|
| 104 |
+
|
| 105 |
+
where ``x`` is an array with shape (n,) and ``args`` is a tuple with
|
| 106 |
+
the fixed parameters. If `jac` is a Boolean and is True, `fun` is
|
| 107 |
+
assumed to return a tuple ``(f, g)`` containing the objective
|
| 108 |
+
function and the gradient.
|
| 109 |
+
Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
|
| 110 |
+
'trust-krylov' require that either a callable be supplied, or that
|
| 111 |
+
`fun` return the objective and gradient.
|
| 112 |
+
If None or False, the gradient will be estimated using 2-point finite
|
| 113 |
+
difference estimation with an absolute step size.
|
| 114 |
+
Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
|
| 115 |
+
to select a finite difference scheme for numerical estimation of the
|
| 116 |
+
gradient with a relative step size. These finite difference schemes
|
| 117 |
+
obey any specified `bounds`.
|
| 118 |
+
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
|
| 119 |
+
Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
|
| 120 |
+
trust-ncg, trust-krylov, trust-exact and trust-constr.
|
| 121 |
+
If it is callable, it should return the Hessian matrix:
|
| 122 |
+
|
| 123 |
+
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
|
| 124 |
+
|
| 125 |
+
where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed
|
| 126 |
+
parameters.
|
| 127 |
+
The keywords {'2-point', '3-point', 'cs'} can also be used to select
|
| 128 |
+
a finite difference scheme for numerical estimation of the hessian.
|
| 129 |
+
Alternatively, objects implementing the `HessianUpdateStrategy`
|
| 130 |
+
interface can be used to approximate the Hessian. Available
|
| 131 |
+
quasi-Newton methods implementing this interface are:
|
| 132 |
+
|
| 133 |
+
- `BFGS`;
|
| 134 |
+
- `SR1`.
|
| 135 |
+
|
| 136 |
+
Not all of the options are available for each of the methods; for
|
| 137 |
+
availability refer to the notes.
|
| 138 |
+
hessp : callable, optional
|
| 139 |
+
Hessian of objective function times an arbitrary vector p. Only for
|
| 140 |
+
Newton-CG, trust-ncg, trust-krylov, trust-constr.
|
| 141 |
+
Only one of `hessp` or `hess` needs to be given. If `hess` is
|
| 142 |
+
provided, then `hessp` will be ignored. `hessp` must compute the
|
| 143 |
+
Hessian times an arbitrary vector:
|
| 144 |
+
|
| 145 |
+
``hessp(x, p, *args) -> ndarray shape (n,)``
|
| 146 |
+
|
| 147 |
+
where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with
|
| 148 |
+
dimension (n,) and ``args`` is a tuple with the fixed
|
| 149 |
+
parameters.
|
| 150 |
+
bounds : sequence or `Bounds`, optional
|
| 151 |
+
Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell,
|
| 152 |
+
trust-constr, COBYLA, and COBYQA methods. There are two ways to specify
|
| 153 |
+
the bounds:
|
| 154 |
+
|
| 155 |
+
1. Instance of `Bounds` class.
|
| 156 |
+
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
|
| 157 |
+
is used to specify no bound.
|
| 158 |
+
|
| 159 |
+
constraints : {Constraint, dict} or List of {Constraint, dict}, optional
|
| 160 |
+
Constraints definition. Only for COBYLA, COBYQA, SLSQP and trust-constr.
|
| 161 |
+
|
| 162 |
+
Constraints for 'trust-constr' and 'cobyqa' are defined as a single object
|
| 163 |
+
or a list of objects specifying constraints to the optimization problem.
|
| 164 |
+
Available constraints are:
|
| 165 |
+
|
| 166 |
+
- `LinearConstraint`
|
| 167 |
+
- `NonlinearConstraint`
|
| 168 |
+
|
| 169 |
+
Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
|
| 170 |
+
Each dictionary with fields:
|
| 171 |
+
|
| 172 |
+
type : str
|
| 173 |
+
Constraint type: 'eq' for equality, 'ineq' for inequality.
|
| 174 |
+
fun : callable
|
| 175 |
+
The function defining the constraint.
|
| 176 |
+
jac : callable, optional
|
| 177 |
+
The Jacobian of `fun` (only for SLSQP).
|
| 178 |
+
args : sequence, optional
|
| 179 |
+
Extra arguments to be passed to the function and Jacobian.
|
| 180 |
+
|
| 181 |
+
Equality constraint means that the constraint function result is to
|
| 182 |
+
be zero whereas inequality means that it is to be non-negative.
|
| 183 |
+
Note that COBYLA only supports inequality constraints.
|
| 184 |
+
|
| 185 |
+
tol : float, optional
|
| 186 |
+
Tolerance for termination. When `tol` is specified, the selected
|
| 187 |
+
minimization algorithm sets some relevant solver-specific tolerance(s)
|
| 188 |
+
equal to `tol`. For detailed control, use solver-specific
|
| 189 |
+
options.
|
| 190 |
+
options : dict, optional
|
| 191 |
+
A dictionary of solver options. All methods except `TNC` accept the
|
| 192 |
+
following generic options:
|
| 193 |
+
|
| 194 |
+
maxiter : int
|
| 195 |
+
Maximum number of iterations to perform. Depending on the
|
| 196 |
+
method each iteration may use several function evaluations.
|
| 197 |
+
|
| 198 |
+
For `TNC` use `maxfun` instead of `maxiter`.
|
| 199 |
+
disp : bool
|
| 200 |
+
Set to True to print convergence messages.
|
| 201 |
+
|
| 202 |
+
For method-specific options, see :func:`show_options()`.
|
| 203 |
+
callback : callable, optional
|
| 204 |
+
A callable called after each iteration.
|
| 205 |
+
|
| 206 |
+
All methods except TNC, SLSQP, and COBYLA support a callable with
|
| 207 |
+
the signature:
|
| 208 |
+
|
| 209 |
+
``callback(intermediate_result: OptimizeResult)``
|
| 210 |
+
|
| 211 |
+
where ``intermediate_result`` is a keyword parameter containing an
|
| 212 |
+
`OptimizeResult` with attributes ``x`` and ``fun``, the present values
|
| 213 |
+
of the parameter vector and objective function. Note that the name
|
| 214 |
+
of the parameter must be ``intermediate_result`` for the callback
|
| 215 |
+
to be passed an `OptimizeResult`. These methods will also terminate if
|
| 216 |
+
the callback raises ``StopIteration``.
|
| 217 |
+
|
| 218 |
+
All methods except trust-constr (also) support a signature like:
|
| 219 |
+
|
| 220 |
+
``callback(xk)``
|
| 221 |
+
|
| 222 |
+
where ``xk`` is the current parameter vector.
|
| 223 |
+
|
| 224 |
+
Introspection is used to determine which of the signatures above to
|
| 225 |
+
invoke.
|
| 226 |
+
|
| 227 |
+
Returns
|
| 228 |
+
-------
|
| 229 |
+
res : OptimizeResult
|
| 230 |
+
The optimization result represented as a ``OptimizeResult`` object.
|
| 231 |
+
Important attributes are: ``x`` the solution array, ``success`` a
|
| 232 |
+
Boolean flag indicating if the optimizer exited successfully and
|
| 233 |
+
``message`` which describes the cause of the termination. See
|
| 234 |
+
`OptimizeResult` for a description of other attributes.
|
| 235 |
+
|
| 236 |
+
See also
|
| 237 |
+
--------
|
| 238 |
+
minimize_scalar : Interface to minimization algorithms for scalar
|
| 239 |
+
univariate functions
|
| 240 |
+
show_options : Additional options accepted by the solvers
|
| 241 |
+
|
| 242 |
+
Notes
|
| 243 |
+
-----
|
| 244 |
+
This section describes the available solvers that can be selected by the
|
| 245 |
+
'method' parameter. The default method is *BFGS*.
|
| 246 |
+
|
| 247 |
+
**Unconstrained minimization**
|
| 248 |
+
|
| 249 |
+
Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
|
| 250 |
+
gradient algorithm by Polak and Ribiere, a variant of the
|
| 251 |
+
Fletcher-Reeves method described in [5]_ pp.120-122. Only the
|
| 252 |
+
first derivatives are used.
|
| 253 |
+
|
| 254 |
+
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
|
| 255 |
+
method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
|
| 256 |
+
pp. 136. It uses the first derivatives only. BFGS has proven good
|
| 257 |
+
performance even for non-smooth optimizations. This method also
|
| 258 |
+
returns an approximation of the Hessian inverse, stored as
|
| 259 |
+
`hess_inv` in the OptimizeResult object.
|
| 260 |
+
|
| 261 |
+
Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
|
| 262 |
+
Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
|
| 263 |
+
Newton method). It uses a CG method to the compute the search
|
| 264 |
+
direction. See also *TNC* method for a box-constrained
|
| 265 |
+
minimization with a similar algorithm. Suitable for large-scale
|
| 266 |
+
problems.
|
| 267 |
+
|
| 268 |
+
Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
|
| 269 |
+
trust-region algorithm [5]_ for unconstrained minimization. This
|
| 270 |
+
algorithm requires the gradient and Hessian; furthermore the
|
| 271 |
+
Hessian is required to be positive definite.
|
| 272 |
+
|
| 273 |
+
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
|
| 274 |
+
Newton conjugate gradient trust-region algorithm [5]_ for
|
| 275 |
+
unconstrained minimization. This algorithm requires the gradient
|
| 276 |
+
and either the Hessian or a function that computes the product of
|
| 277 |
+
the Hessian with a given vector. Suitable for large-scale problems.
|
| 278 |
+
|
| 279 |
+
Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
|
| 280 |
+
the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
|
| 281 |
+
minimization. This algorithm requires the gradient
|
| 282 |
+
and either the Hessian or a function that computes the product of
|
| 283 |
+
the Hessian with a given vector. Suitable for large-scale problems.
|
| 284 |
+
On indefinite problems it requires usually less iterations than the
|
| 285 |
+
`trust-ncg` method and is recommended for medium and large-scale problems.
|
| 286 |
+
|
| 287 |
+
Method :ref:`trust-exact <optimize.minimize-trustexact>`
|
| 288 |
+
is a trust-region method for unconstrained minimization in which
|
| 289 |
+
quadratic subproblems are solved almost exactly [13]_. This
|
| 290 |
+
algorithm requires the gradient and the Hessian (which is
|
| 291 |
+
*not* required to be positive definite). It is, in many
|
| 292 |
+
situations, the Newton method to converge in fewer iterations
|
| 293 |
+
and the most recommended for small and medium-size problems.
|
| 294 |
+
|
| 295 |
+
**Bound-Constrained minimization**
|
| 296 |
+
|
| 297 |
+
Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
|
| 298 |
+
Simplex algorithm [1]_, [2]_. This algorithm is robust in many
|
| 299 |
+
applications. However, if numerical computation of derivative can be
|
| 300 |
+
trusted, other algorithms using the first and/or second derivatives
|
| 301 |
+
information might be preferred for their better performance in
|
| 302 |
+
general.
|
| 303 |
+
|
| 304 |
+
Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
|
| 305 |
+
algorithm [6]_, [7]_ for bound constrained minimization.
|
| 306 |
+
|
| 307 |
+
Method :ref:`Powell <optimize.minimize-powell>` is a modification
|
| 308 |
+
of Powell's method [3]_, [4]_ which is a conjugate direction
|
| 309 |
+
method. It performs sequential one-dimensional minimizations along
|
| 310 |
+
each vector of the directions set (`direc` field in `options` and
|
| 311 |
+
`info`), which is updated at each iteration of the main
|
| 312 |
+
minimization loop. The function need not be differentiable, and no
|
| 313 |
+
derivatives are taken. If bounds are not provided, then an
|
| 314 |
+
unbounded line search will be used. If bounds are provided and
|
| 315 |
+
the initial guess is within the bounds, then every function
|
| 316 |
+
evaluation throughout the minimization procedure will be within
|
| 317 |
+
the bounds. If bounds are provided, the initial guess is outside
|
| 318 |
+
the bounds, and `direc` is full rank (default has full rank), then
|
| 319 |
+
some function evaluations during the first iteration may be
|
| 320 |
+
outside the bounds, but every function evaluation after the first
|
| 321 |
+
iteration will be within the bounds. If `direc` is not full rank,
|
| 322 |
+
then some parameters may not be optimized and the solution is not
|
| 323 |
+
guaranteed to be within the bounds.
|
| 324 |
+
|
| 325 |
+
Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
|
| 326 |
+
algorithm [5]_, [8]_ to minimize a function with variables subject
|
| 327 |
+
to bounds. This algorithm uses gradient information; it is also
|
| 328 |
+
called Newton Conjugate-Gradient. It differs from the *Newton-CG*
|
| 329 |
+
method described above as it wraps a C implementation and allows
|
| 330 |
+
each variable to be given upper and lower bounds.
|
| 331 |
+
|
| 332 |
+
**Constrained Minimization**
|
| 333 |
+
|
| 334 |
+
Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
|
| 335 |
+
Constrained Optimization BY Linear Approximation (COBYLA) method
|
| 336 |
+
[9]_, [10]_, [11]_. The algorithm is based on linear
|
| 337 |
+
approximations to the objective function and each constraint. The
|
| 338 |
+
method wraps a FORTRAN implementation of the algorithm. The
|
| 339 |
+
constraints functions 'fun' may return either a single number
|
| 340 |
+
or an array or list of numbers.
|
| 341 |
+
|
| 342 |
+
Method :ref:`COBYQA <optimize.minimize-cobyqa>` uses the Constrained
|
| 343 |
+
Optimization BY Quadratic Approximations (COBYQA) method [18]_. The
|
| 344 |
+
algorithm is a derivative-free trust-region SQP method based on quadratic
|
| 345 |
+
approximations to the objective function and each nonlinear constraint. The
|
| 346 |
+
bounds are treated as unrelaxable constraints, in the sense that the
|
| 347 |
+
algorithm always respects them throughout the optimization process.
|
| 348 |
+
|
| 349 |
+
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
|
| 350 |
+
Least SQuares Programming to minimize a function of several
|
| 351 |
+
variables with any combination of bounds, equality and inequality
|
| 352 |
+
constraints. The method wraps the SLSQP Optimization subroutine
|
| 353 |
+
originally implemented by Dieter Kraft [12]_. Note that the
|
| 354 |
+
wrapper handles infinite values in bounds by converting them into
|
| 355 |
+
large floating values.
|
| 356 |
+
|
| 357 |
+
Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
|
| 358 |
+
trust-region algorithm for constrained optimization. It switches
|
| 359 |
+
between two implementations depending on the problem definition.
|
| 360 |
+
It is the most versatile constrained minimization algorithm
|
| 361 |
+
implemented in SciPy and the most appropriate for large-scale problems.
|
| 362 |
+
For equality constrained problems it is an implementation of Byrd-Omojokun
|
| 363 |
+
Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
|
| 364 |
+
inequality constraints are imposed as well, it switches to the trust-region
|
| 365 |
+
interior point method described in [16]_. This interior point algorithm,
|
| 366 |
+
in turn, solves inequality constraints by introducing slack variables
|
| 367 |
+
and solving a sequence of equality-constrained barrier problems
|
| 368 |
+
for progressively smaller values of the barrier parameter.
|
| 369 |
+
The previously described equality constrained SQP method is
|
| 370 |
+
used to solve the subproblems with increasing levels of accuracy
|
| 371 |
+
as the iterate gets closer to a solution.
|
| 372 |
+
|
| 373 |
+
**Finite-Difference Options**
|
| 374 |
+
|
| 375 |
+
For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
|
| 376 |
+
the gradient and the Hessian may be approximated using
|
| 377 |
+
three finite-difference schemes: {'2-point', '3-point', 'cs'}.
|
| 378 |
+
The scheme 'cs' is, potentially, the most accurate but it
|
| 379 |
+
requires the function to correctly handle complex inputs and to
|
| 380 |
+
be differentiable in the complex plane. The scheme '3-point' is more
|
| 381 |
+
accurate than '2-point' but requires twice as many operations. If the
|
| 382 |
+
gradient is estimated via finite-differences the Hessian must be
|
| 383 |
+
estimated using one of the quasi-Newton strategies.
|
| 384 |
+
|
| 385 |
+
**Method specific options for the** `hess` **keyword**
|
| 386 |
+
|
| 387 |
+
+--------------+------+----------+-------------------------+-----+
|
| 388 |
+
| method/Hess | None | callable | '2-point/'3-point'/'cs' | HUS |
|
| 389 |
+
+==============+======+==========+=========================+=====+
|
| 390 |
+
| Newton-CG | x | (n, n) | x | x |
|
| 391 |
+
| | | LO | | |
|
| 392 |
+
+--------------+------+----------+-------------------------+-----+
|
| 393 |
+
| dogleg | | (n, n) | | |
|
| 394 |
+
+--------------+------+----------+-------------------------+-----+
|
| 395 |
+
| trust-ncg | | (n, n) | x | x |
|
| 396 |
+
+--------------+------+----------+-------------------------+-----+
|
| 397 |
+
| trust-krylov | | (n, n) | x | x |
|
| 398 |
+
+--------------+------+----------+-------------------------+-----+
|
| 399 |
+
| trust-exact | | (n, n) | | |
|
| 400 |
+
+--------------+------+----------+-------------------------+-----+
|
| 401 |
+
| trust-constr | x | (n, n) | x | x |
|
| 402 |
+
| | | LO | | |
|
| 403 |
+
| | | sp | | |
|
| 404 |
+
+--------------+------+----------+-------------------------+-----+
|
| 405 |
+
|
| 406 |
+
where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy
|
| 407 |
+
|
| 408 |
+
**Custom minimizers**
|
| 409 |
+
|
| 410 |
+
It may be useful to pass a custom minimization method, for example
|
| 411 |
+
when using a frontend to this method such as `scipy.optimize.basinhopping`
|
| 412 |
+
or a different library. You can simply pass a callable as the ``method``
|
| 413 |
+
parameter.
|
| 414 |
+
|
| 415 |
+
The callable is called as ``method(fun, x0, args, **kwargs, **options)``
|
| 416 |
+
where ``kwargs`` corresponds to any other parameters passed to `minimize`
|
| 417 |
+
(such as `callback`, `hess`, etc.), except the `options` dict, which has
|
| 418 |
+
its contents also passed as `method` parameters pair by pair. Also, if
|
| 419 |
+
`jac` has been passed as a bool type, `jac` and `fun` are mangled so that
|
| 420 |
+
`fun` returns just the function values and `jac` is converted to a function
|
| 421 |
+
returning the Jacobian. The method shall return an `OptimizeResult`
|
| 422 |
+
object.
|
| 423 |
+
|
| 424 |
+
The provided `method` callable must be able to accept (and possibly ignore)
|
| 425 |
+
arbitrary parameters; the set of parameters accepted by `minimize` may
|
| 426 |
+
expand in future versions and then these parameters will be passed to
|
| 427 |
+
the method. You can find an example in the scipy.optimize tutorial.
|
| 428 |
+
|
| 429 |
+
References
|
| 430 |
+
----------
|
| 431 |
+
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
|
| 432 |
+
Minimization. The Computer Journal 7: 308-13.
|
| 433 |
+
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
|
| 434 |
+
respectable, in Numerical Analysis 1995: Proceedings of the 1995
|
| 435 |
+
Dundee Biennial Conference in Numerical Analysis (Eds. D F
|
| 436 |
+
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
|
| 437 |
+
191-208.
|
| 438 |
+
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
|
| 439 |
+
a function of several variables without calculating derivatives. The
|
| 440 |
+
Computer Journal 7: 155-162.
|
| 441 |
+
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
|
| 442 |
+
Numerical Recipes (any edition), Cambridge University Press.
|
| 443 |
+
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
|
| 444 |
+
Springer New York.
|
| 445 |
+
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
|
| 446 |
+
Algorithm for Bound Constrained Optimization. SIAM Journal on
|
| 447 |
+
Scientific and Statistical Computing 16 (5): 1190-1208.
|
| 448 |
+
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
|
| 449 |
+
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
|
| 450 |
+
optimization. ACM Transactions on Mathematical Software 23 (4):
|
| 451 |
+
550-560.
|
| 452 |
+
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
|
| 453 |
+
1984. SIAM Journal of Numerical Analysis 21: 770-778.
|
| 454 |
+
.. [9] Powell, M J D. A direct search optimization method that models
|
| 455 |
+
the objective and constraint functions by linear interpolation.
|
| 456 |
+
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
|
| 457 |
+
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
|
| 458 |
+
.. [10] Powell M J D. Direct search algorithms for optimization
|
| 459 |
+
calculations. 1998. Acta Numerica 7: 287-336.
|
| 460 |
+
.. [11] Powell M J D. A view of algorithms for optimization without
|
| 461 |
+
derivatives. 2007.Cambridge University Technical Report DAMTP
|
| 462 |
+
2007/NA03
|
| 463 |
+
.. [12] Kraft, D. A software package for sequential quadratic
|
| 464 |
+
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
|
| 465 |
+
Center -- Institute for Flight Mechanics, Koln, Germany.
|
| 466 |
+
.. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
|
| 467 |
+
Trust region methods. 2000. Siam. pp. 169-200.
|
| 468 |
+
.. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
|
| 469 |
+
implementation of the GLTR method for iterative solution of
|
| 470 |
+
the trust region problem", :arxiv:`1611.04718`
|
| 471 |
+
.. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
|
| 472 |
+
Trust-Region Subproblem using the Lanczos Method",
|
| 473 |
+
SIAM J. Optim., 9(2), 504--525, (1999).
|
| 474 |
+
.. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
|
| 475 |
+
An interior point algorithm for large-scale nonlinear programming.
|
| 476 |
+
SIAM Journal on Optimization 9.4: 877-900.
|
| 477 |
+
.. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
|
| 478 |
+
implementation of an algorithm for large-scale equality constrained
|
| 479 |
+
optimization. SIAM Journal on Optimization 8.3: 682-706.
|
| 480 |
+
.. [18] Ragonneau, T. M. *Model-Based Derivative-Free Optimization Methods
|
| 481 |
+
and Software*. PhD thesis, Department of Applied Mathematics, The Hong
|
| 482 |
+
Kong Polytechnic University, Hong Kong, China, 2022. URL:
|
| 483 |
+
https://theses.lib.polyu.edu.hk/handle/200/12294.
|
| 484 |
+
|
| 485 |
+
Examples
|
| 486 |
+
--------
|
| 487 |
+
Let us consider the problem of minimizing the Rosenbrock function. This
|
| 488 |
+
function (and its respective derivatives) is implemented in `rosen`
|
| 489 |
+
(resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
|
| 490 |
+
|
| 491 |
+
>>> from scipy.optimize import minimize, rosen, rosen_der
|
| 492 |
+
|
| 493 |
+
A simple application of the *Nelder-Mead* method is:
|
| 494 |
+
|
| 495 |
+
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
|
| 496 |
+
>>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
|
| 497 |
+
>>> res.x
|
| 498 |
+
array([ 1., 1., 1., 1., 1.])
|
| 499 |
+
|
| 500 |
+
Now using the *BFGS* algorithm, using the first derivative and a few
|
| 501 |
+
options:
|
| 502 |
+
|
| 503 |
+
>>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
|
| 504 |
+
... options={'gtol': 1e-6, 'disp': True})
|
| 505 |
+
Optimization terminated successfully.
|
| 506 |
+
Current function value: 0.000000
|
| 507 |
+
Iterations: 26
|
| 508 |
+
Function evaluations: 31
|
| 509 |
+
Gradient evaluations: 31
|
| 510 |
+
>>> res.x
|
| 511 |
+
array([ 1., 1., 1., 1., 1.])
|
| 512 |
+
>>> print(res.message)
|
| 513 |
+
Optimization terminated successfully.
|
| 514 |
+
>>> res.hess_inv
|
| 515 |
+
array([
|
| 516 |
+
[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
|
| 517 |
+
[ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
|
| 518 |
+
[ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
|
| 519 |
+
[ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
|
| 520 |
+
[ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]
|
| 521 |
+
])
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
Next, consider a minimization problem with several constraints (namely
|
| 525 |
+
Example 16.4 from [5]_). The objective function is:
|
| 526 |
+
|
| 527 |
+
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
|
| 528 |
+
|
| 529 |
+
There are three constraints defined as:
|
| 530 |
+
|
| 531 |
+
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
|
| 532 |
+
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
|
| 533 |
+
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
|
| 534 |
+
|
| 535 |
+
And variables must be positive, hence the following bounds:
|
| 536 |
+
|
| 537 |
+
>>> bnds = ((0, None), (0, None))
|
| 538 |
+
|
| 539 |
+
The optimization problem is solved using the SLSQP method as:
|
| 540 |
+
|
| 541 |
+
>>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
|
| 542 |
+
... constraints=cons)
|
| 543 |
+
|
| 544 |
+
It should converge to the theoretical solution (1.4, 1.7).
|
| 545 |
+
|
| 546 |
+
"""
|
| 547 |
+
x0 = np.atleast_1d(np.asarray(x0))
|
| 548 |
+
|
| 549 |
+
if x0.ndim != 1:
|
| 550 |
+
raise ValueError("'x0' must only have one dimension.")
|
| 551 |
+
|
| 552 |
+
if x0.dtype.kind in np.typecodes["AllInteger"]:
|
| 553 |
+
x0 = np.asarray(x0, dtype=float)
|
| 554 |
+
|
| 555 |
+
if not isinstance(args, tuple):
|
| 556 |
+
args = (args,)
|
| 557 |
+
|
| 558 |
+
if method is None:
|
| 559 |
+
# Select automatically
|
| 560 |
+
if constraints:
|
| 561 |
+
method = 'SLSQP'
|
| 562 |
+
elif bounds is not None:
|
| 563 |
+
method = 'L-BFGS-B'
|
| 564 |
+
else:
|
| 565 |
+
method = 'BFGS'
|
| 566 |
+
|
| 567 |
+
if callable(method):
|
| 568 |
+
meth = "_custom"
|
| 569 |
+
else:
|
| 570 |
+
meth = method.lower()
|
| 571 |
+
|
| 572 |
+
if options is None:
|
| 573 |
+
options = {}
|
| 574 |
+
# check if optional parameters are supported by the selected method
|
| 575 |
+
# - jac
|
| 576 |
+
if meth in ('nelder-mead', 'powell', 'cobyla', 'cobyqa') and bool(jac):
|
| 577 |
+
warn('Method %s does not use gradient information (jac).' % method,
|
| 578 |
+
RuntimeWarning, stacklevel=2)
|
| 579 |
+
# - hess
|
| 580 |
+
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
|
| 581 |
+
'trust-krylov', 'trust-exact', '_custom') and hess is not None:
|
| 582 |
+
warn('Method %s does not use Hessian information (hess).' % method,
|
| 583 |
+
RuntimeWarning, stacklevel=2)
|
| 584 |
+
# - hessp
|
| 585 |
+
if meth not in ('newton-cg', 'trust-ncg', 'trust-constr',
|
| 586 |
+
'trust-krylov', '_custom') \
|
| 587 |
+
and hessp is not None:
|
| 588 |
+
warn('Method %s does not use Hessian-vector product '
|
| 589 |
+
'information (hessp).' % method,
|
| 590 |
+
RuntimeWarning, stacklevel=2)
|
| 591 |
+
# - constraints or bounds
|
| 592 |
+
if (meth not in ('cobyla', 'cobyqa', 'slsqp', 'trust-constr', '_custom') and
|
| 593 |
+
np.any(constraints)):
|
| 594 |
+
warn('Method %s cannot handle constraints.' % method,
|
| 595 |
+
RuntimeWarning, stacklevel=2)
|
| 596 |
+
if meth not in (
|
| 597 |
+
'nelder-mead', 'powell', 'l-bfgs-b', 'cobyla', 'cobyqa', 'slsqp',
|
| 598 |
+
'tnc', 'trust-constr', '_custom') and bounds is not None:
|
| 599 |
+
warn('Method %s cannot handle bounds.' % method,
|
| 600 |
+
RuntimeWarning, stacklevel=2)
|
| 601 |
+
# - return_all
|
| 602 |
+
if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'cobyqa', 'slsqp') and
|
| 603 |
+
options.get('return_all', False)):
|
| 604 |
+
warn('Method %s does not support the return_all option.' % method,
|
| 605 |
+
RuntimeWarning, stacklevel=2)
|
| 606 |
+
|
| 607 |
+
# check gradient vector
|
| 608 |
+
if callable(jac):
|
| 609 |
+
pass
|
| 610 |
+
elif jac is True:
|
| 611 |
+
# fun returns func and grad
|
| 612 |
+
fun = MemoizeJac(fun)
|
| 613 |
+
jac = fun.derivative
|
| 614 |
+
elif (jac in FD_METHODS and
|
| 615 |
+
meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
|
| 616 |
+
# finite differences with relative step
|
| 617 |
+
pass
|
| 618 |
+
elif meth in ['trust-constr']:
|
| 619 |
+
# default jac calculation for this method
|
| 620 |
+
jac = '2-point'
|
| 621 |
+
elif jac is None or bool(jac) is False:
|
| 622 |
+
# this will cause e.g. LBFGS to use forward difference, absolute step
|
| 623 |
+
jac = None
|
| 624 |
+
else:
|
| 625 |
+
# default if jac option is not understood
|
| 626 |
+
jac = None
|
| 627 |
+
|
| 628 |
+
# set default tolerances
|
| 629 |
+
if tol is not None:
|
| 630 |
+
options = dict(options)
|
| 631 |
+
if meth == 'nelder-mead':
|
| 632 |
+
options.setdefault('xatol', tol)
|
| 633 |
+
options.setdefault('fatol', tol)
|
| 634 |
+
if meth in ('newton-cg', 'powell', 'tnc'):
|
| 635 |
+
options.setdefault('xtol', tol)
|
| 636 |
+
if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
|
| 637 |
+
options.setdefault('ftol', tol)
|
| 638 |
+
if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
|
| 639 |
+
'trust-ncg', 'trust-exact', 'trust-krylov'):
|
| 640 |
+
options.setdefault('gtol', tol)
|
| 641 |
+
if meth in ('cobyla', '_custom'):
|
| 642 |
+
options.setdefault('tol', tol)
|
| 643 |
+
if meth == 'cobyqa':
|
| 644 |
+
options.setdefault('final_tr_radius', tol)
|
| 645 |
+
if meth == 'trust-constr':
|
| 646 |
+
options.setdefault('xtol', tol)
|
| 647 |
+
options.setdefault('gtol', tol)
|
| 648 |
+
options.setdefault('barrier_tol', tol)
|
| 649 |
+
|
| 650 |
+
if meth == '_custom':
|
| 651 |
+
# custom method called before bounds and constraints are 'standardised'
|
| 652 |
+
# custom method should be able to accept whatever bounds/constraints
|
| 653 |
+
# are provided to it.
|
| 654 |
+
return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
|
| 655 |
+
bounds=bounds, constraints=constraints,
|
| 656 |
+
callback=callback, **options)
|
| 657 |
+
|
| 658 |
+
constraints = standardize_constraints(constraints, x0, meth)
|
| 659 |
+
|
| 660 |
+
remove_vars = False
|
| 661 |
+
if bounds is not None:
|
| 662 |
+
# convert to new-style bounds so we only have to consider one case
|
| 663 |
+
bounds = standardize_bounds(bounds, x0, 'new')
|
| 664 |
+
bounds = _validate_bounds(bounds, x0, meth)
|
| 665 |
+
|
| 666 |
+
if meth in {"tnc", "slsqp", "l-bfgs-b"}:
|
| 667 |
+
# These methods can't take the finite-difference derivatives they
|
| 668 |
+
# need when a variable is fixed by the bounds. To avoid this issue,
|
| 669 |
+
# remove fixed variables from the problem.
|
| 670 |
+
# NOTE: if this list is expanded, then be sure to update the
|
| 671 |
+
# accompanying tests and test_optimize.eb_data. Consider also if
|
| 672 |
+
# default OptimizeResult will need updating.
|
| 673 |
+
|
| 674 |
+
# determine whether any variables are fixed
|
| 675 |
+
i_fixed = (bounds.lb == bounds.ub)
|
| 676 |
+
|
| 677 |
+
if np.all(i_fixed):
|
| 678 |
+
# all the parameters are fixed, a minimizer is not able to do
|
| 679 |
+
# anything
|
| 680 |
+
return _optimize_result_for_equal_bounds(
|
| 681 |
+
fun, bounds, meth, args=args, constraints=constraints
|
| 682 |
+
)
|
| 683 |
+
|
| 684 |
+
# determine whether finite differences are needed for any grad/jac
|
| 685 |
+
fd_needed = (not callable(jac))
|
| 686 |
+
for con in constraints:
|
| 687 |
+
if not callable(con.get('jac', None)):
|
| 688 |
+
fd_needed = True
|
| 689 |
+
|
| 690 |
+
# If finite differences are ever used, remove all fixed variables
|
| 691 |
+
# Always remove fixed variables for TNC; see gh-14565
|
| 692 |
+
remove_vars = i_fixed.any() and (fd_needed or meth == "tnc")
|
| 693 |
+
if remove_vars:
|
| 694 |
+
x_fixed = (bounds.lb)[i_fixed]
|
| 695 |
+
x0 = x0[~i_fixed]
|
| 696 |
+
bounds = _remove_from_bounds(bounds, i_fixed)
|
| 697 |
+
fun = _remove_from_func(fun, i_fixed, x_fixed)
|
| 698 |
+
if callable(callback):
|
| 699 |
+
callback = _remove_from_func(callback, i_fixed, x_fixed)
|
| 700 |
+
if callable(jac):
|
| 701 |
+
jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)
|
| 702 |
+
|
| 703 |
+
# make a copy of the constraints so the user's version doesn't
|
| 704 |
+
# get changed. (Shallow copy is ok)
|
| 705 |
+
constraints = [con.copy() for con in constraints]
|
| 706 |
+
for con in constraints: # yes, guaranteed to be a list
|
| 707 |
+
con['fun'] = _remove_from_func(con['fun'], i_fixed,
|
| 708 |
+
x_fixed, min_dim=1,
|
| 709 |
+
remove=0)
|
| 710 |
+
if callable(con.get('jac', None)):
|
| 711 |
+
con['jac'] = _remove_from_func(con['jac'], i_fixed,
|
| 712 |
+
x_fixed, min_dim=2,
|
| 713 |
+
remove=1)
|
| 714 |
+
bounds = standardize_bounds(bounds, x0, meth)
|
| 715 |
+
|
| 716 |
+
callback = _wrap_callback(callback, meth)
|
| 717 |
+
|
| 718 |
+
if meth == 'nelder-mead':
|
| 719 |
+
res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
|
| 720 |
+
**options)
|
| 721 |
+
elif meth == 'powell':
|
| 722 |
+
res = _minimize_powell(fun, x0, args, callback, bounds, **options)
|
| 723 |
+
elif meth == 'cg':
|
| 724 |
+
res = _minimize_cg(fun, x0, args, jac, callback, **options)
|
| 725 |
+
elif meth == 'bfgs':
|
| 726 |
+
res = _minimize_bfgs(fun, x0, args, jac, callback, **options)
|
| 727 |
+
elif meth == 'newton-cg':
|
| 728 |
+
res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
|
| 729 |
+
**options)
|
| 730 |
+
elif meth == 'l-bfgs-b':
|
| 731 |
+
res = _minimize_lbfgsb(fun, x0, args, jac, bounds,
|
| 732 |
+
callback=callback, **options)
|
| 733 |
+
elif meth == 'tnc':
|
| 734 |
+
res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
|
| 735 |
+
**options)
|
| 736 |
+
elif meth == 'cobyla':
|
| 737 |
+
res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
|
| 738 |
+
bounds=bounds, **options)
|
| 739 |
+
elif meth == 'cobyqa':
|
| 740 |
+
res = _minimize_cobyqa(fun, x0, args, bounds, constraints, callback,
|
| 741 |
+
**options)
|
| 742 |
+
elif meth == 'slsqp':
|
| 743 |
+
res = _minimize_slsqp(fun, x0, args, jac, bounds,
|
| 744 |
+
constraints, callback=callback, **options)
|
| 745 |
+
elif meth == 'trust-constr':
|
| 746 |
+
res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
|
| 747 |
+
bounds, constraints,
|
| 748 |
+
callback=callback, **options)
|
| 749 |
+
elif meth == 'dogleg':
|
| 750 |
+
res = _minimize_dogleg(fun, x0, args, jac, hess,
|
| 751 |
+
callback=callback, **options)
|
| 752 |
+
elif meth == 'trust-ncg':
|
| 753 |
+
res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
|
| 754 |
+
callback=callback, **options)
|
| 755 |
+
elif meth == 'trust-krylov':
|
| 756 |
+
res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
|
| 757 |
+
callback=callback, **options)
|
| 758 |
+
elif meth == 'trust-exact':
|
| 759 |
+
res = _minimize_trustregion_exact(fun, x0, args, jac, hess,
|
| 760 |
+
callback=callback, **options)
|
| 761 |
+
else:
|
| 762 |
+
raise ValueError('Unknown solver %s' % method)
|
| 763 |
+
|
| 764 |
+
if remove_vars:
|
| 765 |
+
res.x = _add_to_array(res.x, i_fixed, x_fixed)
|
| 766 |
+
res.jac = _add_to_array(res.jac, i_fixed, np.nan)
|
| 767 |
+
if "hess_inv" in res:
|
| 768 |
+
res.hess_inv = None # unknown
|
| 769 |
+
|
| 770 |
+
if getattr(callback, 'stop_iteration', False):
|
| 771 |
+
res.success = False
|
| 772 |
+
res.status = 99
|
| 773 |
+
res.message = "`callback` raised `StopIteration`."
|
| 774 |
+
|
| 775 |
+
return res
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
def minimize_scalar(fun, bracket=None, bounds=None, args=(),
                    method=None, tol=None, options=None):
    """Local minimization of a scalar function of one variable.

    Parameters
    ----------
    fun : callable
        Objective function; must return a scalar.
    bracket : sequence, optional
        For methods 'brent' and 'golden', the bracketing interval: either a
        triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and
        ``fun(xb) < fun(xa) and fun(xb) < fun(xc)``, or a pair ``(xa, xb)``
        used as initial points for a downhill bracket search.  The minimizer
        ``res.x`` will not necessarily satisfy ``xa <= res.x <= xb``.
    bounds : sequence, optional
        For method 'bounded', two finite items giving the optimization
        bounds; mandatory for that method.
    args : tuple, optional
        Extra arguments passed to the objective function.
    method : str or callable, optional
        Type of solver: 'Brent', 'Bounded', 'Golden', or a custom callable.
        Default is 'Bounded' if `bounds` are provided and 'Brent' otherwise.
    tol : float, optional
        Tolerance for termination; mapped onto each solver's specific
        tolerance option.  For detailed control, use solver-specific options.
    options : dict, optional
        A dictionary of solver options, e.g. ``maxiter`` (int) and ``disp``
        (bool).  See ``show_options()`` for solver-specific options.

    Returns
    -------
    res : OptimizeResult
        The optimization result.  Important attributes are ``x`` (the
        solution), ``success`` and ``message``.

    Notes
    -----
    A custom callable `method` is invoked as
    ``method(fun, args, **kwargs, **options)`` and must return an
    `OptimizeResult`.  Brent and Golden do not guarantee success unless a
    valid three-point bracket is found; all methods are local minimizers.
    """
    # Allow a single extra argument to be passed without wrapping it.
    if not isinstance(args, tuple):
        args = (args,)

    # Resolve the solver name; a callable dispatches through '_custom'.
    if callable(method):
        solver = "_custom"
    elif method is None:
        solver = 'bounded' if bounds is not None else 'brent'
    else:
        solver = method.lower()

    if options is None:
        options = {}

    # Brent/Golden are bracket-based and cannot honour `bounds`.
    if bounds is not None and solver in {'brent', 'golden'}:
        raise ValueError(
            f"Use of `bounds` is incompatible with 'method={method}'.")

    # Map the generic `tol` onto the solver-specific tolerance option.
    # Copy `options` first so the caller's dict is never mutated.
    if tol is not None:
        options = dict(options)
        if solver == 'bounded' and 'xatol' not in options:
            warn("Method 'bounded' does not support relative tolerance in x; "
                 "defaulting to absolute tolerance.",
                 RuntimeWarning, stacklevel=2)
            options['xatol'] = tol
        elif solver == '_custom':
            options.setdefault('tol', tol)
        else:
            options.setdefault('xtol', tol)

    # A boolean "disp" option is replaced by an integer verbosity level.
    if isinstance(options.get('disp'), bool):
        options['disp'] = 2 * int(options['disp'])

    # Dispatch to the selected solver.
    if solver == '_custom':
        result = method(fun, args=args, bracket=bracket, bounds=bounds,
                        **options)
    elif solver == 'brent':
        result = _recover_from_bracket_error(_minimize_scalar_brent,
                                             fun, bracket, args, **options)
    elif solver == 'golden':
        result = _recover_from_bracket_error(_minimize_scalar_golden,
                                             fun, bracket, args, **options)
    elif solver == 'bounded':
        if bounds is None:
            raise ValueError('The `bounds` parameter is mandatory for '
                             'method `bounded`.')
        result = _minimize_scalar_bounded(fun, bounds, args, **options)
    else:
        raise ValueError('Unknown solver %s' % method)

    # gh-16196 reported inconsistencies in the output shape of `res.x`;
    # make it match the shape of `res.fun` (future-proof for vectorization).
    result.fun = np.asarray(result.fun)[()]
    result.x = np.reshape(result.x, result.fun.shape)[()]
    return result
|
| 979 |
+
|
| 980 |
+
|
| 981 |
+
def _remove_from_bounds(bounds, i_fixed):
|
| 982 |
+
"""Removes fixed variables from a `Bounds` instance"""
|
| 983 |
+
lb = bounds.lb[~i_fixed]
|
| 984 |
+
ub = bounds.ub[~i_fixed]
|
| 985 |
+
return Bounds(lb, ub) # don't mutate original Bounds object
|
| 986 |
+
|
| 987 |
+
|
| 988 |
+
def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0):
|
| 989 |
+
"""Wraps a function such that fixed variables need not be passed in"""
|
| 990 |
+
def fun_out(x_in, *args, **kwargs):
|
| 991 |
+
x_out = np.zeros_like(i_fixed, dtype=x_in.dtype)
|
| 992 |
+
x_out[i_fixed] = x_fixed
|
| 993 |
+
x_out[~i_fixed] = x_in
|
| 994 |
+
y_out = fun_in(x_out, *args, **kwargs)
|
| 995 |
+
y_out = np.array(y_out)
|
| 996 |
+
|
| 997 |
+
if min_dim == 1:
|
| 998 |
+
y_out = np.atleast_1d(y_out)
|
| 999 |
+
elif min_dim == 2:
|
| 1000 |
+
y_out = np.atleast_2d(y_out)
|
| 1001 |
+
|
| 1002 |
+
if remove == 1:
|
| 1003 |
+
y_out = y_out[..., ~i_fixed]
|
| 1004 |
+
elif remove == 2:
|
| 1005 |
+
y_out = y_out[~i_fixed, ~i_fixed]
|
| 1006 |
+
|
| 1007 |
+
return y_out
|
| 1008 |
+
return fun_out
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
def _add_to_array(x_in, i_fixed, x_fixed):
|
| 1012 |
+
"""Adds fixed variables back to an array"""
|
| 1013 |
+
i_free = ~i_fixed
|
| 1014 |
+
if x_in.ndim == 2:
|
| 1015 |
+
i_free = i_free[:, None] @ i_free[None, :]
|
| 1016 |
+
x_out = np.zeros_like(i_free, dtype=x_in.dtype)
|
| 1017 |
+
x_out[~i_free] = x_fixed
|
| 1018 |
+
x_out[i_free] = x_in.ravel()
|
| 1019 |
+
return x_out
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
def _validate_bounds(bounds, x0, meth):
|
| 1023 |
+
"""Check that bounds are valid."""
|
| 1024 |
+
|
| 1025 |
+
msg = "An upper bound is less than the corresponding lower bound."
|
| 1026 |
+
if np.any(bounds.ub < bounds.lb):
|
| 1027 |
+
raise ValueError(msg)
|
| 1028 |
+
|
| 1029 |
+
msg = "The number of bounds is not compatible with the length of `x0`."
|
| 1030 |
+
try:
|
| 1031 |
+
bounds.lb = np.broadcast_to(bounds.lb, x0.shape)
|
| 1032 |
+
bounds.ub = np.broadcast_to(bounds.ub, x0.shape)
|
| 1033 |
+
except Exception as e:
|
| 1034 |
+
raise ValueError(msg) from e
|
| 1035 |
+
|
| 1036 |
+
return bounds
|
| 1037 |
+
|
| 1038 |
+
def standardize_bounds(bounds, x0, meth):
    """Convert `bounds` into the representation the given solver expects.

    New-style solvers work with a `Bounds` instance; legacy solvers expect
    a sequence of ``(lb, ub)`` pairs.  `bounds` is returned unchanged when
    it is already in the required form (or `meth` is not recognised here).
    """
    wants_new = {'trust-constr', 'powell', 'nelder-mead', 'cobyla', 'cobyqa',
                 'new'}
    wants_old = ('l-bfgs-b', 'tnc', 'slsqp', 'old')
    if meth in wants_new and not isinstance(bounds, Bounds):
        lb, ub = old_bound_to_new(bounds)
        bounds = Bounds(lb, ub)
    elif meth in wants_old and isinstance(bounds, Bounds):
        bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])
    return bounds
|
| 1049 |
+
|
| 1050 |
+
|
| 1051 |
+
def standardize_constraints(constraints, x0, meth):
|
| 1052 |
+
"""Converts constraints to the form required by the solver."""
|
| 1053 |
+
all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)
|
| 1054 |
+
new_constraint_types = all_constraint_types[:-1]
|
| 1055 |
+
if constraints is None:
|
| 1056 |
+
constraints = []
|
| 1057 |
+
elif isinstance(constraints, all_constraint_types):
|
| 1058 |
+
constraints = [constraints]
|
| 1059 |
+
else:
|
| 1060 |
+
constraints = list(constraints) # ensure it's a mutable sequence
|
| 1061 |
+
|
| 1062 |
+
if meth in ['trust-constr', 'cobyqa', 'new']:
|
| 1063 |
+
for i, con in enumerate(constraints):
|
| 1064 |
+
if not isinstance(con, new_constraint_types):
|
| 1065 |
+
constraints[i] = old_constraint_to_new(i, con)
|
| 1066 |
+
else:
|
| 1067 |
+
# iterate over copy, changing original
|
| 1068 |
+
for i, con in enumerate(list(constraints)):
|
| 1069 |
+
if isinstance(con, new_constraint_types):
|
| 1070 |
+
old_constraints = new_constraint_to_old(con, x0)
|
| 1071 |
+
constraints[i] = old_constraints[0]
|
| 1072 |
+
constraints.extend(old_constraints[1:]) # appends 1 if present
|
| 1073 |
+
|
| 1074 |
+
return constraints
|
| 1075 |
+
|
| 1076 |
+
|
| 1077 |
+
def _optimize_result_for_equal_bounds(
|
| 1078 |
+
fun, bounds, method, args=(), constraints=()
|
| 1079 |
+
):
|
| 1080 |
+
"""
|
| 1081 |
+
Provides a default OptimizeResult for when a bounded minimization method
|
| 1082 |
+
has (lb == ub).all().
|
| 1083 |
+
|
| 1084 |
+
Parameters
|
| 1085 |
+
----------
|
| 1086 |
+
fun: callable
|
| 1087 |
+
bounds: Bounds
|
| 1088 |
+
method: str
|
| 1089 |
+
constraints: Constraint
|
| 1090 |
+
"""
|
| 1091 |
+
success = True
|
| 1092 |
+
message = 'All independent variables were fixed by bounds.'
|
| 1093 |
+
|
| 1094 |
+
# bounds is new-style
|
| 1095 |
+
x0 = bounds.lb
|
| 1096 |
+
|
| 1097 |
+
if constraints:
|
| 1098 |
+
message = ("All independent variables were fixed by bounds at values"
|
| 1099 |
+
" that satisfy the constraints.")
|
| 1100 |
+
constraints = standardize_constraints(constraints, x0, 'new')
|
| 1101 |
+
|
| 1102 |
+
maxcv = 0
|
| 1103 |
+
for c in constraints:
|
| 1104 |
+
pc = PreparedConstraint(c, x0)
|
| 1105 |
+
violation = pc.violation(x0)
|
| 1106 |
+
if np.sum(violation):
|
| 1107 |
+
maxcv = max(maxcv, np.max(violation))
|
| 1108 |
+
success = False
|
| 1109 |
+
message = (f"All independent variables were fixed by bounds, but "
|
| 1110 |
+
f"the independent variables do not satisfy the "
|
| 1111 |
+
f"constraints exactly. (Maximum violation: {maxcv}).")
|
| 1112 |
+
|
| 1113 |
+
return OptimizeResult(
|
| 1114 |
+
x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1,
|
| 1115 |
+
njev=0, nhev=0,
|
| 1116 |
+
)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_minpack2.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (61.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_nnls.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy.linalg import solve, LinAlgWarning
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
__all__ = ['nnls']
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def nnls(A, b, maxiter=None, *, atol=None):
|
| 9 |
+
"""
|
| 10 |
+
Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``.
|
| 11 |
+
|
| 12 |
+
This problem, often called as NonNegative Least Squares, is a convex
|
| 13 |
+
optimization problem with convex constraints. It typically arises when
|
| 14 |
+
the ``x`` models quantities for which only nonnegative values are
|
| 15 |
+
attainable; weight of ingredients, component costs and so on.
|
| 16 |
+
|
| 17 |
+
Parameters
|
| 18 |
+
----------
|
| 19 |
+
A : (m, n) ndarray
|
| 20 |
+
Coefficient array
|
| 21 |
+
b : (m,) ndarray, float
|
| 22 |
+
Right-hand side vector.
|
| 23 |
+
maxiter: int, optional
|
| 24 |
+
Maximum number of iterations, optional. Default value is ``3 * n``.
|
| 25 |
+
atol: float
|
| 26 |
+
Tolerance value used in the algorithm to assess closeness to zero in
|
| 27 |
+
the projected residual ``(A.T @ (A x - b)`` entries. Increasing this
|
| 28 |
+
value relaxes the solution constraints. A typical relaxation value can
|
| 29 |
+
be selected as ``max(m, n) * np.linalg.norm(a, 1) * np.spacing(1.)``.
|
| 30 |
+
This value is not set as default since the norm operation becomes
|
| 31 |
+
expensive for large problems hence can be used only when necessary.
|
| 32 |
+
|
| 33 |
+
Returns
|
| 34 |
+
-------
|
| 35 |
+
x : ndarray
|
| 36 |
+
Solution vector.
|
| 37 |
+
rnorm : float
|
| 38 |
+
The 2-norm of the residual, ``|| Ax-b ||_2``.
|
| 39 |
+
|
| 40 |
+
See Also
|
| 41 |
+
--------
|
| 42 |
+
lsq_linear : Linear least squares with bounds on the variables
|
| 43 |
+
|
| 44 |
+
Notes
|
| 45 |
+
-----
|
| 46 |
+
The code is based on [2]_ which is an improved version of the classical
|
| 47 |
+
algorithm of [1]_. It utilizes an active set method and solves the KKT
|
| 48 |
+
(Karush-Kuhn-Tucker) conditions for the non-negative least squares problem.
|
| 49 |
+
|
| 50 |
+
References
|
| 51 |
+
----------
|
| 52 |
+
.. [1] : Lawson C., Hanson R.J., "Solving Least Squares Problems", SIAM,
|
| 53 |
+
1995, :doi:`10.1137/1.9781611971217`
|
| 54 |
+
.. [2] : Bro, Rasmus and de Jong, Sijmen, "A Fast Non-Negativity-
|
| 55 |
+
Constrained Least Squares Algorithm", Journal Of Chemometrics, 1997,
|
| 56 |
+
:doi:`10.1002/(SICI)1099-128X(199709/10)11:5<393::AID-CEM483>3.0.CO;2-L`
|
| 57 |
+
|
| 58 |
+
Examples
|
| 59 |
+
--------
|
| 60 |
+
>>> import numpy as np
|
| 61 |
+
>>> from scipy.optimize import nnls
|
| 62 |
+
...
|
| 63 |
+
>>> A = np.array([[1, 0], [1, 0], [0, 1]])
|
| 64 |
+
>>> b = np.array([2, 1, 1])
|
| 65 |
+
>>> nnls(A, b)
|
| 66 |
+
(array([1.5, 1. ]), 0.7071067811865475)
|
| 67 |
+
|
| 68 |
+
>>> b = np.array([-1, -1, -1])
|
| 69 |
+
>>> nnls(A, b)
|
| 70 |
+
(array([0., 0.]), 1.7320508075688772)
|
| 71 |
+
|
| 72 |
+
"""
|
| 73 |
+
|
| 74 |
+
A = np.asarray_chkfinite(A)
|
| 75 |
+
b = np.asarray_chkfinite(b)
|
| 76 |
+
|
| 77 |
+
if len(A.shape) != 2:
|
| 78 |
+
raise ValueError("Expected a two-dimensional array (matrix)" +
|
| 79 |
+
f", but the shape of A is {A.shape}")
|
| 80 |
+
if len(b.shape) != 1:
|
| 81 |
+
raise ValueError("Expected a one-dimensional array (vector)" +
|
| 82 |
+
f", but the shape of b is {b.shape}")
|
| 83 |
+
|
| 84 |
+
m, n = A.shape
|
| 85 |
+
|
| 86 |
+
if m != b.shape[0]:
|
| 87 |
+
raise ValueError(
|
| 88 |
+
"Incompatible dimensions. The first dimension of " +
|
| 89 |
+
f"A is {m}, while the shape of b is {(b.shape[0], )}")
|
| 90 |
+
|
| 91 |
+
x, rnorm, mode = _nnls(A, b, maxiter, tol=atol)
|
| 92 |
+
if mode != 1:
|
| 93 |
+
raise RuntimeError("Maximum number of iterations reached.")
|
| 94 |
+
|
| 95 |
+
return x, rnorm
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _nnls(A, b, maxiter=None, tol=None):
|
| 99 |
+
"""
|
| 100 |
+
This is a single RHS algorithm from ref [2] above. For multiple RHS
|
| 101 |
+
support, the algorithm is given in :doi:`10.1002/cem.889`
|
| 102 |
+
"""
|
| 103 |
+
m, n = A.shape
|
| 104 |
+
|
| 105 |
+
AtA = A.T @ A
|
| 106 |
+
Atb = b @ A # Result is 1D - let NumPy figure it out
|
| 107 |
+
|
| 108 |
+
if not maxiter:
|
| 109 |
+
maxiter = 3*n
|
| 110 |
+
if tol is None:
|
| 111 |
+
tol = 10 * max(m, n) * np.spacing(1.)
|
| 112 |
+
|
| 113 |
+
# Initialize vars
|
| 114 |
+
x = np.zeros(n, dtype=np.float64)
|
| 115 |
+
s = np.zeros(n, dtype=np.float64)
|
| 116 |
+
# Inactive constraint switches
|
| 117 |
+
P = np.zeros(n, dtype=bool)
|
| 118 |
+
|
| 119 |
+
# Projected residual
|
| 120 |
+
w = Atb.copy().astype(np.float64) # x=0. Skip (-AtA @ x) term
|
| 121 |
+
|
| 122 |
+
# Overall iteration counter
|
| 123 |
+
# Outer loop is not counted, inner iter is counted across outer spins
|
| 124 |
+
iter = 0
|
| 125 |
+
|
| 126 |
+
while (not P.all()) and (w[~P] > tol).any(): # B
|
| 127 |
+
# Get the "most" active coeff index and move to inactive set
|
| 128 |
+
k = np.argmax(w * (~P)) # B.2
|
| 129 |
+
P[k] = True # B.3
|
| 130 |
+
|
| 131 |
+
# Iteration solution
|
| 132 |
+
s[:] = 0.
|
| 133 |
+
# B.4
|
| 134 |
+
with warnings.catch_warnings():
|
| 135 |
+
warnings.filterwarnings('ignore', message='Ill-conditioned matrix',
|
| 136 |
+
category=LinAlgWarning)
|
| 137 |
+
s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym', check_finite=False)
|
| 138 |
+
|
| 139 |
+
# Inner loop
|
| 140 |
+
while (iter < maxiter) and (s[P].min() < 0): # C.1
|
| 141 |
+
iter += 1
|
| 142 |
+
inds = P * (s < 0)
|
| 143 |
+
alpha = (x[inds] / (x[inds] - s[inds])).min() # C.2
|
| 144 |
+
x *= (1 - alpha)
|
| 145 |
+
x += alpha*s
|
| 146 |
+
P[x <= tol] = False
|
| 147 |
+
with warnings.catch_warnings():
|
| 148 |
+
warnings.filterwarnings('ignore', message='Ill-conditioned matrix',
|
| 149 |
+
category=LinAlgWarning)
|
| 150 |
+
s[P] = solve(AtA[np.ix_(P, P)], Atb[P], assume_a='sym',
|
| 151 |
+
check_finite=False)
|
| 152 |
+
s[~P] = 0 # C.6
|
| 153 |
+
|
| 154 |
+
x[:] = s[:]
|
| 155 |
+
w[:] = Atb - AtA @ x
|
| 156 |
+
|
| 157 |
+
if iter == maxiter:
|
| 158 |
+
# Typically following line should return
|
| 159 |
+
# return x, np.linalg.norm(A@x - b), -1
|
| 160 |
+
# however at the top level, -1 raises an exception wasting norm
|
| 161 |
+
# Instead return dummy number 0.
|
| 162 |
+
return x, 0., -1
|
| 163 |
+
|
| 164 |
+
return x, np.linalg.norm(A@x - b), 1
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_numdiff.py
ADDED
|
@@ -0,0 +1,779 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Routines for numerical differentiation."""
|
| 2 |
+
import functools
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.linalg import norm
|
| 5 |
+
|
| 6 |
+
from scipy.sparse.linalg import LinearOperator
|
| 7 |
+
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
|
| 8 |
+
from ._group_columns import group_dense, group_sparse
|
| 9 |
+
from scipy._lib._array_api import atleast_nd, array_namespace
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
|
| 13 |
+
"""Adjust final difference scheme to the presence of bounds.
|
| 14 |
+
|
| 15 |
+
Parameters
|
| 16 |
+
----------
|
| 17 |
+
x0 : ndarray, shape (n,)
|
| 18 |
+
Point at which we wish to estimate derivative.
|
| 19 |
+
h : ndarray, shape (n,)
|
| 20 |
+
Desired absolute finite difference steps.
|
| 21 |
+
num_steps : int
|
| 22 |
+
Number of `h` steps in one direction required to implement finite
|
| 23 |
+
difference scheme. For example, 2 means that we need to evaluate
|
| 24 |
+
f(x0 + 2 * h) or f(x0 - 2 * h)
|
| 25 |
+
scheme : {'1-sided', '2-sided'}
|
| 26 |
+
Whether steps in one or both directions are required. In other
|
| 27 |
+
words '1-sided' applies to forward and backward schemes, '2-sided'
|
| 28 |
+
applies to center schemes.
|
| 29 |
+
lb : ndarray, shape (n,)
|
| 30 |
+
Lower bounds on independent variables.
|
| 31 |
+
ub : ndarray, shape (n,)
|
| 32 |
+
Upper bounds on independent variables.
|
| 33 |
+
|
| 34 |
+
Returns
|
| 35 |
+
-------
|
| 36 |
+
h_adjusted : ndarray, shape (n,)
|
| 37 |
+
Adjusted absolute step sizes. Step size decreases only if a sign flip
|
| 38 |
+
or switching to one-sided scheme doesn't allow to take a full step.
|
| 39 |
+
use_one_sided : ndarray of bool, shape (n,)
|
| 40 |
+
Whether to switch to one-sided scheme. Informative only for
|
| 41 |
+
``scheme='2-sided'``.
|
| 42 |
+
"""
|
| 43 |
+
if scheme == '1-sided':
|
| 44 |
+
use_one_sided = np.ones_like(h, dtype=bool)
|
| 45 |
+
elif scheme == '2-sided':
|
| 46 |
+
h = np.abs(h)
|
| 47 |
+
use_one_sided = np.zeros_like(h, dtype=bool)
|
| 48 |
+
else:
|
| 49 |
+
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
|
| 50 |
+
|
| 51 |
+
if np.all((lb == -np.inf) & (ub == np.inf)):
|
| 52 |
+
return h, use_one_sided
|
| 53 |
+
|
| 54 |
+
h_total = h * num_steps
|
| 55 |
+
h_adjusted = h.copy()
|
| 56 |
+
|
| 57 |
+
lower_dist = x0 - lb
|
| 58 |
+
upper_dist = ub - x0
|
| 59 |
+
|
| 60 |
+
if scheme == '1-sided':
|
| 61 |
+
x = x0 + h_total
|
| 62 |
+
violated = (x < lb) | (x > ub)
|
| 63 |
+
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
|
| 64 |
+
h_adjusted[violated & fitting] *= -1
|
| 65 |
+
|
| 66 |
+
forward = (upper_dist >= lower_dist) & ~fitting
|
| 67 |
+
h_adjusted[forward] = upper_dist[forward] / num_steps
|
| 68 |
+
backward = (upper_dist < lower_dist) & ~fitting
|
| 69 |
+
h_adjusted[backward] = -lower_dist[backward] / num_steps
|
| 70 |
+
elif scheme == '2-sided':
|
| 71 |
+
central = (lower_dist >= h_total) & (upper_dist >= h_total)
|
| 72 |
+
|
| 73 |
+
forward = (upper_dist >= lower_dist) & ~central
|
| 74 |
+
h_adjusted[forward] = np.minimum(
|
| 75 |
+
h[forward], 0.5 * upper_dist[forward] / num_steps)
|
| 76 |
+
use_one_sided[forward] = True
|
| 77 |
+
|
| 78 |
+
backward = (upper_dist < lower_dist) & ~central
|
| 79 |
+
h_adjusted[backward] = -np.minimum(
|
| 80 |
+
h[backward], 0.5 * lower_dist[backward] / num_steps)
|
| 81 |
+
use_one_sided[backward] = True
|
| 82 |
+
|
| 83 |
+
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
|
| 84 |
+
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
|
| 85 |
+
h_adjusted[adjusted_central] = min_dist[adjusted_central]
|
| 86 |
+
use_one_sided[adjusted_central] = False
|
| 87 |
+
|
| 88 |
+
return h_adjusted, use_one_sided
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@functools.lru_cache
|
| 92 |
+
def _eps_for_method(x0_dtype, f0_dtype, method):
|
| 93 |
+
"""
|
| 94 |
+
Calculates relative EPS step to use for a given data type
|
| 95 |
+
and numdiff step method.
|
| 96 |
+
|
| 97 |
+
Progressively smaller steps are used for larger floating point types.
|
| 98 |
+
|
| 99 |
+
Parameters
|
| 100 |
+
----------
|
| 101 |
+
f0_dtype: np.dtype
|
| 102 |
+
dtype of function evaluation
|
| 103 |
+
|
| 104 |
+
x0_dtype: np.dtype
|
| 105 |
+
dtype of parameter vector
|
| 106 |
+
|
| 107 |
+
method: {'2-point', '3-point', 'cs'}
|
| 108 |
+
|
| 109 |
+
Returns
|
| 110 |
+
-------
|
| 111 |
+
EPS: float
|
| 112 |
+
relative step size. May be np.float16, np.float32, np.float64
|
| 113 |
+
|
| 114 |
+
Notes
|
| 115 |
+
-----
|
| 116 |
+
The default relative step will be np.float64. However, if x0 or f0 are
|
| 117 |
+
smaller floating point types (np.float16, np.float32), then the smallest
|
| 118 |
+
floating point type is chosen.
|
| 119 |
+
"""
|
| 120 |
+
# the default EPS value
|
| 121 |
+
EPS = np.finfo(np.float64).eps
|
| 122 |
+
|
| 123 |
+
x0_is_fp = False
|
| 124 |
+
if np.issubdtype(x0_dtype, np.inexact):
|
| 125 |
+
# if you're a floating point type then over-ride the default EPS
|
| 126 |
+
EPS = np.finfo(x0_dtype).eps
|
| 127 |
+
x0_itemsize = np.dtype(x0_dtype).itemsize
|
| 128 |
+
x0_is_fp = True
|
| 129 |
+
|
| 130 |
+
if np.issubdtype(f0_dtype, np.inexact):
|
| 131 |
+
f0_itemsize = np.dtype(f0_dtype).itemsize
|
| 132 |
+
# choose the smallest itemsize between x0 and f0
|
| 133 |
+
if x0_is_fp and f0_itemsize < x0_itemsize:
|
| 134 |
+
EPS = np.finfo(f0_dtype).eps
|
| 135 |
+
|
| 136 |
+
if method in ["2-point", "cs"]:
|
| 137 |
+
return EPS**0.5
|
| 138 |
+
elif method in ["3-point"]:
|
| 139 |
+
return EPS**(1/3)
|
| 140 |
+
else:
|
| 141 |
+
raise RuntimeError("Unknown step method, should be one of "
|
| 142 |
+
"{'2-point', '3-point', 'cs'}")
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _compute_absolute_step(rel_step, x0, f0, method):
|
| 146 |
+
"""
|
| 147 |
+
Computes an absolute step from a relative step for finite difference
|
| 148 |
+
calculation.
|
| 149 |
+
|
| 150 |
+
Parameters
|
| 151 |
+
----------
|
| 152 |
+
rel_step: None or array-like
|
| 153 |
+
Relative step for the finite difference calculation
|
| 154 |
+
x0 : np.ndarray
|
| 155 |
+
Parameter vector
|
| 156 |
+
f0 : np.ndarray or scalar
|
| 157 |
+
method : {'2-point', '3-point', 'cs'}
|
| 158 |
+
|
| 159 |
+
Returns
|
| 160 |
+
-------
|
| 161 |
+
h : float
|
| 162 |
+
The absolute step size
|
| 163 |
+
|
| 164 |
+
Notes
|
| 165 |
+
-----
|
| 166 |
+
`h` will always be np.float64. However, if `x0` or `f0` are
|
| 167 |
+
smaller floating point dtypes (e.g. np.float32), then the absolute
|
| 168 |
+
step size will be calculated from the smallest floating point size.
|
| 169 |
+
"""
|
| 170 |
+
# this is used instead of np.sign(x0) because we need
|
| 171 |
+
# sign_x0 to be 1 when x0 == 0.
|
| 172 |
+
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
|
| 173 |
+
|
| 174 |
+
rstep = _eps_for_method(x0.dtype, f0.dtype, method)
|
| 175 |
+
|
| 176 |
+
if rel_step is None:
|
| 177 |
+
abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))
|
| 178 |
+
else:
|
| 179 |
+
# User has requested specific relative steps.
|
| 180 |
+
# Don't multiply by max(1, abs(x0) because if x0 < 1 then their
|
| 181 |
+
# requested step is not used.
|
| 182 |
+
abs_step = rel_step * sign_x0 * np.abs(x0)
|
| 183 |
+
|
| 184 |
+
# however we don't want an abs_step of 0, which can happen if
|
| 185 |
+
# rel_step is 0, or x0 is 0. Instead, substitute a realistic step
|
| 186 |
+
dx = ((x0 + abs_step) - x0)
|
| 187 |
+
abs_step = np.where(dx == 0,
|
| 188 |
+
rstep * sign_x0 * np.maximum(1.0, np.abs(x0)),
|
| 189 |
+
abs_step)
|
| 190 |
+
|
| 191 |
+
return abs_step
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def _prepare_bounds(bounds, x0):
|
| 195 |
+
"""
|
| 196 |
+
Prepares new-style bounds from a two-tuple specifying the lower and upper
|
| 197 |
+
limits for values in x0. If a value is not bound then the lower/upper bound
|
| 198 |
+
will be expected to be -np.inf/np.inf.
|
| 199 |
+
|
| 200 |
+
Examples
|
| 201 |
+
--------
|
| 202 |
+
>>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
|
| 203 |
+
(array([0., 1., 2.]), array([ 1., 2., inf]))
|
| 204 |
+
"""
|
| 205 |
+
lb, ub = (np.asarray(b, dtype=float) for b in bounds)
|
| 206 |
+
if lb.ndim == 0:
|
| 207 |
+
lb = np.resize(lb, x0.shape)
|
| 208 |
+
|
| 209 |
+
if ub.ndim == 0:
|
| 210 |
+
ub = np.resize(ub, x0.shape)
|
| 211 |
+
|
| 212 |
+
return lb, ub
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def group_columns(A, order=0):
|
| 216 |
+
"""Group columns of a 2-D matrix for sparse finite differencing [1]_.
|
| 217 |
+
|
| 218 |
+
Two columns are in the same group if in each row at least one of them
|
| 219 |
+
has zero. A greedy sequential algorithm is used to construct groups.
|
| 220 |
+
|
| 221 |
+
Parameters
|
| 222 |
+
----------
|
| 223 |
+
A : array_like or sparse matrix, shape (m, n)
|
| 224 |
+
Matrix of which to group columns.
|
| 225 |
+
order : int, iterable of int with shape (n,) or None
|
| 226 |
+
Permutation array which defines the order of columns enumeration.
|
| 227 |
+
If int or None, a random permutation is used with `order` used as
|
| 228 |
+
a random seed. Default is 0, that is use a random permutation but
|
| 229 |
+
guarantee repeatability.
|
| 230 |
+
|
| 231 |
+
Returns
|
| 232 |
+
-------
|
| 233 |
+
groups : ndarray of int, shape (n,)
|
| 234 |
+
Contains values from 0 to n_groups-1, where n_groups is the number
|
| 235 |
+
of found groups. Each value ``groups[i]`` is an index of a group to
|
| 236 |
+
which ith column assigned. The procedure was helpful only if
|
| 237 |
+
n_groups is significantly less than n.
|
| 238 |
+
|
| 239 |
+
References
|
| 240 |
+
----------
|
| 241 |
+
.. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
|
| 242 |
+
sparse Jacobian matrices", Journal of the Institute of Mathematics
|
| 243 |
+
and its Applications, 13 (1974), pp. 117-120.
|
| 244 |
+
"""
|
| 245 |
+
if issparse(A):
|
| 246 |
+
A = csc_matrix(A)
|
| 247 |
+
else:
|
| 248 |
+
A = np.atleast_2d(A)
|
| 249 |
+
A = (A != 0).astype(np.int32)
|
| 250 |
+
|
| 251 |
+
if A.ndim != 2:
|
| 252 |
+
raise ValueError("`A` must be 2-dimensional.")
|
| 253 |
+
|
| 254 |
+
m, n = A.shape
|
| 255 |
+
|
| 256 |
+
if order is None or np.isscalar(order):
|
| 257 |
+
rng = np.random.RandomState(order)
|
| 258 |
+
order = rng.permutation(n)
|
| 259 |
+
else:
|
| 260 |
+
order = np.asarray(order)
|
| 261 |
+
if order.shape != (n,):
|
| 262 |
+
raise ValueError("`order` has incorrect shape.")
|
| 263 |
+
|
| 264 |
+
A = A[:, order]
|
| 265 |
+
|
| 266 |
+
if issparse(A):
|
| 267 |
+
groups = group_sparse(m, n, A.indices, A.indptr)
|
| 268 |
+
else:
|
| 269 |
+
groups = group_dense(m, n, A)
|
| 270 |
+
|
| 271 |
+
groups[order] = groups.copy()
|
| 272 |
+
|
| 273 |
+
return groups
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
|
| 277 |
+
f0=None, bounds=(-np.inf, np.inf), sparsity=None,
|
| 278 |
+
as_linear_operator=False, args=(), kwargs={}):
|
| 279 |
+
"""Compute finite difference approximation of the derivatives of a
|
| 280 |
+
vector-valued function.
|
| 281 |
+
|
| 282 |
+
If a function maps from R^n to R^m, its derivatives form m-by-n matrix
|
| 283 |
+
called the Jacobian, where an element (i, j) is a partial derivative of
|
| 284 |
+
f[i] with respect to x[j].
|
| 285 |
+
|
| 286 |
+
Parameters
|
| 287 |
+
----------
|
| 288 |
+
fun : callable
|
| 289 |
+
Function of which to estimate the derivatives. The argument x
|
| 290 |
+
passed to this function is ndarray of shape (n,) (never a scalar
|
| 291 |
+
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
|
| 292 |
+
x0 : array_like of shape (n,) or float
|
| 293 |
+
Point at which to estimate the derivatives. Float will be converted
|
| 294 |
+
to a 1-D array.
|
| 295 |
+
method : {'3-point', '2-point', 'cs'}, optional
|
| 296 |
+
Finite difference method to use:
|
| 297 |
+
- '2-point' - use the first order accuracy forward or backward
|
| 298 |
+
difference.
|
| 299 |
+
- '3-point' - use central difference in interior points and the
|
| 300 |
+
second order accuracy forward or backward difference
|
| 301 |
+
near the boundary.
|
| 302 |
+
- 'cs' - use a complex-step finite difference scheme. This assumes
|
| 303 |
+
that the user function is real-valued and can be
|
| 304 |
+
analytically continued to the complex plane. Otherwise,
|
| 305 |
+
produces bogus results.
|
| 306 |
+
rel_step : None or array_like, optional
|
| 307 |
+
Relative step size to use. If None (default) the absolute step size is
|
| 308 |
+
computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with
|
| 309 |
+
`rel_step` being selected automatically, see Notes. Otherwise
|
| 310 |
+
``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the
|
| 311 |
+
sign of `h` is ignored. The calculated step size is possibly adjusted
|
| 312 |
+
to fit into the bounds.
|
| 313 |
+
abs_step : array_like, optional
|
| 314 |
+
Absolute step size to use, possibly adjusted to fit into the bounds.
|
| 315 |
+
For ``method='3-point'`` the sign of `abs_step` is ignored. By default
|
| 316 |
+
relative steps are used, only if ``abs_step is not None`` are absolute
|
| 317 |
+
steps used.
|
| 318 |
+
f0 : None or array_like, optional
|
| 319 |
+
If not None it is assumed to be equal to ``fun(x0)``, in this case
|
| 320 |
+
the ``fun(x0)`` is not called. Default is None.
|
| 321 |
+
bounds : tuple of array_like, optional
|
| 322 |
+
Lower and upper bounds on independent variables. Defaults to no bounds.
|
| 323 |
+
Each bound must match the size of `x0` or be a scalar, in the latter
|
| 324 |
+
case the bound will be the same for all variables. Use it to limit the
|
| 325 |
+
range of function evaluation. Bounds checking is not implemented
|
| 326 |
+
when `as_linear_operator` is True.
|
| 327 |
+
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
|
| 328 |
+
Defines a sparsity structure of the Jacobian matrix. If the Jacobian
|
| 329 |
+
matrix is known to have only few non-zero elements in each row, then
|
| 330 |
+
it's possible to estimate its several columns by a single function
|
| 331 |
+
evaluation [3]_. To perform such economic computations two ingredients
|
| 332 |
+
are required:
|
| 333 |
+
|
| 334 |
+
* structure : array_like or sparse matrix of shape (m, n). A zero
|
| 335 |
+
element means that a corresponding element of the Jacobian
|
| 336 |
+
identically equals to zero.
|
| 337 |
+
* groups : array_like of shape (n,). A column grouping for a given
|
| 338 |
+
sparsity structure, use `group_columns` to obtain it.
|
| 339 |
+
|
| 340 |
+
A single array or a sparse matrix is interpreted as a sparsity
|
| 341 |
+
structure, and groups are computed inside the function. A tuple is
|
| 342 |
+
interpreted as (structure, groups). If None (default), a standard
|
| 343 |
+
dense differencing will be used.
|
| 344 |
+
|
| 345 |
+
Note, that sparse differencing makes sense only for large Jacobian
|
| 346 |
+
matrices where each row contains few non-zero elements.
|
| 347 |
+
as_linear_operator : bool, optional
|
| 348 |
+
When True the function returns an `scipy.sparse.linalg.LinearOperator`.
|
| 349 |
+
Otherwise it returns a dense array or a sparse matrix depending on
|
| 350 |
+
`sparsity`. The linear operator provides an efficient way of computing
|
| 351 |
+
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
|
| 352 |
+
direct access to individual elements of the matrix. By default
|
| 353 |
+
`as_linear_operator` is False.
|
| 354 |
+
args, kwargs : tuple and dict, optional
|
| 355 |
+
Additional arguments passed to `fun`. Both empty by default.
|
| 356 |
+
The calling signature is ``fun(x, *args, **kwargs)``.
|
| 357 |
+
|
| 358 |
+
Returns
|
| 359 |
+
-------
|
| 360 |
+
J : {ndarray, sparse matrix, LinearOperator}
|
| 361 |
+
Finite difference approximation of the Jacobian matrix.
|
| 362 |
+
If `as_linear_operator` is True returns a LinearOperator
|
| 363 |
+
with shape (m, n). Otherwise it returns a dense array or sparse
|
| 364 |
+
matrix depending on how `sparsity` is defined. If `sparsity`
|
| 365 |
+
is None then a ndarray with shape (m, n) is returned. If
|
| 366 |
+
`sparsity` is not None returns a csr_matrix with shape (m, n).
|
| 367 |
+
For sparse matrices and linear operators it is always returned as
|
| 368 |
+
a 2-D structure, for ndarrays, if m=1 it is returned
|
| 369 |
+
as a 1-D gradient array with shape (n,).
|
| 370 |
+
|
| 371 |
+
See Also
|
| 372 |
+
--------
|
| 373 |
+
check_derivative : Check correctness of a function computing derivatives.
|
| 374 |
+
|
| 375 |
+
Notes
|
| 376 |
+
-----
|
| 377 |
+
If `rel_step` is not provided, it assigned as ``EPS**(1/s)``, where EPS is
|
| 378 |
+
determined from the smallest floating point dtype of `x0` or `fun(x0)`,
|
| 379 |
+
``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and
|
| 380 |
+
s=3 for '3-point' method. Such relative step approximately minimizes a sum
|
| 381 |
+
of truncation and round-off errors, see [1]_. Relative steps are used by
|
| 382 |
+
default. However, absolute steps are used when ``abs_step is not None``.
|
| 383 |
+
If any of the absolute or relative steps produces an indistinguishable
|
| 384 |
+
difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then a
|
| 385 |
+
automatic step size is substituted for that particular entry.
|
| 386 |
+
|
| 387 |
+
A finite difference scheme for '3-point' method is selected automatically.
|
| 388 |
+
The well-known central difference scheme is used for points sufficiently
|
| 389 |
+
far from the boundary, and 3-point forward or backward scheme is used for
|
| 390 |
+
points near the boundary. Both schemes have the second-order accuracy in
|
| 391 |
+
terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
|
| 392 |
+
forward and backward difference schemes.
|
| 393 |
+
|
| 394 |
+
For dense differencing when m=1 Jacobian is returned with a shape (n,),
|
| 395 |
+
on the other hand when n=1 Jacobian is returned with a shape (m, 1).
|
| 396 |
+
Our motivation is the following: a) It handles a case of gradient
|
| 397 |
+
computation (m=1) in a conventional way. b) It clearly separates these two
|
| 398 |
+
different cases. b) In all cases np.atleast_2d can be called to get 2-D
|
| 399 |
+
Jacobian with correct dimensions.
|
| 400 |
+
|
| 401 |
+
References
|
| 402 |
+
----------
|
| 403 |
+
.. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
|
| 404 |
+
Computing. 3rd edition", sec. 5.7.
|
| 405 |
+
|
| 406 |
+
.. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
|
| 407 |
+
sparse Jacobian matrices", Journal of the Institute of Mathematics
|
| 408 |
+
and its Applications, 13 (1974), pp. 117-120.
|
| 409 |
+
|
| 410 |
+
.. [3] B. Fornberg, "Generation of Finite Difference Formulas on
|
| 411 |
+
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
|
| 412 |
+
|
| 413 |
+
Examples
|
| 414 |
+
--------
|
| 415 |
+
>>> import numpy as np
|
| 416 |
+
>>> from scipy.optimize._numdiff import approx_derivative
|
| 417 |
+
>>>
|
| 418 |
+
>>> def f(x, c1, c2):
|
| 419 |
+
... return np.array([x[0] * np.sin(c1 * x[1]),
|
| 420 |
+
... x[0] * np.cos(c2 * x[1])])
|
| 421 |
+
...
|
| 422 |
+
>>> x0 = np.array([1.0, 0.5 * np.pi])
|
| 423 |
+
>>> approx_derivative(f, x0, args=(1, 2))
|
| 424 |
+
array([[ 1., 0.],
|
| 425 |
+
[-1., 0.]])
|
| 426 |
+
|
| 427 |
+
Bounds can be used to limit the region of function evaluation.
|
| 428 |
+
In the example below we compute left and right derivative at point 1.0.
|
| 429 |
+
|
| 430 |
+
>>> def g(x):
|
| 431 |
+
... return x**2 if x >= 1 else x
|
| 432 |
+
...
|
| 433 |
+
>>> x0 = 1.0
|
| 434 |
+
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
|
| 435 |
+
array([ 1.])
|
| 436 |
+
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
|
| 437 |
+
array([ 2.])
|
| 438 |
+
"""
|
| 439 |
+
if method not in ['2-point', '3-point', 'cs']:
|
| 440 |
+
raise ValueError("Unknown method '%s'. " % method)
|
| 441 |
+
|
| 442 |
+
xp = array_namespace(x0)
|
| 443 |
+
_x = atleast_nd(x0, ndim=1, xp=xp)
|
| 444 |
+
_dtype = xp.float64
|
| 445 |
+
if xp.isdtype(_x.dtype, "real floating"):
|
| 446 |
+
_dtype = _x.dtype
|
| 447 |
+
|
| 448 |
+
# promotes to floating
|
| 449 |
+
x0 = xp.astype(_x, _dtype)
|
| 450 |
+
|
| 451 |
+
if x0.ndim > 1:
|
| 452 |
+
raise ValueError("`x0` must have at most 1 dimension.")
|
| 453 |
+
|
| 454 |
+
lb, ub = _prepare_bounds(bounds, x0)
|
| 455 |
+
|
| 456 |
+
if lb.shape != x0.shape or ub.shape != x0.shape:
|
| 457 |
+
raise ValueError("Inconsistent shapes between bounds and `x0`.")
|
| 458 |
+
|
| 459 |
+
if as_linear_operator and not (np.all(np.isinf(lb))
|
| 460 |
+
and np.all(np.isinf(ub))):
|
| 461 |
+
raise ValueError("Bounds not supported when "
|
| 462 |
+
"`as_linear_operator` is True.")
|
| 463 |
+
|
| 464 |
+
def fun_wrapped(x):
|
| 465 |
+
# send user function same fp type as x0. (but only if cs is not being
|
| 466 |
+
# used
|
| 467 |
+
if xp.isdtype(x.dtype, "real floating"):
|
| 468 |
+
x = xp.astype(x, x0.dtype)
|
| 469 |
+
|
| 470 |
+
f = np.atleast_1d(fun(x, *args, **kwargs))
|
| 471 |
+
if f.ndim > 1:
|
| 472 |
+
raise RuntimeError("`fun` return value has "
|
| 473 |
+
"more than 1 dimension.")
|
| 474 |
+
return f
|
| 475 |
+
|
| 476 |
+
if f0 is None:
|
| 477 |
+
f0 = fun_wrapped(x0)
|
| 478 |
+
else:
|
| 479 |
+
f0 = np.atleast_1d(f0)
|
| 480 |
+
if f0.ndim > 1:
|
| 481 |
+
raise ValueError("`f0` passed has more than 1 dimension.")
|
| 482 |
+
|
| 483 |
+
if np.any((x0 < lb) | (x0 > ub)):
|
| 484 |
+
raise ValueError("`x0` violates bound constraints.")
|
| 485 |
+
|
| 486 |
+
if as_linear_operator:
|
| 487 |
+
if rel_step is None:
|
| 488 |
+
rel_step = _eps_for_method(x0.dtype, f0.dtype, method)
|
| 489 |
+
|
| 490 |
+
return _linear_operator_difference(fun_wrapped, x0,
|
| 491 |
+
f0, rel_step, method)
|
| 492 |
+
else:
|
| 493 |
+
# by default we use rel_step
|
| 494 |
+
if abs_step is None:
|
| 495 |
+
h = _compute_absolute_step(rel_step, x0, f0, method)
|
| 496 |
+
else:
|
| 497 |
+
# user specifies an absolute step
|
| 498 |
+
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
|
| 499 |
+
h = abs_step
|
| 500 |
+
|
| 501 |
+
# cannot have a zero step. This might happen if x0 is very large
|
| 502 |
+
# or small. In which case fall back to relative step.
|
| 503 |
+
dx = ((x0 + h) - x0)
|
| 504 |
+
h = np.where(dx == 0,
|
| 505 |
+
_eps_for_method(x0.dtype, f0.dtype, method) *
|
| 506 |
+
sign_x0 * np.maximum(1.0, np.abs(x0)),
|
| 507 |
+
h)
|
| 508 |
+
|
| 509 |
+
if method == '2-point':
|
| 510 |
+
h, use_one_sided = _adjust_scheme_to_bounds(
|
| 511 |
+
x0, h, 1, '1-sided', lb, ub)
|
| 512 |
+
elif method == '3-point':
|
| 513 |
+
h, use_one_sided = _adjust_scheme_to_bounds(
|
| 514 |
+
x0, h, 1, '2-sided', lb, ub)
|
| 515 |
+
elif method == 'cs':
|
| 516 |
+
use_one_sided = False
|
| 517 |
+
|
| 518 |
+
if sparsity is None:
|
| 519 |
+
return _dense_difference(fun_wrapped, x0, f0, h,
|
| 520 |
+
use_one_sided, method)
|
| 521 |
+
else:
|
| 522 |
+
if not issparse(sparsity) and len(sparsity) == 2:
|
| 523 |
+
structure, groups = sparsity
|
| 524 |
+
else:
|
| 525 |
+
structure = sparsity
|
| 526 |
+
groups = group_columns(sparsity)
|
| 527 |
+
|
| 528 |
+
if issparse(structure):
|
| 529 |
+
structure = csc_matrix(structure)
|
| 530 |
+
else:
|
| 531 |
+
structure = np.atleast_2d(structure)
|
| 532 |
+
|
| 533 |
+
groups = np.atleast_1d(groups)
|
| 534 |
+
return _sparse_difference(fun_wrapped, x0, f0, h,
|
| 535 |
+
use_one_sided, structure,
|
| 536 |
+
groups, method)
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
def _linear_operator_difference(fun, x0, f0, h, method):
|
| 540 |
+
m = f0.size
|
| 541 |
+
n = x0.size
|
| 542 |
+
|
| 543 |
+
if method == '2-point':
|
| 544 |
+
def matvec(p):
|
| 545 |
+
if np.array_equal(p, np.zeros_like(p)):
|
| 546 |
+
return np.zeros(m)
|
| 547 |
+
dx = h / norm(p)
|
| 548 |
+
x = x0 + dx*p
|
| 549 |
+
df = fun(x) - f0
|
| 550 |
+
return df / dx
|
| 551 |
+
|
| 552 |
+
elif method == '3-point':
|
| 553 |
+
def matvec(p):
|
| 554 |
+
if np.array_equal(p, np.zeros_like(p)):
|
| 555 |
+
return np.zeros(m)
|
| 556 |
+
dx = 2*h / norm(p)
|
| 557 |
+
x1 = x0 - (dx/2)*p
|
| 558 |
+
x2 = x0 + (dx/2)*p
|
| 559 |
+
f1 = fun(x1)
|
| 560 |
+
f2 = fun(x2)
|
| 561 |
+
df = f2 - f1
|
| 562 |
+
return df / dx
|
| 563 |
+
|
| 564 |
+
elif method == 'cs':
|
| 565 |
+
def matvec(p):
|
| 566 |
+
if np.array_equal(p, np.zeros_like(p)):
|
| 567 |
+
return np.zeros(m)
|
| 568 |
+
dx = h / norm(p)
|
| 569 |
+
x = x0 + dx*p*1.j
|
| 570 |
+
f1 = fun(x)
|
| 571 |
+
df = f1.imag
|
| 572 |
+
return df / dx
|
| 573 |
+
|
| 574 |
+
else:
|
| 575 |
+
raise RuntimeError("Never be here.")
|
| 576 |
+
|
| 577 |
+
return LinearOperator((m, n), matvec)
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
def _dense_difference(fun, x0, f0, h, use_one_sided, method):
|
| 581 |
+
m = f0.size
|
| 582 |
+
n = x0.size
|
| 583 |
+
J_transposed = np.empty((n, m))
|
| 584 |
+
x1 = x0.copy()
|
| 585 |
+
x2 = x0.copy()
|
| 586 |
+
xc = x0.astype(complex, copy=True)
|
| 587 |
+
|
| 588 |
+
for i in range(h.size):
|
| 589 |
+
if method == '2-point':
|
| 590 |
+
x1[i] += h[i]
|
| 591 |
+
dx = x1[i] - x0[i] # Recompute dx as exactly representable number.
|
| 592 |
+
df = fun(x1) - f0
|
| 593 |
+
elif method == '3-point' and use_one_sided[i]:
|
| 594 |
+
x1[i] += h[i]
|
| 595 |
+
x2[i] += 2 * h[i]
|
| 596 |
+
dx = x2[i] - x0[i]
|
| 597 |
+
f1 = fun(x1)
|
| 598 |
+
f2 = fun(x2)
|
| 599 |
+
df = -3.0 * f0 + 4 * f1 - f2
|
| 600 |
+
elif method == '3-point' and not use_one_sided[i]:
|
| 601 |
+
x1[i] -= h[i]
|
| 602 |
+
x2[i] += h[i]
|
| 603 |
+
dx = x2[i] - x1[i]
|
| 604 |
+
f1 = fun(x1)
|
| 605 |
+
f2 = fun(x2)
|
| 606 |
+
df = f2 - f1
|
| 607 |
+
elif method == 'cs':
|
| 608 |
+
xc[i] += h[i] * 1.j
|
| 609 |
+
f1 = fun(xc)
|
| 610 |
+
df = f1.imag
|
| 611 |
+
dx = h[i]
|
| 612 |
+
else:
|
| 613 |
+
raise RuntimeError("Never be here.")
|
| 614 |
+
|
| 615 |
+
J_transposed[i] = df / dx
|
| 616 |
+
x1[i] = x2[i] = xc[i] = x0[i]
|
| 617 |
+
|
| 618 |
+
if m == 1:
|
| 619 |
+
J_transposed = np.ravel(J_transposed)
|
| 620 |
+
|
| 621 |
+
return J_transposed.T
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
def _sparse_difference(fun, x0, f0, h, use_one_sided,
|
| 625 |
+
structure, groups, method):
|
| 626 |
+
m = f0.size
|
| 627 |
+
n = x0.size
|
| 628 |
+
row_indices = []
|
| 629 |
+
col_indices = []
|
| 630 |
+
fractions = []
|
| 631 |
+
|
| 632 |
+
n_groups = np.max(groups) + 1
|
| 633 |
+
for group in range(n_groups):
|
| 634 |
+
# Perturb variables which are in the same group simultaneously.
|
| 635 |
+
e = np.equal(group, groups)
|
| 636 |
+
h_vec = h * e
|
| 637 |
+
if method == '2-point':
|
| 638 |
+
x = x0 + h_vec
|
| 639 |
+
dx = x - x0
|
| 640 |
+
df = fun(x) - f0
|
| 641 |
+
# The result is written to columns which correspond to perturbed
|
| 642 |
+
# variables.
|
| 643 |
+
cols, = np.nonzero(e)
|
| 644 |
+
# Find all non-zero elements in selected columns of Jacobian.
|
| 645 |
+
i, j, _ = find(structure[:, cols])
|
| 646 |
+
# Restore column indices in the full array.
|
| 647 |
+
j = cols[j]
|
| 648 |
+
elif method == '3-point':
|
| 649 |
+
# Here we do conceptually the same but separate one-sided
|
| 650 |
+
# and two-sided schemes.
|
| 651 |
+
x1 = x0.copy()
|
| 652 |
+
x2 = x0.copy()
|
| 653 |
+
|
| 654 |
+
mask_1 = use_one_sided & e
|
| 655 |
+
x1[mask_1] += h_vec[mask_1]
|
| 656 |
+
x2[mask_1] += 2 * h_vec[mask_1]
|
| 657 |
+
|
| 658 |
+
mask_2 = ~use_one_sided & e
|
| 659 |
+
x1[mask_2] -= h_vec[mask_2]
|
| 660 |
+
x2[mask_2] += h_vec[mask_2]
|
| 661 |
+
|
| 662 |
+
dx = np.zeros(n)
|
| 663 |
+
dx[mask_1] = x2[mask_1] - x0[mask_1]
|
| 664 |
+
dx[mask_2] = x2[mask_2] - x1[mask_2]
|
| 665 |
+
|
| 666 |
+
f1 = fun(x1)
|
| 667 |
+
f2 = fun(x2)
|
| 668 |
+
|
| 669 |
+
cols, = np.nonzero(e)
|
| 670 |
+
i, j, _ = find(structure[:, cols])
|
| 671 |
+
j = cols[j]
|
| 672 |
+
|
| 673 |
+
mask = use_one_sided[j]
|
| 674 |
+
df = np.empty(m)
|
| 675 |
+
|
| 676 |
+
rows = i[mask]
|
| 677 |
+
df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
|
| 678 |
+
|
| 679 |
+
rows = i[~mask]
|
| 680 |
+
df[rows] = f2[rows] - f1[rows]
|
| 681 |
+
elif method == 'cs':
|
| 682 |
+
f1 = fun(x0 + h_vec*1.j)
|
| 683 |
+
df = f1.imag
|
| 684 |
+
dx = h_vec
|
| 685 |
+
cols, = np.nonzero(e)
|
| 686 |
+
i, j, _ = find(structure[:, cols])
|
| 687 |
+
j = cols[j]
|
| 688 |
+
else:
|
| 689 |
+
raise ValueError("Never be here.")
|
| 690 |
+
|
| 691 |
+
# All that's left is to compute the fraction. We store i, j and
|
| 692 |
+
# fractions as separate arrays and later construct coo_matrix.
|
| 693 |
+
row_indices.append(i)
|
| 694 |
+
col_indices.append(j)
|
| 695 |
+
fractions.append(df[i] / dx[j])
|
| 696 |
+
|
| 697 |
+
row_indices = np.hstack(row_indices)
|
| 698 |
+
col_indices = np.hstack(col_indices)
|
| 699 |
+
fractions = np.hstack(fractions)
|
| 700 |
+
J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
|
| 701 |
+
return csr_matrix(J)
|
| 702 |
+
|
| 703 |
+
|
| 704 |
+
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
                     kwargs={}):
    """Check correctness of a function computing derivatives (Jacobian or
    gradient) by comparison with a finite difference approximation.

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is ndarray of shape (n,) (never a scalar
        even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
    jac : callable
        Function which computes Jacobian matrix of `fun`. It must work with
        argument x the same way as `fun`. The return value must be array_like
        or sparse matrix with an appropriate shape.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. Float will be converted
        to 1-D array.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each bound must match the size of `x0` or be a scalar, in the latter
        case the bound will be the same for all variables. Use it to limit the
        range of function evaluation.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)`` and the same
        for `jac`.

    Returns
    -------
    accuracy : float
        The maximum among all relative errors for elements with absolute values
        higher than 1 and absolute errors for elements with absolute values
        less or equal than 1. If `accuracy` is on the order of 1e-6 or lower,
        then it is likely that your `jac` implementation is correct.

    See Also
    --------
    approx_derivative : Compute finite difference approximation of derivative.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize._numdiff import check_derivative
    >>>
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> def jac(x, c1, c2):
    ...     return np.array([
    ...         [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
    ...         [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
    ...     ])
    ...
    >>>
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> check_derivative(f, jac, x0, args=(1, 2))
    2.4492935982947064e-16
    """
    J_analytical = jac(x0, *args, **kwargs)

    if not issparse(J_analytical):
        # Dense path: elementwise mixed absolute/relative error.
        J_numerical = approx_derivative(fun, x0, bounds=bounds,
                                        args=args, kwargs=kwargs)
        err = np.abs(J_analytical - J_numerical)
        return np.max(err / np.maximum(1, np.abs(J_numerical)))

    # Sparse path: reuse the analytical pattern as the sparsity structure so
    # only declared non-zeros are differenced and compared.
    J_numerical = approx_derivative(fun, x0, bounds=bounds,
                                    sparsity=J_analytical,
                                    args=args, kwargs=kwargs)
    J_analytical = csr_matrix(J_analytical)
    diff = J_analytical - J_numerical
    rows, cols, err_data = find(diff)
    ref_data = np.asarray(J_numerical[rows, cols]).ravel()
    return np.max(np.abs(err_data) / np.maximum(1, np.abs(ref_data)))
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_optimize.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_slsqp.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (86.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_spectral.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Spectral Algorithm for Nonlinear Equations
|
| 3 |
+
"""
|
| 4 |
+
import collections
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from scipy.optimize import OptimizeResult
|
| 8 |
+
from scipy.optimize._optimize import _check_unknown_options
|
| 9 |
+
from ._linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng
|
| 10 |
+
|
| 11 |
+
class _NoConvergence(Exception):
    # Internal control-flow exception: raised by the wrapped objective when
    # the allowed number of function evaluations (maxfev) is exhausted.
    pass
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000,
                  fnorm=None, callback=None, disp=False, M=10, eta_strategy=None,
                  sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options):
    r"""
    Solve nonlinear equation with the DF-SANE method

    Options
    -------
    ftol : float, optional
        Relative norm tolerance.
    fatol : float, optional
        Absolute norm tolerance.
        Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``.
    fnorm : callable, optional
        Norm to use in the convergence check. If None, 2-norm is used.
    maxfev : int, optional
        Maximum number of function evaluations.
    disp : bool, optional
        Whether to print convergence process to stdout.
    eta_strategy : callable, optional
        Choice of the ``eta_k`` parameter, which gives slack for growth
        of ``||F||**2``. Called as ``eta_k = eta_strategy(k, x, F)`` with
        `k` the iteration number, `x` the current iterate and `F` the current
        residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``.
        Default: ``||F||**2 / (1 + k)**2``.
    sigma_eps : float, optional
        The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``.
        Default: 1e-10
    sigma_0 : float, optional
        Initial spectral coefficient.
        Default: 1.0
    M : int, optional
        Number of iterates to include in the nonmonotonic line search.
        Default: 10
    line_search : {'cruz', 'cheng'}
        Type of line search to employ. 'cruz' is the original one defined in
        [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is
        a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)].
        Default: 'cruz'

    References
    ----------
    .. [1] "Spectral residual method without gradient information for solving
           large-scale nonlinear systems of equations." W. La Cruz,
           J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
    .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014).
    .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009).

    """
    _check_unknown_options(unknown_options)

    if line_search not in ('cheng', 'cruz'):
        raise ValueError(f"Invalid value {line_search!r} for 'line_search'")

    # Exponent of the merit function ||F||**nexp.
    nexp = 2

    if eta_strategy is None:
        # Different choice from [1], as their eta is not invariant
        # vs. scaling of F.
        def eta_strategy(k, x, F):
            # Obtain squared 2-norm of the initial residual from the outer scope
            return f_0 / (1 + k)**2

    if fnorm is None:
        def fnorm(F):
            # Obtain squared 2-norm of the current residual from the outer scope
            return f_k**(1.0/nexp)

    def fmerit(F):
        # Merit function minimized by the line search: ||F||**nexp.
        return np.linalg.norm(F)**nexp

    # nfev is a single-element list so the wrapped function can mutate the
    # evaluation count in place.
    nfev = [0]
    f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit,
                                                       nfev, maxfev, args)

    k = 0
    f_0 = f_k
    sigma_k = sigma_0

    F_0_norm = fnorm(F_k)

    # For the 'cruz' line search
    prev_fs = collections.deque([f_k], M)

    # For the 'cheng' line search
    Q = 1.0
    C = f_0

    converged = False
    message = "too many function evaluations required"

    while True:
        F_k_norm = fnorm(F_k)

        if disp:
            print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k))

        if callback is not None:
            callback(x_k, F_k)

        if F_k_norm < ftol * F_0_norm + fatol:
            # Converged!
            message = "successful convergence"
            converged = True
            break

        # Control spectral parameter, from [2]
        if abs(sigma_k) > 1/sigma_eps:
            sigma_k = 1/sigma_eps * np.sign(sigma_k)
        elif abs(sigma_k) < sigma_eps:
            sigma_k = sigma_eps

        # Line search direction
        d = -sigma_k * F_k

        # Nonmonotone line search
        eta = eta_strategy(k, x_k, F_k)
        try:
            if line_search == 'cruz':
                alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs,
                                                                  eta=eta)
            elif line_search == 'cheng':
                alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k,
                                                                         C, Q, eta=eta)
        except _NoConvergence:
            # Evaluation budget exhausted inside the line search; exit with
            # the default "too many function evaluations" message.
            break

        # Update spectral parameter (Barzilai-Borwein-type quotient).
        s_k = xp - x_k
        y_k = Fp - F_k
        sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k)

        # Take step
        x_k = xp
        F_k = Fp
        f_k = fp

        # Store function value
        if line_search == 'cruz':
            prev_fs.append(fp)

        k += 1

    # Map the real working arrays back to the caller's (possibly complex)
    # domain and original shape.
    x = _wrap_result(x_k, is_complex, shape=x_shape)
    F = _wrap_result(F_k, is_complex)

    result = OptimizeResult(x=x, success=converged,
                            message=message,
                            fun=F, nfev=nfev[0], nit=k, method="df-sane")

    return result
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()):
    """
    Wrap a function and an initial value so that (i) complex values
    are wrapped to reals, and (ii) value for a merit function
    fmerit(x, f) is computed at the same time, (iii) iteration count
    is maintained and an exception is raised if it is exceeded.

    Parameters
    ----------
    func : callable
        Function to wrap
    x0 : ndarray
        Initial value
    fmerit : callable
        Merit function fmerit(f) for computing merit value from residual.
    nfev_list : list
        List to store number of evaluations in. Should be [0] in the beginning.
    maxfev : int
        Maximum number of evaluations before _NoConvergence is raised.
    args : tuple
        Extra arguments to func

    Returns
    -------
    wrap_func : callable
        Wrapped function, to be called as
        ``F, fp = wrap_func(x0)``
    x0_wrap : ndarray of float
        Wrapped initial value; raveled to 1-D and complex
        values mapped to reals.
    x0_shape : tuple
        Shape of the initial value array
    f : float
        Merit function at F
    F : ndarray of float
        Residual at x0_wrap
    is_complex : bool
        Whether complex values were mapped to reals

    """
    x0 = np.asarray(x0)
    x0_shape = x0.shape
    F = np.asarray(func(x0, *args)).ravel()
    # Complex handling is triggered if either the iterate or the residual
    # is complex-valued.
    is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F)
    x0 = x0.ravel()

    # The initial evaluation performed above counts towards maxfev.
    nfev_list[0] = 1

    if is_complex:
        def wrap_func(x):
            # Enforce the evaluation budget before calling the user function.
            if nfev_list[0] >= maxfev:
                raise _NoConvergence()
            nfev_list[0] += 1
            # View the real working vector as complex, restore the original
            # shape, evaluate, then map the residual back to reals.
            z = _real2complex(x).reshape(x0_shape)
            v = np.asarray(func(z, *args)).ravel()
            F = _complex2real(v)
            f = fmerit(F)
            return f, F

        x0 = _complex2real(x0)
        F = _complex2real(F)
    else:
        def wrap_func(x):
            # Enforce the evaluation budget before calling the user function.
            if nfev_list[0] >= maxfev:
                raise _NoConvergence()
            nfev_list[0] += 1
            x = x.reshape(x0_shape)
            F = np.asarray(func(x, *args)).ravel()
            f = fmerit(F)
            return f, F

    return wrap_func, x0, x0_shape, fmerit(F), F, is_complex
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def _wrap_result(result, is_complex, shape=None):
|
| 243 |
+
"""
|
| 244 |
+
Convert from real to complex and reshape result arrays.
|
| 245 |
+
"""
|
| 246 |
+
if is_complex:
|
| 247 |
+
z = _real2complex(result)
|
| 248 |
+
else:
|
| 249 |
+
z = result
|
| 250 |
+
if shape is not None:
|
| 251 |
+
z = z.reshape(shape)
|
| 252 |
+
return z
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def _real2complex(x):
|
| 256 |
+
return np.ascontiguousarray(x, dtype=float).view(np.complex128)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def _complex2real(z):
|
| 260 |
+
return np.ascontiguousarray(z, dtype=complex).view(np.float64)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/_trustregion_ncg.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Newton-CG trust-region optimization."""
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import scipy.linalg
|
| 6 |
+
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
|
| 7 |
+
|
| 8 |
+
__all__ = []
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
                        **trust_region_options):
    """
    Minimization of scalar function of one or more variables using
    the Newton conjugate gradient trust-region algorithm.

    Options
    -------
    initial_trust_radius : float
        Initial trust-region radius.
    max_trust_radius : float
        Maximum value of the trust-region radius. No steps that are longer
        than this value will be proposed.
    eta : float
        Trust region related acceptance stringency for proposed steps.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.

    """
    # First derivatives are mandatory: the quadratic trust-region model is
    # built around the gradient at each iterate.
    if jac is None:
        raise ValueError('Jacobian is required for Newton-CG trust-region '
                         'minimization')
    # Second-order information may be supplied either as a full Hessian or
    # as a Hessian-vector product callable; at least one must be given.
    if hess is None and hessp is None:
        raise ValueError('Either the Hessian or the Hessian-vector product '
                         'is required for Newton-CG trust-region minimization')
    # Delegate to the generic trust-region driver with the CG-Steihaug
    # subproblem solver.
    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
                                  hessp=hessp, subproblem=CGSteihaugSubproblem,
                                  **trust_region_options)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class CGSteihaugSubproblem(BaseQuadraticSubproblem):
    """Quadratic subproblem solved by a conjugate gradient method"""
    def solve(self, trust_radius):
        """
        Solve the subproblem using a conjugate gradient method.

        Parameters
        ----------
        trust_radius : float
            We are allowed to wander only this far away from the origin.

        Returns
        -------
        p : ndarray
            The proposed step.
        hits_boundary : bool
            True if the proposed step is on the boundary of the trust region.

        Notes
        -----
        This is algorithm (7.2) of Nocedal and Wright 2nd edition.
        Only the function that computes the Hessian-vector product is required.
        The Hessian itself is not required, and the Hessian does
        not need to be positive semidefinite.
        """

        # get the norm of jacobian and define the origin
        p_origin = np.zeros_like(self.jac)

        # define a default tolerance
        tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag

        # Stop the method if the search direction
        # is a direction of nonpositive curvature.
        if self.jac_mag < tolerance:
            hits_boundary = False
            return p_origin, hits_boundary

        # init the state for the first iteration
        z = p_origin
        r = self.jac
        d = -r

        # Search for the min of the approximation of the objective function.
        while True:

            # do an iteration
            Bd = self.hessp(d)
            dBd = np.dot(d, Bd)
            if dBd <= 0:
                # Nonpositive curvature along d: the model is unbounded in
                # this direction inside the region, so step to the boundary.
                # Look at the two boundary points.
                # Find both values of t to get the boundary points such that
                # ||z + t d|| == trust_radius
                # and then choose the one with the predicted min value.
                ta, tb = self.get_boundaries_intersections(z, d, trust_radius)
                pa = z + ta * d
                pb = z + tb * d
                if self(pa) < self(pb):
                    p_boundary = pa
                else:
                    p_boundary = pb
                hits_boundary = True
                return p_boundary, hits_boundary
            r_squared = np.dot(r, r)
            alpha = r_squared / dBd
            z_next = z + alpha * d
            if scipy.linalg.norm(z_next) >= trust_radius:
                # The CG iterate left the region: clip the step to the
                # boundary along the current search direction.
                # Find t >= 0 to get the boundary point such that
                # ||z + t d|| == trust_radius
                ta, tb = self.get_boundaries_intersections(z, d, trust_radius)
                p_boundary = z + tb * d
                hits_boundary = True
                return p_boundary, hits_boundary
            # Standard CG residual update.
            r_next = r + alpha * Bd
            r_next_squared = np.dot(r_next, r_next)
            if math.sqrt(r_next_squared) < tolerance:
                # Residual small enough: interior solution found.
                hits_boundary = False
                return z_next, hits_boundary
            beta_next = r_next_squared / r_squared
            d_next = -r_next + beta_next * d

            # update the state for the next iteration
            z = z_next
            r = r_next
            d = d_next
|
llava_next/lib/python3.10/site-packages/scipy/optimize/cobyla.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
| 2 |
+
# Use the `scipy.optimize` namespace for importing the functions
|
| 3 |
+
# included below.
|
| 4 |
+
|
| 5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [ # noqa: F822
|
| 9 |
+
'OptimizeResult',
|
| 10 |
+
'fmin_cobyla',
|
| 11 |
+
]
|
| 12 |
+
|
| 13 |
+
def __dir__():
|
| 14 |
+
return __all__
|
| 15 |
+
|
| 16 |
+
def __getattr__(name):
|
| 17 |
+
return _sub_module_deprecation(sub_package="optimize", module="cobyla",
|
| 18 |
+
private_modules=["_cobyla_py"], all=__all__,
|
| 19 |
+
attribute=name)
|
llava_next/lib/python3.10/site-packages/scipy/optimize/minpack2.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.optimize` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# No public names remain in this shim; attribute access only raises the
# deprecation machinery below.
__all__: list[str] = []


def __dir__():
    """Expose only the declared public API to dir()."""
    return __all__


def __getattr__(name):
    """Resolve *name* from the private module, emitting a deprecation warning."""
    return _sub_module_deprecation(
        sub_package="optimize",
        module="minpack2",
        private_modules=["_minpack2"],
        all=__all__,
        attribute=name,
    )
|
llava_next/lib/python3.10/site-packages/scipy/optimize/slsqp.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.optimize` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    "OptimizeResult",
    "fmin_slsqp",
    "slsqp",
    "zeros",
]


def __dir__():
    """Expose only the declared public API to dir()."""
    return __all__


def __getattr__(name):
    """Resolve *name* from the private module, emitting a deprecation warning."""
    return _sub_module_deprecation(
        sub_package="optimize",
        module="slsqp",
        private_modules=["_slsqp_py"],
        all=__all__,
        attribute=name,
    )
|
parrot/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b7eb9550628889b4477639bb4c3b301fea459f135cdd6b5b1dc975fdb55a0a43
|
| 3 |
+
size 452036
|
parrot/lib/python3.10/site-packages/torch/cpu/amp/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .autocast_mode import autocast
|
| 2 |
+
from .grad_scaler import GradScaler
|
parrot/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (257 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc
ADDED
|
Binary file (1.82 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/grad_scaler.cpython-310.pyc
ADDED
|
Binary file (1.24 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
from typing import Any

from typing_extensions import deprecated

import torch

__all__ = ["autocast"]


class autocast(torch.amp.autocast_mode.autocast):
    r"""
    Deprecated CPU-only wrapper around :class:`torch.autocast`.

    ``torch.cpu.amp.autocast(args...)`` is deprecated. Please use ``torch.amp.autocast("cpu", args...)`` instead.
    """

    @deprecated(
        "`torch.cpu.amp.autocast(args...)` is deprecated. "
        "Please use `torch.amp.autocast('cpu', args...)` instead.",
        category=FutureWarning,
    )
    def __init__(
        self,
        enabled: bool = True,
        dtype: torch.dtype = torch.bfloat16,
        cache_enabled: bool = True,
    ):
        # TorchScript cannot execute the parent initializer, so mirror only the
        # attributes that scripted code reads and bail out early.
        if torch._jit_internal.is_scripting():
            self._enabled = enabled
            self.device = "cpu"
            self.fast_dtype = dtype
            return
        super().__init__(
            "cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
        )

    def __enter__(self):
        # Under scripting the context manager is a no-op wrapper around self.
        if torch._jit_internal.is_scripting():
            return self
        return super().__enter__()

    # TODO: discuss a unified TorchScript-friendly API for autocast
    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):  # type: ignore[override]
        if torch._jit_internal.is_scripting():
            return
        return super().__exit__(exc_type, exc_val, exc_tb)

    def __call__(self, func):
        # Scripted callables are returned unchanged; eager ones get wrapped.
        if torch._jit_internal.is_scripting():
            return func
        return super().__call__(func)
|
parrot/lib/python3.10/site-packages/torch/cpu/amp/grad_scaler.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing_extensions import deprecated
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
__all__ = ["GradScaler"]
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class GradScaler(torch.amp.GradScaler):
|
| 9 |
+
r"""
|
| 10 |
+
See :class:`torch.amp.GradScaler`.
|
| 11 |
+
``torch.cpu.amp.GradScaler(args...)`` is deprecated. Please use ``torch.amp.GradScaler("cpu", args...)`` instead.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
@deprecated(
|
| 15 |
+
"`torch.cpu.amp.GradScaler(args...)` is deprecated. "
|
| 16 |
+
"Please use `torch.amp.GradScaler('cpu', args...)` instead.",
|
| 17 |
+
category=FutureWarning,
|
| 18 |
+
)
|
| 19 |
+
def __init__(
|
| 20 |
+
self,
|
| 21 |
+
init_scale: float = 2.0**16,
|
| 22 |
+
growth_factor: float = 2.0,
|
| 23 |
+
backoff_factor: float = 0.5,
|
| 24 |
+
growth_interval: int = 2000,
|
| 25 |
+
enabled: bool = True,
|
| 26 |
+
) -> None:
|
| 27 |
+
super().__init__(
|
| 28 |
+
"cpu",
|
| 29 |
+
init_scale=init_scale,
|
| 30 |
+
growth_factor=growth_factor,
|
| 31 |
+
backoff_factor=backoff_factor,
|
| 32 |
+
growth_interval=growth_interval,
|
| 33 |
+
enabled=enabled,
|
| 34 |
+
)
|
parrot/lib/python3.10/site-packages/torch/distributed/__init__.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
from enum import Enum
|
| 5 |
+
import pdb
|
| 6 |
+
import io
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
def is_available() -> bool:
    """
    Return ``True`` if the distributed package is available.

    Otherwise,
    ``torch.distributed`` does not expose any other APIs. Currently,
    ``torch.distributed`` is available on Linux, MacOS and Windows. Set
    ``USE_DISTRIBUTED=1`` to enable it when building PyTorch from source.
    Currently, the default value is ``USE_DISTRIBUTED=1`` for Linux and Windows,
    ``USE_DISTRIBUTED=0`` for MacOS.
    """
    # The c10d initializer symbol is compiled in only when the binary was
    # built with distributed support, so its presence is the availability test.
    return getattr(torch._C, "_c10d_init", None) is not None
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
if is_available() and not torch._C._c10d_init():
|
| 25 |
+
raise RuntimeError("Failed to initialize torch.distributed")
|
| 26 |
+
|
| 27 |
+
# Custom Runtime Errors thrown from the distributed package
|
| 28 |
+
DistError = torch._C._DistError
|
| 29 |
+
DistBackendError = torch._C._DistBackendError
|
| 30 |
+
DistNetworkError = torch._C._DistNetworkError
|
| 31 |
+
DistStoreError = torch._C._DistStoreError
|
| 32 |
+
|
| 33 |
+
if is_available():
|
| 34 |
+
from torch._C._distributed_c10d import (
|
| 35 |
+
Store,
|
| 36 |
+
FileStore,
|
| 37 |
+
TCPStore,
|
| 38 |
+
ProcessGroup as ProcessGroup,
|
| 39 |
+
Backend as _Backend,
|
| 40 |
+
PrefixStore,
|
| 41 |
+
Reducer,
|
| 42 |
+
Logger,
|
| 43 |
+
BuiltinCommHookType,
|
| 44 |
+
GradBucket,
|
| 45 |
+
Work as _Work,
|
| 46 |
+
_DEFAULT_FIRST_BUCKET_BYTES,
|
| 47 |
+
_register_comm_hook,
|
| 48 |
+
_register_builtin_comm_hook,
|
| 49 |
+
_broadcast_coalesced,
|
| 50 |
+
_compute_bucket_assignment_by_size,
|
| 51 |
+
_verify_params_across_processes,
|
| 52 |
+
_test_python_store,
|
| 53 |
+
DebugLevel,
|
| 54 |
+
get_debug_level,
|
| 55 |
+
set_debug_level,
|
| 56 |
+
set_debug_level_from_env,
|
| 57 |
+
_make_nccl_premul_sum,
|
| 58 |
+
_ControlCollectives,
|
| 59 |
+
_StoreCollectives,
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
class _DistributedPdb(pdb.Pdb):
    """
    Supports using PDB from inside a multiprocessing child process.

    Usage:
    _DistributedPdb().set_trace()
    """

    def interaction(self, *args, **kwargs):
        # Child processes inherit a stdin that is not attached to the
        # terminal; temporarily reopen the controlling tty so the debugger
        # can read user input, restoring the original stream afterwards.
        saved_stdin = sys.stdin
        try:
            sys.stdin = open('/dev/stdin')
            pdb.Pdb.interaction(self, *args, **kwargs)
        finally:
            sys.stdin = saved_stdin
|
| 76 |
+
|
| 77 |
+
def breakpoint(rank: int = 0):
    """
    Set a breakpoint, but only on a single rank. All other ranks will wait for you to be
    done with the breakpoint before continuing.

    Args:
        rank (int): Which rank to break on. Default: ``0``
    """
    if get_rank() == rank:
        # NOTE: renamed from `pdb` to avoid shadowing the pdb module.
        debugger = _DistributedPdb()
        debugger.message(
            "\n!!! ATTENTION !!!\n\n"
            f"Type 'up' to get to the frame that called dist.breakpoint(rank={rank})\n"
        )
        debugger.set_trace()
    # If Meta/Python keys are in the TLS, we want to make sure that we ignore them
    # and hit the (default) CPU/CUDA implementation of barrier.
    meta_in_tls = torch._C._meta_in_tls_dispatch_include()
    guard = torch._C._DisableTorchDispatch()  # type: ignore[attr-defined]
    torch._C._set_meta_in_tls_dispatch_include(False)
    try:
        barrier()
    finally:
        torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)
        del guard
|
| 102 |
+
|
| 103 |
+
if sys.platform != "win32":
|
| 104 |
+
from torch._C._distributed_c10d import (
|
| 105 |
+
HashStore,
|
| 106 |
+
_round_robin_process_groups,
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
from .distributed_c10d import * # noqa: F403
|
| 110 |
+
|
| 111 |
+
# Variables prefixed with underscore are not auto imported
|
| 112 |
+
# See the comment in `distributed_c10d.py` above `_backend` on why we expose
|
| 113 |
+
# this.
|
| 114 |
+
|
| 115 |
+
from .distributed_c10d import (
|
| 116 |
+
_all_gather_base,
|
| 117 |
+
_reduce_scatter_base,
|
| 118 |
+
_create_process_group_wrapper,
|
| 119 |
+
_rank_not_in_group,
|
| 120 |
+
_coalescing_manager,
|
| 121 |
+
_CoalescingManager,
|
| 122 |
+
_get_process_group_name,
|
| 123 |
+
get_node_local_rank,
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
from .rendezvous import (
|
| 127 |
+
rendezvous,
|
| 128 |
+
_create_store_from_options,
|
| 129 |
+
register_rendezvous_handler,
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
from .remote_device import _remote_device
|
| 133 |
+
from .device_mesh import init_device_mesh, DeviceMesh
|
| 134 |
+
|
| 135 |
+
set_debug_level_from_env()
|
| 136 |
+
|
| 137 |
+
else:
|
| 138 |
+
# This stub is sufficient to get
|
| 139 |
+
# python test/test_public_bindings.py -k test_correct_module_names
|
| 140 |
+
# working even when USE_DISTRIBUTED=0. Feel free to add more
|
| 141 |
+
# stubs as necessary.
|
| 142 |
+
# We cannot define stubs directly because they confuse pyre
|
| 143 |
+
|
| 144 |
+
class _ProcessGroupStub:
|
| 145 |
+
pass
|
| 146 |
+
sys.modules["torch.distributed"].ProcessGroup = _ProcessGroupStub # type: ignore[attr-defined]
|
parrot/lib/python3.10/site-packages/torch/distributed/_functional_collectives.py
ADDED
|
@@ -0,0 +1,1147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import sys
|
| 3 |
+
import warnings
|
| 4 |
+
from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.distributed as dist
|
| 8 |
+
import torch.distributed.distributed_c10d as c10d
|
| 9 |
+
from torch.distributed.device_mesh import DeviceMesh
|
| 10 |
+
from torch.fx.experimental.proxy_tensor import get_innermost_proxy_mode
|
| 11 |
+
|
| 12 |
+
from . import _functional_collectives_impl as fun_col_impl
|
| 13 |
+
|
| 14 |
+
try:
|
| 15 |
+
from torch.utils._cxx_pytree import tree_map_only
|
| 16 |
+
except ImportError:
|
| 17 |
+
from torch.utils._pytree import tree_map_only # type: ignore[no-redef]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
if torch._running_with_deploy():
|
| 21 |
+
|
| 22 |
+
def is_torchdynamo_compiling():
|
| 23 |
+
"""Can't import torchdynamo in torchdeploy builds currently."""
|
| 24 |
+
return False
|
| 25 |
+
|
| 26 |
+
else:
|
| 27 |
+
try:
|
| 28 |
+
from torch.compiler import is_dynamo_compiling as is_torchdynamo_compiling
|
| 29 |
+
except Exception:
|
| 30 |
+
warnings.warn(
|
| 31 |
+
"Unable to import torchdynamo util `is_torchdynamo_compiling`, so won't support torchdynamo correctly"
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
def is_torchdynamo_compiling():
|
| 35 |
+
return False
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
"""
|
| 39 |
+
New traceable, functional collectives.
|
| 40 |
+
RFC: https://github.com/pytorch/pytorch/issues/93173
|
| 41 |
+
|
| 42 |
+
compiler: trace these ops with plain-old-data schemas, then choose how to lower them.
|
| 43 |
+
eager: execute these 'functional' ops which in eager return AsyncCollectiveTensor subclasses,
|
| 44 |
+
automatically calling .wait() on underlying/hidden async 'work' obj only when fed to
|
| 45 |
+
a downstream op.
|
| 46 |
+
|
| 47 |
+
Issues:
|
| 48 |
+
* Where should these ops live? Couldn't `import torch` if putting these ops in existing torch.distributed files
|
| 49 |
+
* Proper support for eager requires inplace ops. We should explore having it as an option for the API.
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
"""
|
| 53 |
+
Functional collectives are asynchronous only and we perform implicit stream synchronization
|
| 54 |
+
on behalf of the user.
|
| 55 |
+
|
| 56 |
+
We use AsyncCollectiveTensor to wrap the result tensor of a collective and it lets us witness
|
| 57 |
+
first usage of the tensor and insert cross stream sync at the right place.
|
| 58 |
+
|
| 59 |
+
The above are the easy bits, the hard one is how we match the Work object returned by
|
| 60 |
+
c10d and the tensor AsyncCollectiveTensor wraps. We alloc the tensor inside the collective
|
| 61 |
+
op implementation (see ``clone()`` call in ``_all_reduce``) and then it's handled by the
|
| 62 |
+
dispatcher which might call other implementations that are allowed to change the returned
|
| 63 |
+
tensor - even return a tensor with a different shape (see ``torch.vmap``).
|
| 64 |
+
|
| 65 |
+
This means the caller of our ops receives a Tensor that is not guaranteed to be the same
|
| 66 |
+
allocated by our implementations and that makes pairing The AsyncTensor to the original
|
| 67 |
+
tensor a lot harder. This pairing is needed so we can lookup the Work object to use.
|
| 68 |
+
|
| 69 |
+
Originally, we tried WeakKeyDictionary to map from Tensor to Work, but because Tensor's
|
| 70 |
+
identity is not stable across dispatch, the op caller would end up with a different Tensor
|
| 71 |
+
instance that would not match any in the dictionary.
|
| 72 |
+
|
| 73 |
+
With Tensor identity out of the question, we decided use the tensor data pointer, which
|
| 74 |
+
should be stable across all the Tensor changes done during dispatch.
|
| 75 |
+
|
| 76 |
+
We have a dictionary of tensor::data_ptr -> Work that we insert right after we call into c10d.
|
| 77 |
+
|
| 78 |
+
We use this dictionary when AsyncCollectiveTensor is used to invoke Work::wait()
|
| 79 |
+
|
| 80 |
+
Finally, we setup a finalizer against the tensor wrapper to observe it getting collected so we
|
| 81 |
+
can clean up stale entries in the dictionary.
|
| 82 |
+
|
| 83 |
+
To eliminate the possibility of races we have a global version counter that is used by the finalizer.
|
| 84 |
+
|
| 85 |
+
As a wise man said once: Don't cross the streams (https://www.youtube.com/watch?v=wyKQe_i9yyo)
|
| 86 |
+
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
"""
|
| 90 |
+
Functional collectives can accept any of these types to describe the ranks participating in collectives.
|
| 91 |
+
|
| 92 |
+
The different types will be desugared to a canonical format
|
| 93 |
+
"""
|
| 94 |
+
RANK_TYPES = Union[
|
| 95 |
+
List[int],
|
| 96 |
+
List[List[int]],
|
| 97 |
+
dist.ProcessGroup,
|
| 98 |
+
DeviceMesh,
|
| 99 |
+
Tuple["dist._tensor.DeviceMesh", int],
|
| 100 |
+
str,
|
| 101 |
+
]
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
"""
|
| 105 |
+
User facing APIs for functional collectives
|
| 106 |
+
-------------------------------------------
|
| 107 |
+
|
| 108 |
+
These apis are called by user code and expected to work both in eager execution and compilation,
|
| 109 |
+
but there are significant differences to how the two modes are implemented underneath.
|
| 110 |
+
|
| 111 |
+
Eager execution is 'optimized' using a tensor subclass that schedules the synchronization (via wait_tensor() op)
|
| 112 |
+
just before the tensor is first used. Compiled tracing currently relies on the compiler to perform this optimization,
|
| 113 |
+
and cannot yet correctly trace the AsyncTensor wrapper class. In the future, these paths may be unified
|
| 114 |
+
if sufficient subclass support is added in dynamo.
|
| 115 |
+
|
| 116 |
+
Example: all_reduce is an entrypoint API, and other collectives follow a similar pattern.
|
| 117 |
+
|
| 118 |
+
Here's how it works under torch.compile/dynamo:
|
| 119 |
+
all_reduce(...)
|
| 120 |
+
|--> _expand_group(...) - desugars processgroup into canonical/traceable format
|
| 121 |
+
|--> c10d_functional.all_reduce(...) - dynamo captures this op call, doesn't trace deeper
|
| 122 |
+
|--> _maybe_wrap_tensor(...) - wait_tensor() op is immediately called, no AsyncTensor subclass needed
|
| 123 |
+
|
| 124 |
+
And under eager execution:
|
| 125 |
+
all_reduce(...)
|
| 126 |
+
|--> _expand_group(...) - same as above, but less critical for eager
|
| 127 |
+
|--> c10d_functional.all_reduce(...) - dispatches to real kernel OR records op in trace
|
| 128 |
+
|--> _maybe_wrap_tensor(...) - AsyncTensor wrapper applied to returned tensor,
|
| 129 |
+
which issues wait_tensor() at the time of first use
|
| 130 |
+
"""
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def wait_tensor(tensor):
    """
    Wait on a tensor returned by the collectives ops.

    Waiting follows device semantics, which means blocking on CPU and synchronizing streams on CUDA.
    """
    # Dispatch through the functional-collectives op so compiled traces
    # capture the wait explicitly.
    return torch.ops._c10d_functional.wait_tensor(tensor)  # type: ignore[attr-defined]
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def broadcast(self: torch.Tensor, src: int, group: RANK_TYPES, tag: str = ""):
    """
    Broadcasts the tensor to all processes in the given process group.

    Args:
        src (int): Source rank
        group (ProcessGroup or List[int]): The process group to work on.
        tag (str, optional): A unique identifier for the collective. Default: empty string
    """
    # Desugar the group descriptor to a canonical name, run the functional
    # op, then wrap the result so the wait happens lazily on first use.
    group_name = _resolve_group_name(group, tag)
    broadcasted = torch.ops._c10d_functional.broadcast(self, src, group_name)
    return _maybe_wrap_tensor(broadcasted)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def all_reduce(self: torch.Tensor, reduceOp: str, group: RANK_TYPES, tag: str = ""):
    """
    Reduces the tensor data across all machines in such a way that all get
    the final result.

    The input tensor is left unmodified.

    Group can be one of:
        List[int]: ranks participating in the collective.
        List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
        ProcessGroup: Will perform a collective using the ranks and tag of the PG.
        DeviceMesh: Do a SPMD collective over all ranks of the mesh
        (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh

    :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
    that information and perform collective algebraic optimization. Use other forms of input for that.
    """
    # The op takes the lowercase reduce-op name ("sum", "avg", ...).
    group_name = _resolve_group_name(group, tag)
    reduced = torch.ops._c10d_functional.all_reduce(
        self, reduceOp.lower(), group_name
    )
    return _maybe_wrap_tensor(reduced)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def all_gather_tensor(
    self: torch.Tensor,
    gather_dim: int,
    group: RANK_TYPES,
    tag: str = "",
):
    """
    Gather tensor data across from all machines and concatenate over ``gather_dim``.

    Note that it currently only supports gather_dim = 0.

    The input tensor is left unmodified.
    Group can be one of:
        List[int]: ranks participating in the collective.
        List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
        ProcessGroup: Will perform a collective using the ranks and tag of the PG.
        DeviceMesh: Do a SPMD collective over all ranks of the mesh
        (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh

    :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
    that information and perform collective algebraic optimization. Use other forms of input for that.
    """
    assert self.is_contiguous()
    group_name = _resolve_group_name(group, tag)
    group_size = c10d._get_group_size_by_name(group_name)
    gathered = torch.ops._c10d_functional.all_gather_into_tensor(
        self, group_size, group_name
    )
    res = _maybe_wrap_tensor(gathered)
    # TODO this should be done inside AsyncCollectiveTensor to delay the wait() call
    if gather_dim != 0:
        # torch.cat touches the data, so the wait must happen now anyway;
        # waiting explicitly first lets chunk + cat bypass ACT dispatch.
        if isinstance(res, AsyncCollectiveTensor):
            res = res.wait()  # type: ignore[attr-defined]
        res = torch.cat(torch.chunk(res, group_size, dim=0), dim=gather_dim)
    return res
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def all_gather_tensor_autograd(
    self: torch.Tensor,
    gather_dim: int,
    group: RANK_TYPES,
    tag: str = "",
):
    """
    Gather tensor data across from all machines and concatenate over ``gather_dim``.

    Note that it currently only supports gather_dim = 0.

    This function is the same as all_gather_tensor but will propagate the
    backwards gradient across workers.

    See all_gather_tensor for more details on usage.
    """
    group_name = _resolve_group_name(group, tag)
    group_size = c10d._get_group_size_by_name(group_name)

    # The autograd-enabled variant of the op; the _FromTorchTensor wrapper
    # hooks the result into the autograd graph.
    gathered = torch.ops._c10d_functional_autograd.all_gather_into_tensor(
        self, group_size, group_name
    )
    res = _FromTorchTensor.apply(gathered)
    # TODO this should be done inside AsyncCollectiveTensor to delay the wait() call
    if gather_dim != 0:
        # torch.cat touches the data, so the wait must happen now anyway;
        # waiting explicitly first lets chunk + cat bypass ACT dispatch.
        if isinstance(res, AsyncCollectiveTensor):
            res = res.wait()  # type: ignore[attr-defined]
        res = torch.cat(torch.chunk(res, group_size, dim=0), dim=gather_dim)
    return res
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def reduce_scatter_tensor(
    self: torch.Tensor,
    reduceOp: str,
    scatter_dim: int,
    group: RANK_TYPES,
    tag: str = "",
):
    """
    Reduces the tensor data across all machines in such a way that all get
    the final result, then scatter the results to corresponding ranks.

    The input tensor is left unmodified.
    Group can be one of:
        List[int]: ranks participating in the collective.
        List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
        ProcessGroup: Will perform a collective using the ranks and tag of the PG.
        DeviceMesh: Do a SPMD collective over all ranks of the mesh
        (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh
    :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
    that information and perform collective algebraic optimization. Use other forms of input for that.
    """
    group_name = _resolve_group_name(group, tag)
    group_size = c10d._get_group_size_by_name(group_name)

    # BUGFIX: the message used to hard-code "dimension 0" / self.size(0) even
    # when scattering over another dim, and was missing a closing parenthesis.
    assert (
        self.size(scatter_dim) % group_size == 0
    ), f"input dimension {scatter_dim} ({self.size(scatter_dim)}) must be a multiple of group_size {group_size}"
    if scatter_dim != 0:
        # The native op scatters over dim 0; move the scatter_dim shards there.
        tensor_list = torch.chunk(self, group_size, dim=scatter_dim)
        self = torch.cat(tensor_list)

    tensor = torch.ops._c10d_functional.reduce_scatter_tensor(
        self,
        reduceOp.lower(),
        group_size,
        group_name,  # type: ignore[possibly-undefined]
    )
    res = _maybe_wrap_tensor(tensor)
    return res
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def reduce_scatter_tensor_autograd(
    self: torch.Tensor,
    reduceOp: str,
    scatter_dim: int,
    group: RANK_TYPES,
    tag: str = "",
):
    """
    Reduces the tensor data across all machines in such a way that all get
    the final result, then scatter the results to corresponding ranks.

    This function is the same as reduce_scatter_tensor but will propagate the
    backwards gradient across workers.

    Currently only the "sum" reduceOp is supported.

    See reduce_scatter_tensor for more details on usage.
    """
    group_name = _resolve_group_name(group, tag)
    group_size = c10d._get_group_size_by_name(group_name)

    # BUGFIX: the message used to hard-code "dimension 0" / self.size(0) even
    # when scattering over another dim, and was missing a closing parenthesis.
    assert (
        self.size(scatter_dim) % group_size == 0
    ), f"input dimension {scatter_dim} ({self.size(scatter_dim)}) must be a multiple of group_size {group_size}"
    if scatter_dim != 0:
        # The native op scatters over dim 0; move the scatter_dim shards there.
        tensor_list = torch.chunk(self, group_size, dim=scatter_dim)
        self = torch.cat(tensor_list)

    tensor = torch.ops._c10d_functional_autograd.reduce_scatter_tensor(
        self,
        reduceOp.lower(),
        group_size,
        group_name,  # type: ignore[possibly-undefined]
    )
    res = _FromTorchTensor.apply(tensor)
    return res
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def all_reduce_coalesced(
    self: List[torch.Tensor], reduceOp: str, group: RANK_TYPES, tag: str = ""
) -> List[torch.Tensor]:
    """
    Reduces a list of tensors across all machines in such a way that all get
    the final result.

    All tensors in the input list are left unmodified.

    Group can be one of:
        List[int]: ranks participating in the collective.
        List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
        ProcessGroup: Will perform a collective using the ranks and tag of the PG.
        DeviceMesh: Do a SPMD collective over all ranks of the mesh
        (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh

    :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
    that information and perform collective algebraic optimization. Use other forms of input for that.
    """
    group_name = _resolve_group_name(group, tag)
    reduced = torch.ops._c10d_functional.all_reduce_coalesced(  # type: ignore[attr-defined]
        self,
        reduceOp.lower(),
        group_name,
    )
    # Wrap each result so the wait() is deferred until first use.
    return [_maybe_wrap_tensor(t) for t in reduced]
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def all_gather_into_tensor_coalesced(
    self: List[torch.Tensor], group: RANK_TYPES, tag: str = ""
) -> List[torch.Tensor]:
    """
    Gather a list of tensors across from all machines.

    Note that it currently only supports gather_dim = 0.

    The input tensors are left unmodified.
    Group can be one of:
        List[int]: ranks participating in the collective.
        List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
        ProcessGroup: Will perform a collective using the ranks and tag of the PG.
        DeviceMesh: Do a SPMD collective over all ranks of the mesh
        (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh

    :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
    that information and perform collective algebraic optimization. Use other forms of input for that.
    """
    group_name = _resolve_group_name(group, tag)
    group_size = c10d._get_group_size_by_name(group_name)
    gathered = torch.ops._c10d_functional.all_gather_into_tensor_coalesced(  # type: ignore[attr-defined]
        self,
        group_size,
        group_name,
    )
    # Wrap each result so the wait() is deferred until first use.
    return [_maybe_wrap_tensor(t) for t in gathered]
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def reduce_scatter_tensor_coalesced(
    inputs: List[torch.Tensor],
    reduceOp: str,
    scatter_dim: List[int],
    group: RANK_TYPES,
    tag: str = "",
) -> List[torch.Tensor]:
    """
    Reduces a list of tensors across all machines in such a way that all get
    the final result, then scatter the results to corresponding ranks.

    The input tensors are left unmodified.
    Group can be one of:
        List[int]: ranks participating in the collective.
        List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
        ProcessGroup: Will perform a collective using the ranks and tag of the PG.
        DeviceMesh: Do a SPMD collective over all ranks of the mesh
        (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh

    :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
    that information and perform collective algebraic optimization. Use other forms of input for that.
    """
    group_name = _resolve_group_name(group, tag)
    group_size = c10d._get_group_size_by_name(group_name)

    assert len(scatter_dim) == len(inputs)
    for idx, (dim, tensor) in enumerate(zip(scatter_dim, inputs)):
        # BUGFIX: the message was missing the closing parenthesis after the size.
        assert (
            tensor.size(dim) % group_size == 0
        ), f"input dimension {dim} ({tensor.size(dim)}) must be a multiple of group_size {group_size} for tensor at index {idx}"
        if dim != 0:
            # The native op scatters over dim 0; move the dim shards there.
            tensor_list = torch.chunk(tensor, group_size, dim=dim)
            inputs[idx] = torch.cat(tensor_list)

    tensor_list = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced(  # type: ignore[attr-defined]
        inputs,
        reduceOp.lower(),
        group_size,
        group_name,  # type: ignore[possibly-undefined]
    )

    return list(map(_maybe_wrap_tensor, tensor_list))
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
# This is a bit unsafe: it checks if the first argument in the schema reports as a non-mutable alias.
|
| 433 |
+
# Today, this maps 1:1 with "aten ops that are views".
|
| 434 |
+
def _is_view_op(tgt):
|
| 435 |
+
assert isinstance(tgt, torch._ops.OpOverload)
|
| 436 |
+
schema = tgt._schema
|
| 437 |
+
if len(schema.arguments) > 0:
|
| 438 |
+
first_arg = schema.arguments[0]
|
| 439 |
+
# check if op is a view
|
| 440 |
+
return first_arg.alias_info is not None and not first_arg.alias_info.is_write
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
def all_to_all_single(
    self: torch.Tensor,
    output_split_sizes: Optional[List[int]],
    input_split_sizes: Optional[List[int]],
    group: RANK_TYPES,
    tag: str = "",
) -> torch.Tensor:
    """
    Each process splits input tensor and then scatters the split list
    to all processes in a group. Then concatenate the received tensors from all
    the processes in the group and return single output tensor.

    Group can be one of:
        List[int]: ranks participating in the collective.
        List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
        ProcessGroup: Will perform a collective using the ranks and tag of the PG.
        DeviceMesh: Do a SPMD collective over all ranks of the mesh
        (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh

    :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
    that information and perform collective algebraic optimization. Use other forms of input for that.
    """
    # Split sizes must be plain or symbolic ints when given.
    for splits in (output_split_sizes, input_split_sizes):
        if splits is not None:
            assert all(
                isinstance(size, (int, torch.SymInt)) for size in splits
            ), splits
    group_name = _resolve_group_name(group, tag)
    group_size = c10d._get_group_size_by_name(group_name)
    if output_split_sizes is None or input_split_sizes is None:
        assert output_split_sizes is None and input_split_sizes is None, (
            "output_split_sizes and input_split_sizes must either be "
            "specified together or both set to None"
        )
        # Default to an even split of dim 0 across the group.
        output_split_sizes = [self.shape[0] // group_size] * group_size
        input_split_sizes = output_split_sizes
    tensor = torch.ops._c10d_functional.all_to_all_single(  # type: ignore[attr-defined]
        self,
        output_split_sizes,
        input_split_sizes,
        group_name,
    )
    return _maybe_wrap_tensor(tensor)
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
def all_to_all_single_autograd(
    self: torch.Tensor,
    output_split_sizes: Optional[List[int]],
    input_split_sizes: Optional[List[int]],
    group: RANK_TYPES,
    tag: str = "",
) -> torch.Tensor:
    """
    Same as all_to_all_single but supports autograd.
    """
    # Split sizes must be plain or symbolic ints when given.
    for splits in (output_split_sizes, input_split_sizes):
        if splits is not None:
            assert all(
                isinstance(size, (int, torch.SymInt)) for size in splits
            ), splits

    group_name = _resolve_group_name(group, tag)
    group_size = c10d._get_group_size_by_name(group_name)
    if output_split_sizes is None or input_split_sizes is None:
        assert output_split_sizes is None and input_split_sizes is None, (
            "output_split_sizes and input_split_sizes must either be "
            "specified together or both set to None"
        )
        # Default to an even split of dim 0 across the group.
        output_split_sizes = [self.shape[0] // group_size] * group_size
        input_split_sizes = output_split_sizes
    tensor = torch.ops._c10d_functional_autograd.all_to_all_single(  # type: ignore[attr-defined]
        self,
        output_split_sizes,
        input_split_sizes,
        group_name,
    )
    return _FromTorchTensor.apply(tensor)
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def permute_tensor(
    self: torch.Tensor,
    src_dst: List[int],
    group: RANK_TYPES,
    tag: str = "",
) -> torch.Tensor:
    """
    Permutes the elements of the tensor according to the given source/destination pairs. `src_dst` should
    be defined such that src_dst[m] == n means m sends to n.

    Group can be one of:
        List[int]: ranks participating in the collective.
        List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD.
        ProcessGroup: Will perform a collective using the ranks and tag of the PG.
        DeviceMesh: Do a SPMD collective over all ranks of the mesh
        (DeviceMesh, int): Do a MPMD collective over one
    """
    t, rankset, group_size = _expand_group(group, tag)
    local_pg = c10d._find_or_create_pg_by_ranks_and_tag(t, rankset, group_size)

    # Express the permutation as an all_to_all: this rank sends its whole
    # tensor to src_dst[rank] and receives from whichever rank targets it.
    my_rank = dist.get_rank(local_pg)
    output_split_sizes = [0] * group_size
    input_split_sizes = [0] * group_size
    for src, dst in enumerate(src_dst):
        if src == my_rank:
            input_split_sizes[dst] = self.numel()
        if dst == my_rank:
            output_split_sizes[src] = self.numel()

    return all_to_all_single(self, output_split_sizes, input_split_sizes, group, tag)
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
class AsyncCollectiveTensor(torch.Tensor):
    r"""
    A Tensor wrapper subclass that triggers a call to wait on the wrapped
    collective result the first time the underlying data is actually used.

    Use it inside functional collective pytorch wrappers like the following:
    def functional_collective(self, group, tag):
        tag, rankset, group_size = _expand_group(group, tag)
        tensor = torch.ops.c10d_functional.{collective}(self, tag, rankset, group_size)
        return _maybe_wrap_tensor(tensor)
    """
    # elem: the in-flight collective result; completed: whether we have
    # already issued the wait for it.
    elem: torch.Tensor
    completed: bool

    __slots__ = ["elem", "completed"]

    @staticmethod
    def __new__(cls, elem: torch.Tensor):
        wrapper = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
            cls,
            elem.size(),
            strides=elem.stride(),
            storage_offset=elem.storage_offset(),
            dtype=elem.dtype,
            layout=elem.layout,
            device=elem.device,
            requires_grad=elem.requires_grad,
        )
        wrapper.elem = elem
        wrapper.completed = False
        return wrapper

    def __tensor_flatten__(self):
        return ["elem"], None

    def tolist(self):
        return self.trigger_wait().tolist()

    @staticmethod
    def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
        assert meta is None
        return AsyncCollectiveTensor(inner_tensors["elem"])

    def __repr__(self):
        return f"AsyncCollectiveTensor({self.trigger_wait()})"

    def trigger_wait(self):
        # wait_tensor is idempotent and will do the stream sync only once;
        # remember that we waited so later accesses go straight to the data.
        if self.completed:
            return self.elem
        out = wait_tensor(self.elem)
        self.completed = True
        return out

    def wait(self) -> torch.Tensor:
        return wait_tensor(self.elem)

    def _get_acs_underlying_tensor(self):
        """This method enables _functional_collectives_impl to test if a tensor is an ACS"""
        return self.elem

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        if func == torch.ops.aten.view.default:
            # Fast-path aten.view (many view-related ops eventually lower to
            # it); this avoids the pytree round-trip below.
            return AsyncCollectiveTensor(func(args[0].elem, args[1]))

        is_view_op = _is_view_op(func)

        def unwrap(e: AsyncCollectiveTensor):
            # Non-view ops touch the data, so ensure the collective finished;
            # wait_tensor is idempotent and will do stream sync only once.
            if not is_view_op:
                return e.trigger_wait()
            return e.elem

        def wrap(e: torch.Tensor):
            assert not isinstance(e, AsyncCollectiveTensor)
            return AsyncCollectiveTensor(e)

        unwrapped_args = tree_map_only(AsyncCollectiveTensor, unwrap, args)
        unwrapped_kwargs = tree_map_only(AsyncCollectiveTensor, unwrap, kwargs)

        # we don't wrap the result as it doesn't need to be waited on.
        out = func(*unwrapped_args, **unwrapped_kwargs)

        # View ops don't require a sync, so we should re-wrap the outputs.
        if is_view_op:
            out = tree_map_only(torch.Tensor, wrap, out)

        return out

    def numpy(self):
        return self.wait().numpy()
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
"""
|
| 660 |
+
Utils and infrastructure for tracing support
|
| 661 |
+
"""
|
| 662 |
+
|
| 663 |
+
|
| 664 |
+
def _expand_group(group: RANK_TYPES, tag: str = "") -> Tuple[str, List[int], int]:
    """
    _expand_group desugars the different RANK_TYPES types into a canonical format that is traceable.

    By having this be part of the explicit eager codepath, we avoid having to specialize behavior inside
    torchdynamo and can still interoperate with processgroup objects or other untraceable forms.
    """
    # The cast helpers must live _inside_ this function: defining them at
    # module scope caused a dynamo graph break ('torch.* op returned
    # non-Tensor int') because cast_* was treated as a 'torch.*' op.
    if TYPE_CHECKING:

        def cast_listlistint(x):
            return cast(List[List[int]], x)

        def cast_listint(x):
            return cast(List[int], x)

    else:
        # fake cast op for use at runtime since dynamo doesn't support real cast
        # also, dynamo didn't like encountering 'typing' objects ()
        # NotImplementedError: argument of type: <class 'typing._GenericAlias'>
        def cast_listlistint(x):
            return x

        def cast_listint(x):
            return x

    rankset: List[int]
    if isinstance(group, list):
        if isinstance(group[0], list):
            # 2D mesh: flatten into one rankset, enforcing equal row sizes.
            nested_list = cast_listlistint(group)
            rankset = []
            group_size = -1
            for rs in nested_list:
                rankset.extend(rs)
                if group_size != -1 and group_size != len(rs):
                    raise ValueError(
                        f"group sizes must be identical found {group_size} and {len(rs)}"
                    )
                group_size = len(rs)
        else:
            rankset = cast_listint(group)
            group_size = len(rankset)
    elif isinstance(group, dist.ProcessGroup):
        rankset = dist.get_process_group_ranks(group)
        group_size = len(rankset)
        tag = tag or c10d._get_group_tag(group)
    elif isinstance(group, DeviceMesh):
        assert (
            group.ndim == 1
        ), "Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D"
        # TODO: it should run collective in the whole mesh instead of dim 0
        tag, rankset, _ = group._dim_group_infos[0]
        group_size = len(rankset)
    elif isinstance(group, tuple):
        if (
            len(group) == 2
            and isinstance(group[0], DeviceMesh)
            and isinstance(group[1], int)
        ):
            dmesh, dim = group
            tag, rankset, _ = dmesh._dim_group_infos[dim]
            group_size = len(rankset)
        else:
            raise ValueError("Invalid tuple for group must be (DeviceMesh, int)")
    else:
        raise ValueError(
            "Invalid type for group, must be one of List, Processgroup, DeviceMesh or (DeviceMesh, int)."
        )

    return (tag, rankset, group_size)
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
def _resolve_group_name(group: RANK_TYPES, tag: str = "") -> str:
    """
    Given group in RANK_TYPES, return the group name.
    """
    # `tag` will be deprecated. See details in:
    # https://github.com/pytorch/pytorch/issues/93173#issuecomment-1907095208
    if isinstance(group, dist.ProcessGroup):
        return group.group_name
    if isinstance(group, str):
        return group
    if isinstance(group, DeviceMesh):
        assert (
            group.ndim == 1
        ), "Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D"
        return group._dim_group_infos[0][2]
    if isinstance(group, tuple):
        if (
            len(group) == 2
            and isinstance(group[0], DeviceMesh)
            and isinstance(group[1], int)
        ):
            dmesh, dim = group
            return dmesh._dim_group_infos[dim][2]
        raise ValueError("Invalid tuple for group must be (DeviceMesh, int)")
    if isinstance(group, list):
        if not is_torchdynamo_compiling():
            warnings.warn(
                "The combination of ranks + tag as process group "
                "identifier has been deprecated. Please switch to "
                "using ProcessGroup, DeviceMesh, or group name instead.",
                FutureWarning,
                stacklevel=3,
            )
        return c10d._resolve_group_name_by_ranks_and_tag(cast(List[int], group), tag)
    raise ValueError(f"Unsupported group type: {type(group)}, {group}")
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
class _FromTorchTensor(torch.autograd.Function):
    """
    Autograd bridge from a plain Tensor into an AsyncCollectiveTensor:
    wraps on the forward pass and passes gradients through unchanged.
    """

    @staticmethod
    def forward(  # type: ignore[override]
        ctx,  # pyre-ignore[2]: Parameter must be annotated.
        input: torch.Tensor,
    ) -> torch.Tensor:
        return _maybe_wrap_tensor(input)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:  # type: ignore[override]
        # Identity in the backward direction.
        return grad_output
|
| 795 |
+
|
| 796 |
+
|
| 797 |
+
def _are_we_tracing() -> bool:
    """Best-effort check for whether we are currently compiling/tracing."""
    if is_torchdynamo_compiling():
        return True
    # If functionalization is turned on, we are almost definitely compiling/tracing.
    # (In particular, AOTAutograd traces a model once with functionalization on
    # but proxy tracing turned off, so this is how we detect it).
    if (
        torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL)
        is not None
    ):
        return True
    mode = get_innermost_proxy_mode()
    return mode is not None and mode.tracer is not None
|
| 812 |
+
|
| 813 |
+
|
| 814 |
+
def _maybe_wrap_tensor(self) -> torch.Tensor:
    """Wrap a collective result in AsyncCollectiveTensor for deferred waiting,
    except under tracing where we issue the wait eagerly."""
    if _are_we_tracing():
        return wait_tensor(self)
    return cast(torch.Tensor, AsyncCollectiveTensor(self))
|
| 819 |
+
|
| 820 |
+
|
| 821 |
+
def _all_gather_into_tensor_coalesced_meta(self, tag, rankset, group_size):
|
| 822 |
+
def mk_out_tensor(shard):
|
| 823 |
+
out_size = list(shard.size())
|
| 824 |
+
out_size[0] *= group_size
|
| 825 |
+
out_tensor = shard.new_empty(out_size)
|
| 826 |
+
return out_tensor
|
| 827 |
+
|
| 828 |
+
return [mk_out_tensor(t) for t in self]
|
| 829 |
+
|
| 830 |
+
|
| 831 |
+
# We now register meta kernels to deal with tracing
|
| 832 |
+
def _broadcast_meta(self, *args):
|
| 833 |
+
return torch.empty_like(self)
|
| 834 |
+
|
| 835 |
+
|
| 836 |
+
def _all_reduce_meta(self, *args):
|
| 837 |
+
return torch.empty_like(self)
|
| 838 |
+
|
| 839 |
+
|
| 840 |
+
def _wait_tensor_meta(self, *args):
|
| 841 |
+
return torch.empty_like(self)
|
| 842 |
+
|
| 843 |
+
|
| 844 |
+
def _all_gather_into_tensor_meta(shard, tag, rankset, group_size):
|
| 845 |
+
out_size = list(shard.size())
|
| 846 |
+
out_size[0] *= group_size
|
| 847 |
+
return shard.new_empty(out_size)
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
def _reduce_scatter_tensor_meta(input, reduce_op, tag, rankset, group_size):
|
| 851 |
+
out_size = list(input.size())
|
| 852 |
+
out_size[0] //= group_size
|
| 853 |
+
return input.new_empty(out_size)
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
def _all_reduce_coalesced_meta(self, *args):
|
| 857 |
+
return [torch.empty_like(t) for t in self]
|
| 858 |
+
|
| 859 |
+
|
| 860 |
+
def _all_reduce__meta(inp, *args):
|
| 861 |
+
return inp
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
def _broadcast__meta(inp, *args):
|
| 865 |
+
return inp
|
| 866 |
+
|
| 867 |
+
|
| 868 |
+
def _all_reduce_coalesced__meta(inputs, *args):
|
| 869 |
+
return inputs
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
def _reduce_scatter_tensor_coalesced_meta(inputs, reduceOp, tag, rankset, group_size):
|
| 873 |
+
def mk_out_tensor(input):
|
| 874 |
+
out_size = list(input.size())
|
| 875 |
+
out_size[0] //= group_size
|
| 876 |
+
out_tensor = input.new_empty(out_size)
|
| 877 |
+
return out_tensor
|
| 878 |
+
|
| 879 |
+
return [mk_out_tensor(t) for t in inputs]
|
| 880 |
+
|
| 881 |
+
|
| 882 |
+
# NB: We often say all_to_all has dynamic output size, but this is not
|
| 883 |
+
# technically true: instead, what typically happens is you manually
|
| 884 |
+
# communicate the output_split_sizes ahead of time (which is dynamic),
|
| 885 |
+
# but then you pass those sizes explicitly, and the all to all itself
|
| 886 |
+
# isn't dynamic, it just follows the specified output splits
|
| 887 |
+
def _all_to_all_single_meta(
|
| 888 |
+
input, output_split_sizes, input_split_sizes, *args, **kwargs
|
| 889 |
+
):
|
| 890 |
+
if output_split_sizes is None:
|
| 891 |
+
return input.new_empty(input.size())
|
| 892 |
+
else:
|
| 893 |
+
for s in output_split_sizes:
|
| 894 |
+
torch._check_is_size(s)
|
| 895 |
+
out_size = list(input.size())
|
| 896 |
+
out_size[0] = sum(output_split_sizes)
|
| 897 |
+
return input.new_empty(out_size)
|
| 898 |
+
|
| 899 |
+
|
| 900 |
+
def _all_gather_into_tensor_out_native_meta(input, group_size, group_name, *, out):
|
| 901 |
+
shape = list(input.size())
|
| 902 |
+
shape[0] *= group_size
|
| 903 |
+
return input.new_empty(shape)
|
| 904 |
+
|
| 905 |
+
|
| 906 |
+
def _all_gather_into_tensor_native_meta(input, group_size, group_name):
|
| 907 |
+
shape = list(input.size())
|
| 908 |
+
shape[0] *= group_size
|
| 909 |
+
return input.new_empty(shape)
|
| 910 |
+
|
| 911 |
+
|
| 912 |
+
def _all_gather_into_tensor_coalesced_native_meta(inputs, group_size, group_name):
    """Meta kernel: apply the single-tensor all_gather meta to every input."""
    return [
        _all_gather_into_tensor_native_meta(inp, group_size, group_name)
        for inp in inputs
    ]
|
| 917 |
+
|
| 918 |
+
|
| 919 |
+
def _reduce_scatter_tensor_native_meta(inp, reduce_op, group_size, group_name):
|
| 920 |
+
shape = list(inp.size())
|
| 921 |
+
shape[0] //= group_size
|
| 922 |
+
return inp.new_empty(shape)
|
| 923 |
+
|
| 924 |
+
|
| 925 |
+
def _reduce_scatter_tensor_coalesced_native_meta(
    inputs, reduce_op, group_size, group_name
):
    """Meta kernel: apply the single-tensor reduce_scatter meta to every input."""
    return [
        _reduce_scatter_tensor_native_meta(inp, reduce_op, group_size, group_name)
        for inp in inputs
    ]
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
if not torch._running_with_deploy():
    # Library MUST be defined at module scope or it doesn't work
    # Creating a "DEF" Library always crashes torch::deploy so we create our
    # Library instances here guarded against running inside it
    lib_impl = torch.library.Library("_c10d_functional", "IMPL")
    # Table of (op name, meta kernel) pairs registered below.
    _meta_kernels = [
        ("all_reduce", _all_reduce_meta),
        ("all_reduce_", _all_reduce__meta),
        ("all_reduce_coalesced", _all_reduce_coalesced_meta),
        ("all_reduce_coalesced_", _all_reduce_coalesced__meta),
        ("wait_tensor", _wait_tensor_meta),
        ("all_gather_into_tensor_out", _all_gather_into_tensor_out_native_meta),
        ("all_gather_into_tensor", _all_gather_into_tensor_native_meta),
        ("all_gather_into_tensor_coalesced", _all_gather_into_tensor_coalesced_native_meta),
        ("reduce_scatter_tensor", _reduce_scatter_tensor_native_meta),
        ("reduce_scatter_tensor_coalesced", _reduce_scatter_tensor_coalesced_native_meta),
        ("all_to_all_single", _all_to_all_single_meta),
        ("broadcast", _broadcast_meta),
        ("broadcast_", _broadcast__meta),
    ]
    for _op_name, _meta_kernel in _meta_kernels:
        lib_impl.impl(_op_name, _meta_kernel, "Meta")

    # Register legacy ops for backward compatibility
    # TODO(yifu): remove these in functional collective beta release
    legacy_lib = torch.library.Library("c10d_functional", "DEF")
    legacy_lib_impl = torch.library.Library("c10d_functional", "IMPL")
    ops_defs = [
        "broadcast(Tensor self, int src, str tag, int[] ranks, int group_size) -> Tensor",
        "all_reduce(Tensor self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor",
        "all_reduce_coalesced(Tensor[] self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor[]",
        "wait_tensor(Tensor self) -> Tensor",
        "all_gather_into_tensor(Tensor shard, str tag, int[] ranks, int group_size) -> Tensor",
        "all_gather_into_tensor_coalesced(Tensor[] input, str tag, int[] ranks, int group_size) -> Tensor[]",
        "reduce_scatter_tensor(Tensor input, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor",
        "reduce_scatter_tensor_coalesced(Tensor[] inputs, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor[]",
        "all_to_all_single(Tensor input, SymInt[]? output_split_sizes, SymInt[]? input_split_sizes, str tag, int[] ranks, int group_size) -> Tensor",  # noqa: B950
    ]

    my_module = sys.modules[__name__]
    for op_def in ops_defs:
        # The op name is everything before the '(' in the schema string.
        op_name = op_def[0 : op_def.index("(")]
        backend_impl = getattr(fun_col_impl, f"_{op_name}")
        legacy_lib.define(op_def, tags=torch.Tag.pt2_compliant_tag)
        legacy_lib_impl.impl(op_name, backend_impl, "CompositeImplicitAutograd")

else:
    warnings.warn(
        "PyTorch Distributed functional collectives do not work with torch::deploy."
    )
|
| 990 |
+
|
| 991 |
+
|
| 992 |
+
"""
|
| 993 |
+
Dynamo Remappings allow seamless translation from non-functional collectives of supportable form into
|
| 994 |
+
functional collective calls followed by inplace copy ops, allowing them to be traced into a functional graph.
|
| 995 |
+
|
| 996 |
+
We implement this by writing a decomposition and teaching dynamo how to associate it to a corresponding op via
|
| 997 |
+
the mapping dict below.
|
| 998 |
+
|
| 999 |
+
These schemas intentionally match torch.distributed.distributed_c10d.* ops that we are trying to remap from
|
| 1000 |
+
"""
|
| 1001 |
+
|
| 1002 |
+
|
| 1003 |
+
def all_gather_tensor_inplace(
    output_tensor: torch.Tensor,
    input_tensor: torch.Tensor,
    group,  # TODO add a type,
    async_op: bool = False,
    tag: str = "",
    gather_dim: int = 0,
):
    """Remap of the in-place c10d ``all_gather_into_tensor`` onto the functional collective.

    Gathers ``input_tensor`` across ``group`` along ``gather_dim`` and copies
    the result into ``output_tensor``. Only the synchronous form is supported.
    """
    # Functional collectives are synchronous; the async path cannot be remapped.
    assert not async_op, "Can't remap async version of inplace op to functional collective"

    group = group or dist.group.WORLD
    assert group is not None

    gathered = all_gather_tensor(input_tensor, gather_dim, group, tag)
    return output_tensor.copy_(gathered)
|
| 1019 |
+
|
| 1020 |
+
|
| 1021 |
+
def reduce_scatter_tensor_inplace(
    output: torch.Tensor,
    input: torch.Tensor,
    op: str = "sum",  # TODO type is actually c10d ReduceOp. is this ok?
    group=None,  # TODO add a type
    async_op: bool = False,
    scatter_dim: int = 0,
    tag: str = "",
):
    """Remap of the in-place c10d ``reduce_scatter_tensor`` onto the functional collective.

    Reduces ``input`` with ``op`` across ``group``, scatters along
    ``scatter_dim``, and copies this rank's slice into ``output``.
    """
    # Functional collectives are synchronous; the async path cannot be remapped.
    assert not async_op, "Can't remap async version of inplace op to functional collective"

    group = group or dist.group.WORLD
    assert group is not None

    reduced = reduce_scatter_tensor(input, op, scatter_dim, group, tag)
    return output.copy_(reduced)
|
| 1038 |
+
|
| 1039 |
+
|
| 1040 |
+
# Maps c10d ReduceOp enum members to the lowercase string names that the
# functional collective ops accept as their ``reduceOp`` argument.
REDUCE_OP_TO_STR = {
    dist.ReduceOp.SUM: "sum",
    dist.ReduceOp.AVG: "avg",
    dist.ReduceOp.PRODUCT: "product",
    dist.ReduceOp.MIN: "min",
    dist.ReduceOp.MAX: "max",
    dist.ReduceOp.BAND: "band",
    dist.ReduceOp.BOR: "bor",
    dist.ReduceOp.BXOR: "bxor",
}
|
| 1050 |
+
|
| 1051 |
+
|
| 1052 |
+
def all_reduce_inplace(
    tensor: torch.Tensor,
    op: str = "sum",
    group=None,
    async_op: bool = False,
    tag: str = "",
):
    """Remap of the in-place c10d ``all_reduce`` onto the functional collective.

    Reduces ``tensor`` with ``op`` across ``group`` and writes the reduced
    value back into ``tensor``.
    """
    # Functional collectives are synchronous; the async path cannot be remapped.
    assert not async_op, "Can't remap async version of inplace op to functional collective"

    group = group or dist.group.WORLD
    assert group is not None

    reduced = all_reduce(tensor, op, group, tag)
    return tensor.copy_(reduced)
|
| 1067 |
+
|
| 1068 |
+
|
| 1069 |
+
def all_to_all_inplace(
    output: torch.Tensor,
    input: torch.Tensor,
    output_split_sizes=None,
    input_split_sizes=None,
    group=None,
    async_op=False,
    tag: str = "",
):
    """Remap of the in-place c10d ``all_to_all_single`` onto the functional collective.

    Exchanges slices of ``input`` between all ranks of ``group`` and copies
    the received data into ``output``.
    """
    # Functional collectives are synchronous; the async path cannot be remapped.
    assert not async_op, "Can't remap async version of inplace op to functional collective"

    group = group or dist.group.WORLD
    assert group is not None

    shuffled = all_to_all_single(
        input,
        output_split_sizes,
        input_split_sizes,
        group,
        tag,
    )
    return output.copy_(shuffled)
|
| 1094 |
+
|
| 1095 |
+
|
| 1096 |
+
def all_gather_inplace(
    tensor_list: List[torch.Tensor],
    tensor: torch.Tensor,
    group=None,
    async_op=False,
    tag: str = "",
):
    """Remap of the in-place c10d ``all_gather`` onto the functional collective.

    Gathers ``tensor`` from every rank of ``group`` and copies each rank's
    slice of the gathered result into the corresponding entry of
    ``tensor_list``. All entries must match ``tensor`` in dim-0 size.
    """
    # Functional collectives are synchronous; the async path cannot be remapped.
    assert not async_op, "Can't remap async version of inplace op to functional collective"
    assert all(
        t.size(0) == tensor.size(0) for t in tensor_list
    ), "Remapping variable size all_gather is not yet supported"

    group = group or dist.group.WORLD
    assert group is not None

    gathered = all_gather_tensor(tensor, 0, group, tag)

    # Slice (rather than aten.split) so that tensor.shape(0) is not
    # unnecessarily baked in when it's a SymInt.
    start = 0
    for dst in tensor_list:
        length = dst.size(0)
        dst.copy_(gathered[start : start + length])
        start += length
    return tensor_list
|
| 1125 |
+
|
| 1126 |
+
|
| 1127 |
+
from torch.distributed.distributed_c10d import (
|
| 1128 |
+
_all_gather_base as legacy_all_gather_base,
|
| 1129 |
+
_reduce_scatter_base as legacy_reduce_scatter_base,
|
| 1130 |
+
all_gather as legacy_all_gather,
|
| 1131 |
+
all_gather_into_tensor as legacy_allgather,
|
| 1132 |
+
all_reduce as legacy_allreduce,
|
| 1133 |
+
all_to_all_single as legacy_all_to_all_single,
|
| 1134 |
+
reduce_scatter_tensor as legacy_reducescatter,
|
| 1135 |
+
)
|
| 1136 |
+
|
| 1137 |
+
# This dict should contain sets of functions that dynamo is allowed to remap.
|
| 1138 |
+
# Functions in this set should accept the same args/kwargs 1:1 as their mapping.
|
| 1139 |
+
# This dict should contain sets of functions that dynamo is allowed to remap.
# Functions in this set should accept the same args/kwargs 1:1 as their mapping.
# Note: the deprecated ``_*_base`` variants share the same inplace remaps as
# their modern tensor counterparts.
traceable_collective_remaps = {
    legacy_allgather: all_gather_tensor_inplace,
    legacy_reducescatter: reduce_scatter_tensor_inplace,
    legacy_allreduce: all_reduce_inplace,
    legacy_all_to_all_single: all_to_all_inplace,
    legacy_all_gather: all_gather_inplace,
    legacy_reduce_scatter_base: reduce_scatter_tensor_inplace,
    legacy_all_gather_base: all_gather_tensor_inplace,
}
|
parrot/lib/python3.10/site-packages/torch/distributed/_functional_collectives_impl.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.distributed.distributed_c10d as c10d
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
This file contains the op impls for the legacy (c10d_functional) functional collectives.
|
| 9 |
+
These impls simply call into the native (_c10d_functional) functional collectives.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _broadcast(input, src, tag, ranks, group_size):
    """Legacy c10d_functional broadcast: resolve the group name, then call the native op."""
    group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
    return torch.ops._c10d_functional.broadcast(input, src, group_name)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _all_reduce(input, reduce_op, tag, ranks, group_size):
    """Legacy c10d_functional all_reduce: resolve the group name, then call the native op."""
    group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
    return torch.ops._c10d_functional.all_reduce(input, reduce_op, group_name)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _all_reduce_coalesced(inputs, reduce_op, tag, ranks, group_size):
    """Legacy c10d_functional all_reduce_coalesced: forwards to the native op."""
    group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
    return torch.ops._c10d_functional.all_reduce_coalesced(inputs, reduce_op, group_name)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _all_gather_into_tensor(input, tag, ranks, group_size):
    """Legacy c10d_functional all_gather_into_tensor: forwards to the native op."""
    group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
    return torch.ops._c10d_functional.all_gather_into_tensor(input, group_size, group_name)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _all_gather_into_tensor_coalesced(input, tag, ranks, group_size):
    """Legacy c10d_functional all_gather_into_tensor_coalesced: forwards to the native op."""
    group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
    return torch.ops._c10d_functional.all_gather_into_tensor_coalesced(
        input, group_size, group_name
    )
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _reduce_scatter_tensor(
    input: torch.Tensor,
    reduce_op: str,
    tag: str,
    ranks: List[int],
    group_size: int,
):
    """Legacy c10d_functional reduce_scatter_tensor: forwards to the native op."""
    group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
    return torch.ops._c10d_functional.reduce_scatter_tensor(
        input, reduce_op, group_size, group_name
    )
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _reduce_scatter_tensor_coalesced(
    inputs: List[torch.Tensor],
    reduce_op: str,
    tag: str,
    ranks: List[int],
    group_size: int,
):
    """Legacy c10d_functional reduce_scatter_tensor_coalesced: forwards to the native op."""
    group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
    return torch.ops._c10d_functional.reduce_scatter_tensor_coalesced(
        inputs, reduce_op, group_size, group_name
    )
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _all_to_all_single(
    input: torch.Tensor,
    output_split_sizes: Optional[List[int]],
    input_split_sizes: Optional[List[int]],
    tag: str,
    ranks: List[int],
    group_size: int,
):
    """Legacy c10d_functional all_to_all_single: forwards to the native op.

    When no split sizes are given, ``input`` dim 0 is divided evenly across
    the ranks of the group.
    """
    if output_split_sizes is None or input_split_sizes is None:
        assert output_split_sizes is None and input_split_sizes is None, (
            "output_split_sizes and input_split_sizes must either be "
            "specified together or both set to None"
        )
        # Even split across ranks when no explicit sizes are provided.
        even_split = [input.shape[0] // group_size] * group_size
        output_split_sizes = even_split
        input_split_sizes = even_split

    group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
    return torch.ops._c10d_functional.all_to_all_single(
        input, output_split_sizes, input_split_sizes, group_name
    )
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def _wait_tensor(tensor: torch.Tensor) -> torch.Tensor:
    # Block until the collective that produced ``tensor`` completes, then
    # return the (now valid) tensor.
    return torch.ops._c10d_functional.wait_tensor(tensor)
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .api import (
|
| 2 |
+
_shard_tensor,
|
| 3 |
+
load_with_process_group,
|
| 4 |
+
shard_module,
|
| 5 |
+
shard_parameter,
|
| 6 |
+
)
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch.distributed._shard.metadata import ShardMetadata
|
| 3 |
+
from typing import Sequence
|
| 4 |
+
|
| 5 |
+
DEPRECATE_MSG = "Please use DTensor instead and we are deprecating ShardedTensor."
|
| 6 |
+
|
| 7 |
+
def narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor:
    """
    Narrow the tensor according to ``offsets`` and ``sizes``.
    """
    result = tensor
    for dim, (offset, size) in enumerate(zip(offsets, sizes)):
        # Only narrow dims where the shard is strictly smaller; skipping
        # full-size dims avoids autograd recording so 'local_shard' stays a
        # leaf variable in the autograd graph.
        if size < tensor.size(dim):
            result = result.narrow(dim, offset, size)
    return result
|
| 23 |
+
|
| 24 |
+
def narrow_tensor(tensor: torch.Tensor, metadata: ShardMetadata) -> torch.Tensor:
    """
    Narrow the tensor according to the metadata.
    """
    offsets, sizes = metadata.shard_offsets, metadata.shard_sizes
    return narrow_tensor_by_index(tensor, offsets, sizes)
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Keep old package for BC purposes, this file should be removed once
# everything moves to the `torch.distributed.checkpoint` package.
import sys
import torch
import warnings

from torch.distributed.checkpoint import *  # noqa: F403


with warnings.catch_warnings():
    # Force the warning to fire on every import of this shim, even if the
    # caller has filtered DeprecationWarning elsewhere.
    warnings.simplefilter("always")
    warnings.warn(
        "`torch.distributed._shard.checkpoint` will be deprecated, "
        "use `torch.distributed.checkpoint` instead",
        DeprecationWarning,
        stacklevel=2,
    )

# Alias the old module path to the new package so existing imports (and
# pickled references) keep resolving.
sys.modules['torch.distributed._shard.checkpoint'] = torch.distributed.checkpoint
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch.utils import _pytree as pytree
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
def _basic_validation(op, args=(), kwargs=None):
    """
    Common validation across all ops go in here.

    Raises:
        ValueError: if neither ``args`` nor ``kwargs`` carry any input.
        TypeError: if no ShardedTensor appears anywhere in the inputs.
        RuntimeError: if ShardedTensor inputs use different ProcessGroups.
    """
    # Imported lazily to avoid a circular import with sharded_tensor.
    from torch.distributed._shard.sharded_tensor import ShardedTensor

    if len(args) == 0 and (kwargs is None or len(kwargs) == 0):
        raise ValueError(f" No input for '{op.__name__}'!")

    # Validate types
    has_distributed_tensor = False

    def is_distributed_tensor(e):
        nonlocal has_distributed_tensor
        if isinstance(e, ShardedTensor):
            has_distributed_tensor = True

    # Walk every leaf of args/kwargs; at least one must be a ShardedTensor.
    pytree.tree_map_(is_distributed_tensor, args)
    pytree.tree_map_(is_distributed_tensor, kwargs)

    if not has_distributed_tensor:
        raise TypeError(
            f"torch function '{op.__name__}', with args: {args} and "
            f"kwargs: {kwargs} are called without any distributed tensor!"
        )

    # Validate all distributed tensors use the same PG.
    cur_pg: Optional[torch.distributed.ProcessGroup] = None

    def validate_pg(e):
        # Compares process groups by identity: the first ShardedTensor seen
        # fixes ``cur_pg``; any later mismatch is an error.
        nonlocal cur_pg
        if isinstance(e, ShardedTensor):
            if cur_pg is not None and e._process_group is not cur_pg:
                raise RuntimeError(
                    'All distributed tensors should use the '
                    'same ProcessGroup if used together in an op.'
                )
            cur_pg = e._process_group

    pytree.tree_map_(validate_pg, args)
    pytree.tree_map_(validate_pg, kwargs)
|
| 47 |
+
|
| 48 |
+
def _register_default_op(op, decorator):
    # Register a pass-through handler for ``op`` through ``decorator``
    # (typically a sharded-tensor __torch_function__ registration decorator).
    @decorator(op)
    def tensor_default_op(types, args=(), kwargs=None, pg=None):
        """
        Handles ``__torch_function__`` dispatch for the default tensor ops that
        behave the same as ``torch.Tensor`` such as ``torch.Tensor.shape`` or
        ``torch.Tensor.dtype``. We simply lower to the real op call with
        DisableTorchFunctionSubclass context like ``torch.Tensor.__torch_function__``
        to avoid recursions.
        """
        if kwargs is None:
            kwargs = {}

        # DisableTorchFunctionSubclass prevents re-entering this handler
        # when the real op is invoked on the subclass instance.
        with torch._C.DisableTorchFunctionSubclass():
            return op(*args, **kwargs)
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import List, Union, Optional
|
| 4 |
+
from functools import reduce
|
| 5 |
+
|
| 6 |
+
from torch.distributed.remote_device import _remote_device
|
| 7 |
+
|
| 8 |
+
@dataclass
class ShardMetadata:
    """
    Represents a shard of the overall Tensor including its
    offsets, lengths and device placement.

    Args:
        shard_offsets(List[int]): Offsets in the original tensor indicating
            the start offsets for this shard. Should have the same rank as
            the original tensor.
        shard_sizes(List[int]): Integers indicating the size of each
            dimension for this shard. Should have the same rank as the
            original tensor.
        placement(:class:`torch.distributed._remote_device`):
            Specifies the placement of this shard.

    Raises:
        ValueError: if offsets and sizes differ in length, or any entry is
            negative.
    """

    __slots__ = ['shard_offsets', 'shard_sizes', 'placement']

    shard_offsets: List[int]
    shard_sizes: List[int]
    placement: Optional[_remote_device]

    def __init__(
        self,
        shard_offsets: List[int],
        shard_sizes: List[int],
        placement: Optional[Union[str, _remote_device]] = None
    ):
        self.shard_offsets = shard_offsets
        self.shard_sizes = shard_sizes
        # Accept a device string for convenience; normalize to _remote_device.
        if isinstance(placement, str):
            self.placement = _remote_device(placement)
        else:
            self.placement = placement
        if len(self.shard_offsets) != len(self.shard_sizes):
            # BUGFIX: previously interpolated the sizes *list* itself here,
            # producing messages like "found 1 and [1, 2] respectively";
            # the element count was intended.
            raise ValueError(
                f'shard_offsets and shard_sizes should have '
                f'the same number of elements, found {len(self.shard_offsets)} '
                f'and {len(self.shard_sizes)} respectively')

        for i in range(len(self.shard_offsets)):
            if self.shard_offsets[i] < 0:
                raise ValueError('shard_offsets should be >=0')
            if self.shard_sizes[i] < 0:
                raise ValueError('shard_sizes should be >= 0')

    def __hash__(self):
        # Fold offsets, sizes and placement into a single integer. Shifting
        # the accumulator by 8 bits per element keeps nearby layouts from
        # trivially colliding; 37 is an arbitrary odd seed.
        def _hash_reduce(a, b):
            return (a << 8) + hash(b)

        res = reduce(_hash_reduce, self.shard_offsets, 37)
        res = reduce(_hash_reduce, self.shard_sizes, res)
        res = _hash_reduce(res, self.placement)
        return res
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.98 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (4.55 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import List, Union, Mapping, Dict, Any
|
| 3 |
+
|
| 4 |
+
import torch.optim as optim
|
| 5 |
+
from torch import Tensor
|
| 6 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ShardedOptimizer(optim.Optimizer):
    # Wraps a regular torch.optim optimizer so it can step over parameters
    # that are a mix of plain Tensors and ShardedTensors: only this rank's
    # local shards are handed to the wrapped optimizer.
    def __init__(
        self,
        named_params: Mapping[str, Union[Tensor, ShardedTensor]],
        optimizer_class,
        *optimizer_args,
        **optimizer_kwargs
    ):
        """
        ShardedOptimizer collects all tensors and local shard tensors of
        ShardedTensor, then use these tensors as ``params`` for optimizers

        Args:
            named_params (Dict[str, Union[Tensor, ShardedTensor]]) : a Dict
                of parameters, where key is the parameter key, value is either
                Tensor or ShardedTensor parameter.
            optimizer_class (torch.optim.Optimizer): the Optimizer to use
                locally, i.e. torch.optim.SGD, torch.optim.Adagrad, etc.
            *optimizer_args: the arguments to initialize the optimizer.
            **optimizer_kwargs: the key-word arguments to initialize the optimizer.

        """
        tensors: List[Tensor] = []
        for value in named_params.values():
            if isinstance(value, ShardedTensor):
                # Only this rank's local shards participate in the local step.
                for local_shard in value.local_shards():
                    tensors.append(local_shard.tensor)
            else:
                tensors.append(value)

        self.named_params = named_params
        # NOTE: optim.Optimizer.__init__ is deliberately not called; the
        # wrapped optimizer owns param_groups/state and we mirror them here.
        self._optim = optimizer_class(tensors, *optimizer_args, **optimizer_kwargs)
        self.param_groups = self._optim.param_groups
        self.state = self._optim.state

    def zero_grad(self, set_to_none: bool = True):  # type: ignore[override]
        r"""Resets the gradients of all optimized :class:`torch.Tensor` s.

        Args:
            set_to_none (bool): instead of setting to zero, set the grads to None.
                This will in general have lower memory footprint, and can modestly improve performance.
                However, it changes certain behaviors. For example:
                1. When the user tries to access a gradient and perform manual ops on it,
                a None attribute or a Tensor full of 0s will behave differently.
                2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
                are guaranteed to be None for params that did not receive a gradient.
                3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
                (in one case it does the step with a gradient of 0 and in the other it skips
                the step altogether).
        """
        self._optim.zero_grad(set_to_none)

    def step(self, closure=None):
        r"""Performs a single optimization step (parameter update).

        Args:
            closure (Callable): A closure that reevaluates the model and
                returns the loss. Optional for most optimizers.

        .. note::
            Unless otherwise specified, this function should not modify the
            ``.grad`` field of the parameters.
        """
        self._optim.step(closure)

    def state_dict(self) -> Dict[str, Any]:
        """
        Returned state and param_groups will contain parameter keys
        instead of parameter indices like torch.optim.Optimizer.
        This allows for advanced functionality like optimizer re-sharding to be implemented.
        """
        # TODO: implement state_dict
        raise NotImplementedError("ShardedOptimizer state_dict not implemented yet!")


    def load_state_dict(self, state_dict: Mapping[str, Any]):
        r"""Loads the ShardedOptimizer state.

        Args:
            state_dict (dict): ShardedOptimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        # TODO: implement load_state_dict
        raise NotImplementedError("ShardedOptimizer load_state_dict not implemented yet!")

    def add_param_group(self, param_group: Any):
        r"""Add a new param group
        """
        # TODO: implement add_param_group
        raise NotImplementedError("ShardedOptimizer add_param_group not implemented yet!")
|
parrot/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import abc
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class Sharder(abc.ABC):
    """Extension point for custom sharding strategies.

    Implementations can express sharding logic that is not easily composed
    from a ``ShardingSpec``. A
    :class:`torch.distributed._shard.sharding_plan.ShardingPlan` may carry a
    ``Sharder`` object and call :meth:`shard` on a module; the original
    module is then replaced by the sharded module that is returned.
    """

    @abc.abstractmethod
    def shard(self, module: nn.Module) -> nn.Module:
        """Return the sharded version of ``module``.

        Args:
            module (:class:`torch.nn.Module`):
                The module to apply sharding to.
        Returns:
            A :class:`torch.nn.Module` object that represents a module
            that's already been sharded.
        """
        pass
|
parrot/lib/python3.10/site-packages/torch/distributed/argparse_util.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
|
| 4 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 5 |
+
# All rights reserved.
|
| 6 |
+
#
|
| 7 |
+
# This source code is licensed under the BSD-style license found in the
|
| 8 |
+
# LICENSE file in the root directory of this source tree.
|
| 9 |
+
import os
|
| 10 |
+
from argparse import Action
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class env(Action):
    """Argparse action that falls back to the ``PET_{DEST}`` environment variable.

    Resolution order: explicit command-line value, then ``PET_{DEST}``, then
    the supplied ``default``. For boolean flags (e.g. ``--standalone``) use
    ``check_env`` instead.

    .. note:: when multiple option strings are specified, ``dest`` derives
        from the longest option string (for ``"-f", "--foo"`` the env var
        read is ``PET_FOO``, not ``PET_F``).

    Example:
    ::

        parser.add_argument("-f", "--foo", action=env, default="bar")

        ./program                                -> args.foo="bar"
        ./program -f baz                         -> args.foo="baz"
        PET_FOO="env_bar" ./program              -> args.foo="env_bar"
        PET_FOO="env_bar" ./program -f baz       -> args.foo="baz"

        parser.add_argument("-f", "--foo", action=env, required=True)

        ./program                                -> fails
        PET_FOO="env_bar" ./program              -> args.foo="env_bar"
    """

    def __init__(self, dest, default=None, required=False, **kwargs) -> None:
        resolved_default = os.environ.get(f"PET_{dest.upper()}", default)

        # ``required`` means the option must appear on the command line.
        # A value resolved from the environment (or a non-empty default)
        # already satisfies the need for a value, so drop the requirement.
        if resolved_default:
            required = False

        super().__init__(dest=dest, default=resolved_default, required=required, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # An explicit command-line value always wins over env/default.
        setattr(namespace, self.dest, values)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class check_env(Action):
    """Flag action (like ``store_true``) that also honors ``PET_{DEST}``.

    The flag is ``True`` when passed on the command line, or when the env
    var ``PET_{DEST}`` holds a non-zero integer; otherwise it takes the
    given ``default``.

    .. note:: passing ``default=True`` is redundant for flags — a flag
        should be ``True`` when present and ``False`` otherwise — and it
        lets the env var *disable* the flag, so avoid it.

    Example:
    ::

        parser.add_argument("--verbose", action=check_env)

        ./program                        -> args.verbose=False
        ./program --verbose              -> args.verbose=True
        PET_VERBOSE=1 ./program          -> args.verbose=True
        PET_VERBOSE=0 ./program          -> args.verbose=False
        PET_VERBOSE=0 ./program --verbose -> args.verbose=True
    """

    def __init__(self, dest, default=False, **kwargs) -> None:
        fallback = "1" if default else "0"
        env_value = os.environ.get(f"PET_{dest.upper()}", fallback)
        # Any non-zero integer in the env var turns the flag on.
        super().__init__(dest=dest, const=True, default=bool(int(env_value)), nargs=0, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Presence on the command line always forces the flag to True.
        setattr(namespace, self.dest, self.const)
|
parrot/lib/python3.10/site-packages/torch/distributed/autograd/__init__.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def is_available():
    """Return whether the C++ distributed-autograd bindings are compiled into this build."""
    return getattr(torch._C, "_dist_autograd_init", None) is not None
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
if is_available() and not torch._C._dist_autograd_init():
|
| 12 |
+
raise RuntimeError("Failed to initialize torch.distributed.autograd")
|
| 13 |
+
|
| 14 |
+
if is_available():
|
| 15 |
+
from torch._C._distributed_autograd import (
|
| 16 |
+
get_gradients,
|
| 17 |
+
backward,
|
| 18 |
+
_init,
|
| 19 |
+
_new_context,
|
| 20 |
+
_release_context,
|
| 21 |
+
_get_max_id,
|
| 22 |
+
_is_valid_context,
|
| 23 |
+
_retrieve_context,
|
| 24 |
+
_current_context,
|
| 25 |
+
_get_debug_info,
|
| 26 |
+
DistAutogradContext,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class context:
    '''
    Context object to wrap forward and backward passes when using
    distributed autograd. The ``context_id`` generated in the ``with``
    statement is required to uniquely identify a distributed backward pass
    on all workers. Each worker stores metadata associated with this
    ``context_id``, which is required to correctly execute a distributed
    autograd pass.

    Example::
        >>> # xdoctest: +SKIP
        >>> import torch.distributed.autograd as dist_autograd
        >>> with dist_autograd.context() as context_id:
        >>>     t1 = torch.rand((3, 3), requires_grad=True)
        >>>     t2 = torch.rand((3, 3), requires_grad=True)
        >>>     loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
        >>>     dist_autograd.backward(context_id, [loss])
    '''
    def __enter__(self):
        # Ask the native layer for a fresh autograd context and hand its
        # integer id to the `with` body; workers key their metadata on it.
        self.autograd_context = _new_context()
        return self.autograd_context._context_id()

    def __exit__(self, type, value, traceback):
        # Always release the native context — even when the body raised —
        # so per-pass state does not leak across backward passes.
        _release_context(self.autograd_context._context_id())
|
parrot/lib/python3.10/site-packages/torch/distributed/autograd/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.1 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/constants.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch._C._distributed_c10d import _DEFAULT_PG_TIMEOUT
from datetime import timedelta
from typing import Optional

__all__ = ['default_pg_timeout', 'default_pg_nccl_timeout']

# Default process group wide timeout, if applicable.
# This only applies to the non-nccl backends
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
default_pg_timeout: timedelta = _DEFAULT_PG_TIMEOUT
# Separate timeout for PGNCCL mainly because it's always been that way in the C++ layer, but until recently
# there was one default that applied across all backends in the python layer.
# Later, we could consider merging them back together at the c++ layer if we can align on a same value.
# (only if TORCH_NCCL_BLOCKING_WAIT or TORCH_NCCL_ASYNC_ERROR_HANDLING is set to 1).

try:
    from torch._C._distributed_c10d import _DEFAULT_PG_NCCL_TIMEOUT
    default_pg_nccl_timeout: Optional[timedelta] = _DEFAULT_PG_NCCL_TIMEOUT
except ImportError:
    # if C++ NCCL support is not compiled, we don't have access to the default nccl value.
    # if anyone is actually trying to use nccl in this state, it should error.
    default_pg_nccl_timeout = None
|
parrot/lib/python3.10/site-packages/torch/distributed/device_mesh.py
ADDED
|
@@ -0,0 +1,719 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates
|
| 3 |
+
import logging
|
| 4 |
+
import math
|
| 5 |
+
import threading
|
| 6 |
+
from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Union
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
from torch.distributed import is_available
|
| 11 |
+
|
| 12 |
+
from ..utils._typing_utils import not_none
|
| 13 |
+
|
| 14 |
+
__all__ = ["init_device_mesh", "DeviceMesh"]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
if not is_available():
    import sys

    # We need to create the stubs when distributed is not available.
    # Otherwise, we would fail the doc tests (```./.ci/pytorch/docs-test.sh```),
    # since it would try to import ``torch.distributed.device_mesh`` or
    # ``torch.distributed.init_device_mesh`` but cannot find them.

    class _DeviceMeshStub:
        pass

    def _init_device_mesh_stub():
        pass

    # Install the stubs directly on this module object so importers see them.
    sys.modules["torch.distributed.device_mesh"].DeviceMesh = _DeviceMeshStub  # type: ignore[attr-defined]
    sys.modules[
        "torch.distributed.device_mesh"
    ].init_device_mesh = _init_device_mesh_stub  # type: ignore[attr-defined]


else:
    # Real implementation: pull in the c10d process-group helpers the rest
    # of this module is built on.
    from torch.distributed.distributed_c10d import (
        _find_pg_by_ranks_and_tag,
        _get_default_group,
        _get_group_tag,
        get_process_group_ranks,
        get_rank,
        get_world_size,
        init_process_group,
        is_initialized,
        new_group,
        ProcessGroup,
    )

    logger = logging.getLogger(__name__)

    # only import numpy typing when type checking
    if TYPE_CHECKING:
        try:
            from numpy.typing import ArrayLike
        except ImportError:
            logger.warning(
                "DeviceMesh requires numpy >= 1.21 to be installed for type checking"
            )
|
| 61 |
+
|
| 62 |
+
class _MeshEnv(threading.local):
    """Thread-local bookkeeping for DeviceMesh usage.

    Tracks the stack of meshes entered via ``with``, the child->parent
    relationship for sliced submeshes, and per-dimension process-group
    options. Being ``threading.local``, every thread sees its own state.
    """

    def __init__(self) -> None:
        # Stack of meshes currently active as context managers.
        self.mesh_stack: List[DeviceMesh] = []
        # Maps a sliced child mesh back to the mesh it was sliced from.
        self.child_to_parent_mapping: Dict[DeviceMesh, DeviceMesh] = {}
        # Per-mesh-dimension (backend, pg_options) overrides for group creation.
        self.mesh_dim_group_options: Dict[
            int, Tuple[str, Optional[ProcessGroup.Options]]
        ] = {}

    def get_current_mesh(self) -> "DeviceMesh":
        """Return the innermost mesh entered via ``with``; raise if none."""
        if len(self.mesh_stack) == 0:
            raise RuntimeError("No device mesh is currently active!")
        return self.mesh_stack[-1]

    def create_child_mesh(
        self, parent_mesh: "DeviceMesh", submesh_dim_names: Tuple[str, ...]
    ) -> "DeviceMesh":
        """Slice ``parent_mesh`` down to the named dimensions and return the
        submesh that contains the current rank.

        The returned mesh reuses the parent's per-dim process groups rather
        than creating new ones (``_init_backend=False``).
        """
        # submesh_dims are the mesh dimension of the submesh in the parent mesh.
        submesh_dims = [
            not_none(parent_mesh.mesh_dim_names).index(mesh_dim_name)
            for mesh_dim_name in submesh_dim_names
        ]
        submesh_dim_sizes = [
            parent_mesh.mesh.size(mesh_dim) for mesh_dim in submesh_dims
        ]

        mesh_dims_remained = list(range(parent_mesh.mesh.ndim))
        for submesh_dim in submesh_dims:
            mesh_dims_remained.remove(submesh_dim)

        # pg_ranks_by_dim is the size of [number of local ranks of the outermost submesh dimension, *sub_mesh_dims]
        # This means on each local rank of the outermost slice mesh dim, we have a tensor of submesh size with
        # the pg ranks of the submesh. From this, we can extract the submesh mesh tensor contains the current rank.
        pg_ranks_by_dim = parent_mesh.mesh.permute(
            *mesh_dims_remained, *submesh_dims
        ).reshape(-1, *submesh_dim_sizes)

        cur_rank = parent_mesh.get_rank()
        for mesh_nd in pg_ranks_by_dim:
            submesh = DeviceMesh(
                parent_mesh.device_type,
                mesh_nd,
                mesh_dim_names=submesh_dim_names,
                _init_backend=False,
            )
            # Keep the candidate submesh that the current rank belongs to.
            if cur_rank in mesh_nd:
                res_submesh = submesh

        # NOTE(review): assumes the current rank appears in the parent mesh;
        # otherwise res_submesh is unbound here (hence the ignore below).
        res_submesh._parent_mesh = parent_mesh  # type: ignore[possibly-undefined]
        res_submesh._dim_group_infos = [
            parent_mesh._dim_group_infos[mesh_dim] for mesh_dim in submesh_dims  # type: ignore[possibly-undefined]
        ]
        self.child_to_parent_mapping[res_submesh] = parent_mesh

        return res_submesh

    def get_parent_mesh(self, device_mesh: "DeviceMesh") -> Optional["DeviceMesh"]:
        """Return the mesh ``device_mesh`` was sliced from, or None."""
        return self.child_to_parent_mapping.get(device_mesh, None)

    def get_parent_mesh_dim(self, device_mesh: "DeviceMesh") -> Optional[int]:
        """
        Return the index of the mesh dim in the parent mesh.
        The device_mesh passed in needs to be sliced out from a parent mesh.
        """
        parent_mesh = self.get_parent_mesh(device_mesh)
        child_mesh_dim_names = device_mesh.mesh_dim_names
        if parent_mesh and child_mesh_dim_names:
            assert (
                len(child_mesh_dim_names) == 1
            ), "The child mesh can only be a 1D mesh."
            child_mesh_dim_name = child_mesh_dim_names[0]
            return self.get_mesh_dim_by_name(parent_mesh, child_mesh_dim_name)
        return None

    @staticmethod
    def num_devices_per_host(device_type: str) -> int:
        # Delegates to e.g. torch.cuda.device_count() for device_type="cuda".
        return _get_device_handle(device_type).device_count()

    @staticmethod
    def num_hosts(device_type: str) -> int:
        # ProcessGroup can't tell us this info so we have to infer it, assume
        # homogeneous hardware for now
        return get_world_size() // _MeshEnv.num_devices_per_host(device_type)

    def get_mesh_dim_by_name(
        self, device_mesh: "DeviceMesh", mesh_dim_name: str
    ) -> int:
        """Map a dimension name to its positional index on ``device_mesh``.

        Raises:
            KeyError: if the mesh has no dim names, or the name is unknown.
        """
        if (
            device_mesh.mesh_dim_names is None
            or len(device_mesh.mesh_dim_names) == 0
        ):
            raise KeyError(
                "No `mesh_dim_names` found.",
            )
        if mesh_dim_name not in device_mesh.mesh_dim_names:
            raise KeyError(
                f"Mesh dimension '{mesh_dim_name}' does not exist.",
                f"Available mesh dimensions are: mesh_dim_names={device_mesh.mesh_dim_names}",
            )
        return not_none(device_mesh.mesh_dim_names.index(mesh_dim_name))

    def _set_mesh_dim_group_options(
        self,
        dim: int,
        backend: str,
        pg_options: Optional[ProcessGroup.Options] = None,
    ) -> None:
        # Record (backend, options) to apply when the group for `dim` is made.
        self.mesh_dim_group_options[dim] = (backend, pg_options)
|
| 169 |
+
|
| 170 |
+
# Module-level (but thread-local) registry of active meshes, child->parent
# slicing relationships, and per-dim process-group options.
_mesh_resources: _MeshEnv = _MeshEnv()
|
| 171 |
+
|
| 172 |
+
def _get_device_handle(device_type: str = "cuda"):
|
| 173 |
+
"""
|
| 174 |
+
Get the module corresponding to the device_type which is cuda or cuda-like device.
|
| 175 |
+
For example, when the device_type is cuda, the module `torch.cuda` is returned.
|
| 176 |
+
Return None when there is no corresponding module for device_type, otherwise
|
| 177 |
+
return the corresponding module.
|
| 178 |
+
"""
|
| 179 |
+
return getattr(torch, device_type, None)
|
| 180 |
+
|
| 181 |
+
class DeviceMesh:
|
| 182 |
+
"""
|
| 183 |
+
DeviceMesh represents a mesh of devices, where layout of devices could be
|
| 184 |
+
represented as a n-d dimension array, and each value of the n-d dimensional
|
| 185 |
+
array is the global id of the default process group ranks.
|
| 186 |
+
|
| 187 |
+
DeviceMesh could be used to describe the layout of devices across the cluster,
|
| 188 |
+
and serves as a proxy for communication among the device lists within the cluster.
|
| 189 |
+
|
| 190 |
+
DeviceMesh can be used as a context manager.
|
| 191 |
+
|
| 192 |
+
.. note::
|
| 193 |
+
DeviceMesh follows SPMD programming model, which means the same PyTorch Python program
|
| 194 |
+
is running on all processes/ranks in the cluster. Therefore, users need to make sure the
|
| 195 |
+
`mesh` array (which describes the layout of devices) should be identical across all ranks.
|
| 196 |
+
Inconsistent `mesh` will lead to silent hang.
|
| 197 |
+
|
| 198 |
+
Args:
|
| 199 |
+
device_type (str): The device type of the mesh. Currently supports: "cpu", "cuda/cuda-like".
|
| 200 |
+
mesh (ndarray): A multi-dimensional array or an integer tensor describing the layout
|
| 201 |
+
of devices, where the IDs are global IDs of the default process group.
|
| 202 |
+
|
| 203 |
+
Returns:
|
| 204 |
+
DeviceMesh: A :class:`DeviceMesh` object representing the device layout.
|
| 205 |
+
|
| 206 |
+
The following program runs on each process/rank in an SPMD manner. In this example, we have 2
|
| 207 |
+
hosts with 4 GPUs each.
|
| 208 |
+
A reduction over the first dimension of mesh will reduce across
|
| 209 |
+
columns (0, 4), .. and (3, 7), a reduction over the second dimension
|
| 210 |
+
of mesh reduces across rows (0, 1, 2, 3) and (4, 5, 6, 7).
|
| 211 |
+
|
| 212 |
+
Example::
|
| 213 |
+
>>> # xdoctest: +SKIP("no rank")
|
| 214 |
+
>>> from torch.distributed.device_mesh import DeviceMesh
|
| 215 |
+
>>>
|
| 216 |
+
>>> # Initialize device mesh as (2, 4) to represent the topology
|
| 217 |
+
>>> # of cross-host(dim 0), and within-host (dim 1).
|
| 218 |
+
>>> mesh = DeviceMesh(device_type="cuda", mesh=[[0, 1, 2, 3],[4, 5, 6, 7]])
|
| 219 |
+
"""
|
| 220 |
+
|
| 221 |
+
device_type: str
|
| 222 |
+
mesh: torch.Tensor
|
| 223 |
+
mesh_dim_names: Optional[Tuple[str, ...]]
|
| 224 |
+
|
| 225 |
+
def __init__(
    self,
    device_type: str,
    mesh: Union[torch.Tensor, "ArrayLike"],
    *,
    mesh_dim_names: Optional[Tuple[str, ...]] = None,
    _init_backend: bool = True,
) -> None:
    """Normalize the mesh tensor, optionally initialize process groups, and
    record this rank's coordinates within the mesh.

    Args:
        device_type: "cpu", "cuda", or a cuda-like accelerator name.
        mesh: CPU int tensor or array-like of global default-pg ranks.
        mesh_dim_names: optional name for each mesh dimension.
        _init_backend: when False, skip default-pg creation and per-dim
            subgroup setup (used when wrapping pre-existing groups).
    """
    self.device_type = device_type
    # The mesh holds global rank ids, so it must live on CPU.
    if isinstance(mesh, torch.Tensor) and mesh.device.type != "cpu":
        raise ValueError(f"`mesh` must be a CPU tensor, got {mesh}")
    self.mesh = (
        mesh.detach().to(dtype=torch.int)
        if isinstance(mesh, torch.Tensor)
        else torch.tensor(mesh, device="cpu", dtype=torch.int)
    )
    self.mesh_dim_names = tuple(mesh_dim_names) if mesh_dim_names else None

    # private field to pre-generate DeviceMesh's hash
    self._flatten_mesh_list = tuple(self.mesh.flatten().tolist())
    self._parent_mesh: Optional[DeviceMesh] = None
    self._thread_id = threading.get_ident()

    # Skip process group initialization if xla device or init backend is False
    # TODO(yeounoh) implement DeviceMesh backend and register XLA backend.
    if device_type != "xla":
        # always try to create default (world) pg, even if it is not initialized
        # already. The world pg is used for device mesh identity (rank) on each
        # process (we need to know if the current global rank is in the mesh or not).
        if _init_backend:
            self._get_or_create_default_group()
            self._init_process_groups()

        # calculate the coordinates of the current global rank on the mesh
        rank_coords = (self.mesh == get_rank()).nonzero()
        assert rank_coords.size(0) in (0, 1)
        self._coordinate_on_dim: Optional[List[int]] = (
            rank_coords[0].tolist() if rank_coords.size(0) > 0 else None
        )
|
| 264 |
+
|
| 265 |
+
def _get_or_create_default_group(self):
    """Ensure the default (world) process group exists, sanity-check the
    mesh against the world size, and return the default group.

    On first-time initialization this also binds the process to a local
    cuda-like device, assuming homogeneous hosts.
    """
    default_initialized = is_initialized()
    if not default_initialized:
        init_process_group()

    world_size = get_world_size()
    # A mesh cannot reference more ranks than the world contains.
    if self.mesh.numel() > world_size:
        raise RuntimeError(
            f"Mesh should not be bigger than default world size, but found {self.mesh.numel()} ranks!"
        )

    device_handle = _get_device_handle(self.device_type)
    # TODO: if user want to pass pg_options, offer a way to do it
    if not default_initialized and device_handle:
        # automatically set the current cuda/cuda-like device base on num of gpu devices available in each host
        # NOTE: This device selection would only work for homogeneous hardware.
        num_devices_per_host = device_handle.device_count()
        if (
            world_size > num_devices_per_host
            and world_size % num_devices_per_host != 0
        ):
            raise RuntimeError(
                f"DeviceMesh only support homogeneous hardware, but found "
                f"{world_size} ranks and {num_devices_per_host} {self.device_type} devices!"
            )
        device_handle.set_device(get_rank() % num_devices_per_host)

    return _get_default_group()
|
| 293 |
+
|
| 294 |
+
def _init_process_groups(self):
    """Create (or reuse) one process group per mesh dimension and record a
    ``(tag, ranks, group_name)`` triple for each in ``self._dim_group_infos``.

    For a 1-D mesh covering the entire world, the default group is reused;
    otherwise a subgroup is created for every slice along every dimension.
    """
    # tag/ranks/group_name associated with each mesh dimension, each
    # mesh dimension should have one sub-group per rank
    #
    # TODO(yifu): remove tag and ranks once we fully migrate to native
    # functional collectives. See details in:
    # https://github.com/pytorch/pytorch/issues/93173#issuecomment-1907095208
    dim_group_infos: List[Tuple[str, List[int], str]] = []

    if self.mesh.ndim == 1 and self.mesh.numel() == get_world_size():
        # if the mesh is the same as world_pg, we just append the default
        # pg to the first dim groups, as new_group cannot have the exact
        # same ranks as world
        dim_group_infos.append(
            (
                _get_group_tag(_get_default_group()),
                list(range(get_world_size())),
                _get_default_group().group_name,
            )
        )
    else:
        # create sub pgs base on the mesh argument specified
        for dim in range(self.mesh.ndim):
            # swap the current dim to the last dim
            # then reshape to flatten out other dims
            pg_ranks_by_dim = self.mesh.swapdims(-1, dim).reshape(
                -1, self.mesh.size(dim)
            )
            # multi-dim mesh, create subgroups by looping over the pg_ranks
            # for each dim and append the groups
            for dim_mesh in pg_ranks_by_dim:
                subgroup_ranks = dim_mesh.tolist()

                # Respect dim group options specified via _MeshEnv.set_dim_group_options().
                # Inherit from the parent group if no options are specified for the group.
                if dim in _mesh_resources.mesh_dim_group_options:
                    (
                        backend,
                        pg_options,
                    ) = _mesh_resources.mesh_dim_group_options[dim]
                else:
                    backend, pg_options = None, None

                # We temporarily revert the re-use subgroup, since it breaks two internal tests.
                # Temporarily reverting to resolve test timeout while root-causing.
                # TODO: Add two tests to cover internal tests scenarios and re-enable reuse subgroup if exists.
                dim_group = new_group(
                    ranks=subgroup_ranks,
                    backend=backend,
                    pg_options=pg_options,
                )

                # only add to dim_groups if the current rank in the subgroup
                if self.get_rank() in subgroup_ranks:
                    if len(dim_group_infos) > dim:
                        # BUG FIX: call get_rank() — the original interpolated
                        # the bound method object itself into the message.
                        raise RuntimeError(
                            f"Each device mesh dimension should get only one process group, but got {self.get_rank()} "
                            f"in {subgroup_ranks}!"
                        )
                    dim_group_infos.append(
                        (
                            _get_group_tag(not_none(dim_group)),
                            subgroup_ranks,
                            dim_group.group_name,
                        )
                    )
    self._dim_group_infos = dim_group_infos
|
| 361 |
+
|
| 362 |
+
def __enter__(self) -> "DeviceMesh":
    # set this mesh as the current mesh in mesh env, so code inside the
    # `with` block can retrieve it via _mesh_resources.get_current_mesh().
    _mesh_resources.mesh_stack.append(self)
    return self
|
| 366 |
+
|
| 367 |
+
# pyre-fixme[2]: Parameter must be annotated.
|
| 368 |
+
def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
    # pop this mesh from mesh env; exceptions are not suppressed.
    # Assumes enter/exit are balanced (guaranteed by `with` usage).
    _mesh_resources.mesh_stack.pop()
|
| 371 |
+
|
| 372 |
+
def __repr__(self) -> str:
    """Render the mesh tensor (and dim names, when present) for debugging."""
    ranks = f"{self.mesh.tolist()}"
    if self.mesh_dim_names:
        return f"DeviceMesh({ranks}, mesh_dim_names={self.mesh_dim_names})"
    return f"DeviceMesh({ranks})"
|
| 379 |
+
|
| 380 |
+
def __hash__(self):
    """Hash over the same structural fields as ``__eq__``; memoized on
    first use.

    NOTE(review): a computed hash that happens to be 0 (falsy) would be
    recomputed on every call — harmless, just uncached.
    """
    cached = getattr(self, "_hash", None)
    if not cached:
        cached = hash(
            (
                self._flatten_mesh_list,
                self.mesh.shape,
                self.device_type,
                self.mesh_dim_names,
                self._parent_mesh,
                self._thread_id,
            )
        )
        self._hash = cached
    return cached
|
| 395 |
+
|
| 396 |
+
def __eq__(self, other: object) -> bool:
    """Structural equality: identical flattened ranks, shape, device type,
    dim names, parent mesh, and owning thread. Identity short-circuits."""
    if self is other:
        return True
    if not isinstance(other, DeviceMesh):
        return False
    return (
        self._flatten_mesh_list == other._flatten_mesh_list
        and self.mesh.shape == other.mesh.shape
        and self.device_type == other.device_type
        and self.mesh_dim_names == other.mesh_dim_names
        and self._parent_mesh == other._parent_mesh
        and self._thread_id == other._thread_id
    )
|
| 410 |
+
|
| 411 |
+
def __getitem__(
    self, mesh_dim_names: Union[str, Tuple[str, ...]]
) -> "DeviceMesh":
    """
    Slice the current DeviceMesh based on the mesh_dim_name given to create a child
    DeviceMesh.

    Args:
        mesh_dim_name (Union[str, Tuple[str]]): the name or the tuple of names of the
        mesh dimension of the parent DeviceMesh to create the child DeviceMesh for.
    Returns:
        A :class:`DeviceMesh` object

    The following program runs on each process/rank in an SPMD manner. In this example, we have 2
    hosts with 4 GPUs each.
    Calling mesh["tp"] on rank 0, 1, 2, 3 would return a 1D child DeviceMesh:([0, 1, 2, 3]).
    Calling mesh["tp"] on rank 4, 5, 6, 7 would return a 1D child DeviceMesh:([4, 5, 6, 7]).
    Calling mesh["dp"] on rank 0, 4 would return a 1D child DeviceMesh:([0, 4]).
    Calling mesh["dp"] on rank 1, 5 would return a 1D child DeviceMesh:([1, 5]).
    Calling mesh["dp"] on rank 2, 6 would return a 1D child DeviceMesh:([2, 6]).
    Calling mesh["dp"] on rank 3, 7 would return a 1D child DeviceMesh:([3, 7]).

    Example::
        >>> # xdoctest: +SKIP("no rank")
        >>> from torch.distributed.device_mesh import DeviceMesh
        >>>
        >>> # Initialize device mesh as (2, 4) to represent the topology
        >>> # of cross-host(dim 0), and within-host (dim 1).
        >>> mesh = DeviceMesh(device_type="cuda", mesh=[[0, 1, 2, 3],[4, 5, 6, 7]])
    """
    if not self.mesh_dim_names:
        raise RuntimeError("Cannot slice a DeviceMesh without mesh_dim_names!")

    # Normalize a single name into a 1-tuple so both spellings share one path.
    mesh_dim_names = (
        (mesh_dim_names,) if isinstance(mesh_dim_names, str) else mesh_dim_names
    )

    error_msg = (
        f"Invalid mesh_dim_name {mesh_dim_names} specified. "
        f"Valid mesh_dim_names should be a contiguous subsequence of {self.mesh_dim_names}."
    )

    if mesh_dim_names == self.mesh_dim_names:
        return self
    elif len(mesh_dim_names) > len(self.mesh_dim_names) or not all(
        mesh_dim_name in self.mesh_dim_names for mesh_dim_name in mesh_dim_names
    ):
        raise KeyError(error_msg)
    # Check if the user-provided slicing is a valid contiguous subsequence of the mesh_dim_names
    # of the current DeviceMesh.
    else:
        outermost_dim_name = mesh_dim_names[0]
        outermost_dim_idx = self.mesh_dim_names.index(outermost_dim_name)
        # BUG FIX: the original sliced [idx : len(requested)] and compared
        # via zip (which truncates), so out-of-order requests such as
        # ("c", "b") could slip past validation. Compare the full aligned
        # window of dim names instead.
        expected_window = self.mesh_dim_names[
            outermost_dim_idx : outermost_dim_idx + len(mesh_dim_names)
        ]
        if tuple(expected_window) != tuple(mesh_dim_names):
            raise KeyError(error_msg)

        submesh = _mesh_resources.create_child_mesh(self, mesh_dim_names)
        return submesh
|
| 473 |
+
|
| 474 |
+
def get_group(self, mesh_dim: Optional[Union[int, str]] = None) -> ProcessGroup:
    """
    Returns the single ProcessGroup specified by mesh_dim, or, if mesh_dim is not specified and the
    DeviceMesh is 1-dimensional, returns the only ProcessGroup in the mesh.

    Args:
        mesh_dim (str/int, optional): it can be the name of the mesh dimension or the index
        of the mesh dimension. Default is None.

    Returns:
        A :class:`ProcessGroup` object.

    Raises:
        RuntimeError: if process groups were never initialized, or if the
            mesh is multi-dimensional and ``mesh_dim`` was not given.
    """
    if not hasattr(self, "_dim_group_infos"):
        raise RuntimeError("DeviceMesh process groups not initialized!")

    if self.mesh.ndim > 1 and mesh_dim is None:
        raise RuntimeError(
            f"Found the DeviceMesh have {self.mesh.ndim} dimensions",
            "Optional kwarg `mesh_dim` needs to be specified when device_mesh.ndim > 1.",
            "If you want to get the list of all the ProcessGroups in the DeviceMesh,"
            "please use `get_all_groups()` instead.",
        )

    if self.mesh.ndim == 1 and mesh_dim is None:
        mesh_dim = 0
    else:
        # Resolve a string dim name to its positional index; ints pass through.
        mesh_dim = (
            _mesh_resources.get_mesh_dim_by_name(self, mesh_dim)
            if isinstance(mesh_dim, str)
            else mesh_dim
        )

    # Re-resolve the group by its (tag, ranks) pair instead of caching the
    # ProcessGroup object itself.
    return not_none(
        _find_pg_by_ranks_and_tag(*self._dim_group_infos[mesh_dim][:2])  # type: ignore[index]
    )
|
| 509 |
+
|
| 510 |
+
def get_all_groups(self) -> List[ProcessGroup]:
    """
    Returns a list of ProcessGroups for all mesh dimensions.

    Returns:
        A list of :class:`ProcessGroup` object, one per mesh dimension,
        in dimension order.
    """
    return [self.get_group(i) for i in range(self.mesh.ndim)]
|
| 518 |
+
|
| 519 |
+
@staticmethod
def from_group(
    group: Union[ProcessGroup, List[ProcessGroup]],
    device_type: str,
    mesh: Optional[Union[torch.Tensor, "ArrayLike"]] = None,
    *,
    mesh_dim_names: Optional[Tuple[str, ...]] = None,
) -> "DeviceMesh":
    """
    Constructs a :class:`DeviceMesh` with ``device_type`` from an
    existing :class:`ProcessGroup`.

    The constructed device mesh has number of dimensions equal to the
    number of groups passed. If more than one group is passed, then the
    ``mesh`` argument is required.

    Raises:
        ValueError: if ``mesh`` disagrees with the group's ranks, if no
            groups are passed, or if ``mesh``'s ndim does not match the
            number of groups.
    """
    if isinstance(group, ProcessGroup):
        group_ranks = get_process_group_ranks(group)
        # BUG FIX: the original validated a tensor ``mesh`` with
        # ``mesh != group_ranks``, an elementwise tensor comparison whose
        # truthiness raises "Boolean value of Tensor ... is ambiguous" for
        # any multi-rank mesh. Normalize to a plain list of ranks first so
        # the comparison yields a single bool for tensors and array-likes.
        if mesh is not None:
            mesh_ranks = (
                mesh.tolist() if isinstance(mesh, torch.Tensor) else list(mesh)
            )
            if mesh_ranks != group_ranks:
                raise ValueError(
                    f"Invalid mesh {str(mesh)} for ProcessGroup with ranks {group_ranks}"
                )
        mesh = torch.tensor(group_ranks, device="cpu", dtype=torch.int)
        device_mesh = DeviceMesh(
            device_type,
            mesh,
            mesh_dim_names=mesh_dim_names,
            _init_backend=False,
        )
        device_mesh._dim_group_infos = [
            (_get_group_tag(group), group_ranks, group.group_name)
        ]
        return device_mesh
    groups = list(group)
    if len(groups) == 0:
        raise ValueError("Expects at least one ProcessGroup to be passed")
    if mesh is None:
        raise ValueError("Must pass mesh if passing multiple ProcessGroups")
    mesh = (
        mesh.detach().to(dtype=torch.int, device="cpu")
        if isinstance(mesh, torch.Tensor)
        else torch.tensor(mesh, device="cpu", dtype=torch.int)
    )
    if mesh.ndim != len(groups):
        raise ValueError(
            "Expects mesh with ndim equal to number of ProcessGroups but got "
            f"mesh {mesh.tolist()} and {len(groups)} ProcessGroups"
        )
    device_mesh = DeviceMesh(
        device_type, mesh, mesh_dim_names=mesh_dim_names, _init_backend=False
    )
    # One (tag, ranks, name) record per dimension, in the order passed.
    device_mesh._dim_group_infos = [
        (
            _get_group_tag(group),
            get_process_group_ranks(group),
            group.group_name,
        )
        for group in groups
    ]
    return device_mesh
|
| 581 |
+
|
| 582 |
+
def size(self, mesh_dim: Optional[int] = None) -> int:
    """Return the number of devices in the mesh, or along one dimension.

    Args:
        mesh_dim: index of the mesh dimension to measure. When ``None``
            (the default), the element count of the whole mesh is returned.
    """
    if mesh_dim is None:
        return self.mesh.numel()
    return self.mesh.size(mesh_dim)
|
| 584 |
+
|
| 585 |
+
@property
def ndim(self) -> int:
    """Number of dimensions of the device mesh (rank of the mesh tensor)."""
    return self.mesh.ndim
|
| 588 |
+
|
| 589 |
+
@property
def shape(self) -> Tuple[int, ...]:
    """Shape of the device mesh as a plain tuple, one entry per mesh dimension."""
    return tuple(self.mesh.shape)
|
| 592 |
+
|
| 593 |
+
def get_rank(self) -> int:
    """
    Returns the current global rank.
    """
    # Delegates to the module-level ``get_rank`` (queried on the default
    # process group), not a mesh-dimension-local rank.
    global_rank = get_rank()
    return global_rank
|
| 598 |
+
|
| 599 |
+
def get_local_rank(self, mesh_dim: Optional[Union[int, str]] = None) -> int:
    """
    Returns the local rank of the given mesh_dim of the DeviceMesh.

    Args:
        mesh_dim (str/int, optional): it can be the name of the mesh dimension or the index
            of the mesh dimension. Default is None.

    Returns:
        An integer denotes the local rank.

    The following program runs on each process/rank in an SPMD manner. In this example, we have 2
    hosts with 4 GPUs each.
    Calling mesh_2d.get_local_rank(mesh_dim=0) on rank 0, 1, 2, 3 would return 0.
    Calling mesh_2d.get_local_rank(mesh_dim=0) on rank 4, 5, 6, 7 would return 1.
    Calling mesh_2d.get_local_rank(mesh_dim=1) on rank 0, 4 would return 0.
    Calling mesh_2d.get_local_rank(mesh_dim=1) on rank 1, 5 would return 1.
    Calling mesh_2d.get_local_rank(mesh_dim=1) on rank 2, 6 would return 2.
    Calling mesh_2d.get_local_rank(mesh_dim=1) on rank 3, 7 would return 3.

    Example::
        >>> # xdoctest: +SKIP("no rank")
        >>> from torch.distributed.device_mesh import DeviceMesh
        >>>
        >>> # Initialize device mesh as (2, 4) to represent the topology
        >>> # of cross-host(dim 0), and within-host (dim 1).
        >>> mesh = DeviceMesh(device_type="cuda", mesh=[[0, 1, 2, 3],[4, 5, 6, 7]])
    """
    # A mesh with more than one dimension is ambiguous without an explicit
    # mesh_dim.  Fix: the original passed two positional args to
    # RuntimeError, which rendered the message as a tuple repr; build a
    # single readable string instead.
    if self.ndim > 1 and mesh_dim is None:
        raise RuntimeError(
            f"Found the DeviceMesh have {self.mesh.ndim} dimensions. "
            "Optional kwarg `mesh_dim` needs to be specified when device_mesh.ndim > 1."
        )
    elif mesh_dim is None:
        # 1-D mesh: the single dimension is implied.
        mesh_dim = 0

    mesh_dim_group = not_none(self.get_group(mesh_dim))
    assert isinstance(
        mesh_dim_group, ProcessGroup
    ), "We expect ProcessGroup before calling `get_rank`!"
    # Rank of this process within the sub-group that spans mesh_dim.
    return not_none(get_rank(mesh_dim_group))
|
| 640 |
+
|
| 641 |
+
def get_coordinate(self) -> Optional[List[int]]:
    """
    Return the relative indices of this rank relative to all
    dimensions of the mesh. If this rank is not part of the mesh, return None.
    """
    # An empty coordinate list means the rank is not in the mesh.
    return self._coordinate_on_dim or None
|
| 647 |
+
|
| 648 |
+
def init_device_mesh(
    device_type: str,
    mesh_shape: Tuple[int, ...],
    *,
    mesh_dim_names: Optional[Tuple[str, ...]] = None,
) -> DeviceMesh:
    """
    Initializes a `DeviceMesh` based on `device_type`, `mesh_shape`, and `mesh_dim_names` parameters.

    This creates a DeviceMesh with an n-dimensional array layout, where `n` is the length of `mesh_shape`.
    If `mesh_dim_names` is provided, each dimension is labeled as `mesh_dim_names[i]`.

    .. note::
        `init_device_mesh` follows SPMD programming model, meaning the same PyTorch Python program
        runs on all processes/ranks in the cluster. Ensure `mesh_shape` (the dimensions of the nD array
        describing device layout) is identical across all ranks. Inconsistent `mesh_shape` may lead to hanging.

    .. note::
        If no process group is found, init_device_mesh will initialize distributed process group/groups
        required for distributed communications behind the scene.

    Args:
        device_type (str): The device type of the mesh. Currently supports: "cpu", "cuda/cuda-like".
            Passing in a device type with a GPU index, such as "cuda:0", is not allowed.
        mesh_shape (Tuple[int]): A tuple defining the dimensions of the multi-dimensional array
            describing the layout of devices.
        mesh_dim_names (Tuple[str], optional): A tuple of mesh dimension names to assign to each dimension
            of the multi-dimensional array describing the layout of devices. Its length must match the length
            of `mesh_shape`. Each string in `mesh_dim_names` must be unique.

    Returns:
        DeviceMesh: A :class:`DeviceMesh` object representing the device layout.

    Example::
        >>> # xdoctest: +SKIP("no rank")
        >>> from torch.distributed.device_mesh import init_device_mesh
        >>>
        >>> mesh_1d = init_device_mesh("cuda", mesh_shape=(8,))
        >>> mesh_2d = init_device_mesh("cuda", mesh_shape=(2, 8), mesh_dim_names=("dp", "tp"))

    """
    # Fix: the original raised RuntimeError with two positional args, which
    # made the error message render as a tuple repr; each raise below builds
    # one readable string instead.
    if mesh_dim_names is not None:
        if len(set(mesh_dim_names)) != len(mesh_dim_names):
            raise RuntimeError(
                "Each mesh_dim_name must be unique. "
                f"Found repeated mesh_dim_name in mesh_dim_names {mesh_dim_names}"
            )

        if len(mesh_shape) != len(mesh_dim_names):
            raise RuntimeError(
                "mesh_shape and mesh_dim_names should have same length! "
                f"Found len(mesh_dim_names): {len(mesh_dim_names)} and len(mesh_shape):{len(mesh_shape)}."
            )

    # assume valid device types are all letters
    if device_type and not device_type.isalpha():
        raise RuntimeError(
            f"Device type with GPU index is not supported but got {device_type}. "
            "If you maintained a 'torch.device' object, it's recommended to pass in 'device.type'."
        )

    # Always initialize the mesh's tensor on CPU, regardless of what the
    # external device type has been set to be (e.g. meta)
    with torch.device("cpu"):
        mesh = torch.arange(math.prod(mesh_shape), dtype=torch.int).view(mesh_shape)
    device_mesh = DeviceMesh(
        device_type=device_type,
        mesh=mesh,
        mesh_dim_names=mesh_dim_names,
    )

    return device_mesh
|
parrot/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/launch.py
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
r"""
|
| 3 |
+
Module ``torch.distributed.launch``.
|
| 4 |
+
|
| 5 |
+
``torch.distributed.launch`` is a module that spawns up multiple distributed
|
| 6 |
+
training processes on each of the training nodes.
|
| 7 |
+
|
| 8 |
+
.. warning::
|
| 9 |
+
|
| 10 |
+
This module is going to be deprecated in favor of :ref:`torchrun <launcher-api>`.
|
| 11 |
+
|
| 12 |
+
The utility can be used for single-node distributed training, in which one or
|
| 13 |
+
more processes per node will be spawned. The utility can be used for either
|
| 14 |
+
CPU training or GPU training. If the utility is used for GPU training,
|
| 15 |
+
each distributed process will be operating on a single GPU. This can achieve
|
| 16 |
+
well-improved single-node training performance. It can also be used in
|
| 17 |
+
multi-node distributed training, by spawning up multiple processes on each node
|
| 18 |
+
for well-improved multi-node distributed training performance as well.
|
| 19 |
+
This will especially be beneficial for systems with multiple Infiniband
|
| 20 |
+
interfaces that have direct-GPU support, since all of them can be utilized for
|
| 21 |
+
aggregated communication bandwidth.
|
| 22 |
+
|
| 23 |
+
In both cases of single-node distributed training or multi-node distributed
|
| 24 |
+
training, this utility will launch the given number of processes per node
|
| 25 |
+
(``--nproc-per-node``). If used for GPU training, this number needs to be less
|
| 26 |
+
or equal to the number of GPUs on the current system (``nproc_per_node``),
|
| 27 |
+
and each process will be operating on a single GPU from *GPU 0 to
|
| 28 |
+
GPU (nproc_per_node - 1)*.
|
| 29 |
+
|
| 30 |
+
**How to use this module:**
|
| 31 |
+
|
| 32 |
+
1. Single-Node multi-process distributed training
|
| 33 |
+
|
| 34 |
+
::
|
| 35 |
+
|
| 36 |
+
python -m torch.distributed.launch --nproc-per-node=NUM_GPUS_YOU_HAVE
|
| 37 |
+
YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
|
| 38 |
+
arguments of your training script)
|
| 39 |
+
|
| 40 |
+
2. Multi-Node multi-process distributed training: (e.g. two nodes)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
|
| 44 |
+
|
| 45 |
+
::
|
| 46 |
+
|
| 47 |
+
python -m torch.distributed.launch --nproc-per-node=NUM_GPUS_YOU_HAVE
|
| 48 |
+
--nnodes=2 --node-rank=0 --master-addr="192.168.1.1"
|
| 49 |
+
--master-port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
|
| 50 |
+
and all other arguments of your training script)
|
| 51 |
+
|
| 52 |
+
Node 2:
|
| 53 |
+
|
| 54 |
+
::
|
| 55 |
+
|
| 56 |
+
python -m torch.distributed.launch --nproc-per-node=NUM_GPUS_YOU_HAVE
|
| 57 |
+
--nnodes=2 --node-rank=1 --master-addr="192.168.1.1"
|
| 58 |
+
--master-port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
|
| 59 |
+
and all other arguments of your training script)
|
| 60 |
+
|
| 61 |
+
3. To look up what optional arguments this module offers:
|
| 62 |
+
|
| 63 |
+
::
|
| 64 |
+
|
| 65 |
+
python -m torch.distributed.launch --help
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
**Important Notices:**
|
| 69 |
+
|
| 70 |
+
1. This utility and multi-process distributed (single-node or
|
| 71 |
+
multi-node) GPU training currently only achieves the best performance using
|
| 72 |
+
the NCCL distributed backend. Thus NCCL backend is the recommended backend to
|
| 73 |
+
use for GPU training.
|
| 74 |
+
|
| 75 |
+
2. In your training program, you must parse the command-line argument:
|
| 76 |
+
``--local-rank=LOCAL_PROCESS_RANK``, which will be provided by this module.
|
| 77 |
+
If your training program uses GPUs, you should ensure that your code only
|
| 78 |
+
runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
|
| 79 |
+
|
| 80 |
+
Parsing the local_rank argument
|
| 81 |
+
|
| 82 |
+
::
|
| 83 |
+
|
| 84 |
+
>>> # xdoctest: +SKIP
|
| 85 |
+
>>> import argparse
|
| 86 |
+
>>> parser = argparse.ArgumentParser()
|
| 87 |
+
>>> parser.add_argument("--local-rank", "--local_rank", type=int)
|
| 88 |
+
>>> args = parser.parse_args()
|
| 89 |
+
|
| 90 |
+
Set your device to local rank using either
|
| 91 |
+
|
| 92 |
+
::
|
| 93 |
+
|
| 94 |
+
>>> torch.cuda.set_device(args.local_rank) # before your code runs
|
| 95 |
+
|
| 96 |
+
or
|
| 97 |
+
|
| 98 |
+
::
|
| 99 |
+
|
| 100 |
+
>>> with torch.cuda.device(args.local_rank):
|
| 101 |
+
>>> # your code to run
|
| 102 |
+
>>> ...
|
| 103 |
+
|
| 104 |
+
.. versionchanged:: 2.0.0
|
| 105 |
+
|
| 106 |
+
The launcher will pass the ``--local-rank=<rank>`` argument to your script.
|
| 107 |
+
From PyTorch 2.0.0 onwards, the dashed ``--local-rank`` is preferred over the
|
| 108 |
+
previously used underscored ``--local_rank``.
|
| 109 |
+
|
| 110 |
+
For backward compatibility, it may be necessary for users to handle both
|
| 111 |
+
cases in their argument parsing code. This means including both ``"--local-rank"``
|
| 112 |
+
and ``"--local_rank"`` in the argument parser. If only ``"--local_rank"`` is
|
| 113 |
+
provided, the launcher will trigger an error: "error: unrecognized arguments:
|
| 114 |
+
--local-rank=<rank>". For training code that only supports PyTorch 2.0.0+,
|
| 115 |
+
including ``"--local-rank"`` should be sufficient.
|
| 116 |
+
|
| 117 |
+
3. In your training program, you are supposed to call the following function
|
| 118 |
+
at the beginning to start the distributed backend. It is strongly recommended
|
| 119 |
+
that ``init_method=env://``. Other init methods (e.g. ``tcp://``) may work,
|
| 120 |
+
but ``env://`` is the one that is officially supported by this module.
|
| 121 |
+
|
| 122 |
+
::
|
| 123 |
+
|
| 124 |
+
>>> torch.distributed.init_process_group(backend='YOUR BACKEND',
|
| 125 |
+
>>> init_method='env://')
|
| 126 |
+
|
| 127 |
+
4. In your training program, you can either use regular distributed functions
|
| 128 |
+
or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
|
| 129 |
+
training program uses GPUs for training and you would like to use
|
| 130 |
+
:func:`torch.nn.parallel.DistributedDataParallel` module,
|
| 131 |
+
here is how to configure it.
|
| 132 |
+
|
| 133 |
+
::
|
| 134 |
+
|
| 135 |
+
>>> model = torch.nn.parallel.DistributedDataParallel(model,
|
| 136 |
+
>>> device_ids=[args.local_rank],
|
| 137 |
+
>>> output_device=args.local_rank)
|
| 138 |
+
|
| 139 |
+
Please ensure that ``device_ids`` argument is set to be the only GPU device id
|
| 140 |
+
that your code will be operating on. This is generally the local rank of the
|
| 141 |
+
process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``,
|
| 142 |
+
and ``output_device`` needs to be ``args.local_rank`` in order to use this
|
| 143 |
+
utility
|
| 144 |
+
|
| 145 |
+
5. Another way to pass ``local_rank`` to the subprocesses is via the environment variable
|
| 146 |
+
``LOCAL_RANK``. This behavior is enabled when you launch the script with
|
| 147 |
+
``--use-env=True``. You must adjust the subprocess example above to replace
|
| 148 |
+
``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher
|
| 149 |
+
will not pass ``--local-rank`` when you specify this flag.
|
| 150 |
+
|
| 151 |
+
.. warning::
|
| 152 |
+
|
| 153 |
+
``local_rank`` is NOT globally unique: it is only unique per process
|
| 154 |
+
on a machine. Thus, don't use it to decide if you should, e.g.,
|
| 155 |
+
write to a networked filesystem. See
|
| 156 |
+
https://github.com/pytorch/pytorch/issues/12042 for an example of
|
| 157 |
+
how things can go wrong if you don't do this correctly.
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
"""
|
| 162 |
+
|
| 163 |
+
from typing_extensions import deprecated as _deprecated
|
| 164 |
+
|
| 165 |
+
from torch.distributed.run import get_args_parser, run
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def parse_args(args):
    """Build torchrun's argument parser, add the legacy ``--use-env`` flag, and parse *args*."""
    use_env_help = (
        "Use environment variable to pass "
        "'local rank'. For legacy reasons, the default value is False. "
        "If set to True, the script will not pass "
        "--local-rank as argument, and will instead set LOCAL_RANK."
    )
    parser = get_args_parser()
    parser.add_argument(
        "--use-env",
        "--use_env",
        action="store_true",
        default=False,
        help=use_env_help,
    )
    return parser.parse_args(args)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def launch(args):
    """Validate the parsed namespace and hand it off to the elastic runner."""
    # ``--no-python`` executes the script directly, so the local rank can
    # only be communicated through the LOCAL_RANK environment variable.
    if args.no_python:
        if not args.use_env:
            raise ValueError(
                "When using the '--no-python' flag,"
                " you must also set the '--use-env' flag."
            )
    run(args)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
@_deprecated(
    "The module torch.distributed.launch is deprecated\n"
    "and will be removed in future. Use torchrun.\n"
    "Note that --use-env is set by default in torchrun.\n"
    "If your script expects `--local-rank` argument to be set, please\n"
    "change it to read from `os.environ['LOCAL_RANK']` instead. See \n"
    "https://pytorch.org/docs/stable/distributed.html#launch-utility for \n"
    "further instructions\n",
    category=FutureWarning,
)
def main(args=None):
    """Module entry point: parse *args* (``sys.argv`` when None) and launch."""
    launch(parse_args(args))
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
if __name__ == "__main__":
|
| 208 |
+
main()
|
parrot/lib/python3.10/site-packages/torch/distributed/optim/__init__.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
|
| 3 |
+
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
|
| 4 |
+
optimizer locally on the workers where the parameters live. The distributed
|
| 5 |
+
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
|
| 6 |
+
apply the gradients on each worker.
|
| 7 |
+
"""
|
| 8 |
+
import warnings
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from torch import optim
|
| 12 |
+
|
| 13 |
+
from .apply_optimizer_in_backward import (
|
| 14 |
+
_apply_optimizer_in_backward,
|
| 15 |
+
_get_in_backward_optimizers,
|
| 16 |
+
)
|
| 17 |
+
from .functional_adadelta import _FunctionalAdadelta
|
| 18 |
+
|
| 19 |
+
from .functional_adagrad import _FunctionalAdagrad
|
| 20 |
+
from .functional_adam import _FunctionalAdam
|
| 21 |
+
from .functional_adamax import _FunctionalAdamax
|
| 22 |
+
from .functional_adamw import _FunctionalAdamW
|
| 23 |
+
from .functional_rmsprop import _FunctionalRMSprop
|
| 24 |
+
from .functional_rprop import _FunctionalRprop
|
| 25 |
+
from .functional_sgd import _FunctionalSGD
|
| 26 |
+
from .named_optimizer import _NamedOptimizer
|
| 27 |
+
from .utils import as_functional_optim
|
| 28 |
+
|
| 29 |
+
with warnings.catch_warnings():
|
| 30 |
+
warnings.simplefilter("always")
|
| 31 |
+
warnings.warn(
|
| 32 |
+
"`TorchScript` support for functional optimizers is deprecated "
|
| 33 |
+
"and will be removed in a future PyTorch release. "
|
| 34 |
+
"Consider using the `torch.compile` optimizer instead.",
|
| 35 |
+
DeprecationWarning,
|
| 36 |
+
stacklevel=2,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
# DistributedOptimizer imports torch.distributed.rpc names, so gate availability
|
| 40 |
+
# based on RPC being available.
|
| 41 |
+
if hasattr(torch._C, "_rpc_init"):
|
| 42 |
+
from .optimizer import DistributedOptimizer
|
| 43 |
+
|
| 44 |
+
from .post_localSGD_optimizer import PostLocalSGDOptimizer
|
| 45 |
+
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer
|
| 46 |
+
|
| 47 |
+
__all__ = ["as_functional_optim", "DistributedOptimizer", "PostLocalSGDOptimizer", "ZeroRedundancyOptimizer"]
|
parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.9 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/apply_optimizer_in_backward.cpython-310.pyc
ADDED
|
Binary file (4.16 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adam.cpython-310.pyc
ADDED
|
Binary file (4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamax.cpython-310.pyc
ADDED
|
Binary file (2.84 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rmsprop.cpython-310.pyc
ADDED
|
Binary file (2.73 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rprop.cpython-310.pyc
ADDED
|
Binary file (2.48 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/post_localSGD_optimizer.cpython-310.pyc
ADDED
|
Binary file (5.21 kB). View file
|
|
|