Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- vllm/lib/python3.10/site-packages/cupy/random/_bit_generator.cpython-310-x86_64-linux-gnu.so +3 -0
- vllm/lib/python3.10/site-packages/torch/_decomp/__init__.py +484 -0
- vllm/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/_decomp/decompositions.py +0 -0
- vllm/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py +335 -0
- vllm/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py +266 -0
- vllm/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/bin/protoc +3 -0
- vllm/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 +3 -0
- vllm/lib/python3.10/site-packages/torch/contrib/__init__.py +0 -0
- vllm/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py +143 -0
- vllm/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/nested/_internal/__init__.py +0 -0
- vllm/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/nested/_internal/ops.py +1675 -0
- vllm/lib/python3.10/site-packages/torch/nested/_internal/sdpa.py +871 -0
- vllm/lib/python3.10/site-packages/torch/optim/__init__.py +63 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/optim/_functional.py +84 -0
- vllm/lib/python3.10/site-packages/torch/optim/adam.py +803 -0
- vllm/lib/python3.10/site-packages/torch/optim/adamax.py +473 -0
- vllm/lib/python3.10/site-packages/torch/optim/asgd.py +465 -0
- vllm/lib/python3.10/site-packages/torch/optim/lbfgs.py +495 -0
- vllm/lib/python3.10/site-packages/torch/optim/lr_scheduler.py +2151 -0
- vllm/lib/python3.10/site-packages/torch/optim/optimizer.py +1052 -0
- vllm/lib/python3.10/site-packages/torch/optim/radam.py +608 -0
- vllm/lib/python3.10/site-packages/torch/optim/rmsprop.py +528 -0
- vllm/lib/python3.10/site-packages/torch/optim/swa_utils.py +467 -0
- vllm/lib/python3.10/site-packages/torch/quantization/__init__.py +86 -0
- vllm/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -1804,3 +1804,6 @@ vllm/lib/python3.10/site-packages/cupyx/cudnn.cpython-310-x86_64-linux-gnu.so fi
|
|
| 1804 |
vllm/lib/python3.10/site-packages/shapely/_geometry_helpers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1805 |
vllm/lib/python3.10/site-packages/watchfiles/_rust_notify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1806 |
vllm/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 1804 |
vllm/lib/python3.10/site-packages/shapely/_geometry_helpers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1805 |
vllm/lib/python3.10/site-packages/watchfiles/_rust_notify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1806 |
vllm/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1807 |
+
vllm/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 1808 |
+
vllm/lib/python3.10/site-packages/cupy/random/_bit_generator.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1809 |
+
vllm/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
|
vllm/lib/python3.10/site-packages/cupy/random/_bit_generator.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8710773f9a04fe743ff4fa4a7105412b69acaf19b5511a6bb1f2106294670043
|
| 3 |
+
size 1071656
|
vllm/lib/python3.10/site-packages/torch/_decomp/__init__.py
ADDED
|
@@ -0,0 +1,484 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import inspect
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from functools import wraps
|
| 5 |
+
from itertools import chain
|
| 6 |
+
from typing import Callable, Dict, List, Sequence, TypeVar, Union
|
| 7 |
+
from typing_extensions import ParamSpec
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.library
|
| 11 |
+
from torch._ops import HigherOrderOperator, OpOverload, OpOverloadPacket
|
| 12 |
+
from torch._prims_common import CustomOutParamAnnotation
|
| 13 |
+
from torch.utils import _pytree as pytree
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
"decomposition_table",
|
| 18 |
+
"pre_autograd_decomposition_table",
|
| 19 |
+
"meta_table",
|
| 20 |
+
"register_decomposition",
|
| 21 |
+
"get_decompositions",
|
| 22 |
+
"core_aten_decompositions",
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
_T = TypeVar("_T")
|
| 26 |
+
_P = ParamSpec("_P")
|
| 27 |
+
|
| 28 |
+
# TODO: relax key type here; torch registrations should be possible to; but
|
| 29 |
+
# right now this type is accurate
|
| 30 |
+
global_decomposition_table: Dict[
|
| 31 |
+
str, Dict[torch._ops.OperatorBase, Callable]
|
| 32 |
+
] = defaultdict(dict)
|
| 33 |
+
|
| 34 |
+
decomposition_table = global_decomposition_table["post_autograd"]
|
| 35 |
+
pre_autograd_decomposition_table = global_decomposition_table["pre_autograd"]
|
| 36 |
+
meta_table = global_decomposition_table["meta"]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _add_op_to_registry(registry, op, fn):
|
| 40 |
+
"""
|
| 41 |
+
This is an internal API for adding an op to the decomposition table.
|
| 42 |
+
|
| 43 |
+
If op is OpOverload, it will be added to the registry directly.
|
| 44 |
+
If op is OpOverloadPacket, all the valid op_overloads in the packet will be added to the registry.
|
| 45 |
+
"""
|
| 46 |
+
overloads: List[Union[torch._ops.OperatorBase]] = []
|
| 47 |
+
if isinstance(op, HigherOrderOperator):
|
| 48 |
+
# There's no concept of overloads for HigherOrderOperator
|
| 49 |
+
registry[op] = fn
|
| 50 |
+
return
|
| 51 |
+
elif isinstance(op, OpOverload):
|
| 52 |
+
overloads.append(op)
|
| 53 |
+
else:
|
| 54 |
+
assert isinstance(op, OpOverloadPacket)
|
| 55 |
+
for ol in op.overloads():
|
| 56 |
+
overloads.append(getattr(op, ol))
|
| 57 |
+
|
| 58 |
+
for op_overload in overloads:
|
| 59 |
+
if op_overload in registry:
|
| 60 |
+
raise RuntimeError(f"duplicate registrations for {op_overload}")
|
| 61 |
+
# TorchScript dumps a bunch of extra nonsense overloads
|
| 62 |
+
# which don't have corresponding dispatcher entries, we need
|
| 63 |
+
# to filter those out, e.g aten.add.float_int
|
| 64 |
+
if torch._C._dispatch_has_kernel(op_overload.name()):
|
| 65 |
+
registry[op_overload] = fn
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _convert_out_params(f):
|
| 69 |
+
out_annotation = f.__annotations__.get("out")
|
| 70 |
+
|
| 71 |
+
# If there are no out params, do not wrap the function.
|
| 72 |
+
if not out_annotation:
|
| 73 |
+
return f
|
| 74 |
+
|
| 75 |
+
# Hack to detect when out is a Tuple. There seems to be no pretty way of doing this
|
| 76 |
+
if getattr(out_annotation, "__origin__", None) is tuple:
|
| 77 |
+
sig = inspect.signature(f)
|
| 78 |
+
out_names = sig.return_annotation._fields
|
| 79 |
+
# If out is a tuple, we need to register a function that unpacks all the out
|
| 80 |
+
# elements as this is what native_functions.yaml expects
|
| 81 |
+
|
| 82 |
+
@wraps(f)
|
| 83 |
+
def _fn(*args, **kwargs):
|
| 84 |
+
out_kwargs = tuple(kwargs.pop(o, None) for o in out_names)
|
| 85 |
+
# Either all of the out kwargs are set or none of them
|
| 86 |
+
is_none = out_kwargs[0] is None
|
| 87 |
+
assert all((o is None) == is_none for o in out_kwargs)
|
| 88 |
+
return f(*args, **kwargs, out=None if is_none else out_kwargs)
|
| 89 |
+
|
| 90 |
+
out_params = [
|
| 91 |
+
inspect.Parameter(
|
| 92 |
+
o,
|
| 93 |
+
kind=inspect.Parameter.KEYWORD_ONLY,
|
| 94 |
+
default=None,
|
| 95 |
+
annotation=t,
|
| 96 |
+
)
|
| 97 |
+
for o, t in zip(out_names, out_annotation.__args__)
|
| 98 |
+
]
|
| 99 |
+
# Drop the out parameter and concatenate the new kwargs in the signature
|
| 100 |
+
params = chain((v for k, v in sig.parameters.items() if k != "out"), out_params)
|
| 101 |
+
_fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
|
| 102 |
+
parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
|
| 103 |
+
)
|
| 104 |
+
# Drop the out parameter and concatenate the new kwargs in the annotations
|
| 105 |
+
_fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"}
|
| 106 |
+
for o in out_params:
|
| 107 |
+
_fn.__annotations__[o.name] = o.annotation
|
| 108 |
+
|
| 109 |
+
# Propagate that this function is wrapped by `out_wrapper`
|
| 110 |
+
_fn._torch_decompositions_out_wrapper = f._torch_decompositions_out_wrapper # type: ignore[attr-defined]
|
| 111 |
+
|
| 112 |
+
return _fn
|
| 113 |
+
|
| 114 |
+
# Alternatively, there may be a single tensor out parameter with a name
|
| 115 |
+
# other than "out". This will need special treatment and is indicated by an
|
| 116 |
+
# annotation, which we will remove here so it is not exposed after wrapping.
|
| 117 |
+
custom_out_param_name = f.__annotations__.pop(CustomOutParamAnnotation, None)
|
| 118 |
+
if custom_out_param_name:
|
| 119 |
+
|
| 120 |
+
@wraps(f)
|
| 121 |
+
def _fn(*args, **kwargs):
|
| 122 |
+
out_kwarg = kwargs.pop(custom_out_param_name, None)
|
| 123 |
+
return f(*args, **kwargs, out=out_kwarg)
|
| 124 |
+
|
| 125 |
+
out_param = inspect.Parameter(
|
| 126 |
+
custom_out_param_name,
|
| 127 |
+
kind=inspect.Parameter.KEYWORD_ONLY,
|
| 128 |
+
default=None,
|
| 129 |
+
annotation=out_annotation,
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
# Drop the out parameter and concatenate the new kwarg in the signature
|
| 133 |
+
sig = inspect.signature(f)
|
| 134 |
+
params = chain(
|
| 135 |
+
(v for k, v in sig.parameters.items() if k != "out"), (out_param,)
|
| 136 |
+
)
|
| 137 |
+
_fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
|
| 138 |
+
parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
# Drop the out parameter and concatenate the new kwargs in the annotations
|
| 142 |
+
_fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"}
|
| 143 |
+
_fn.__annotations__[out_param.name] = out_param.annotation
|
| 144 |
+
|
| 145 |
+
return _fn
|
| 146 |
+
|
| 147 |
+
return f
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def register_decomposition(
|
| 151 |
+
aten_op, registry=None, *, type="post_autograd", unsafe=False
|
| 152 |
+
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
|
| 153 |
+
"""
|
| 154 |
+
A decorator to register a function as a decomposition to the Python
|
| 155 |
+
decomposition table. Use it like this::
|
| 156 |
+
|
| 157 |
+
@register_decomposition(torch.ops.aten.clamp_min)
|
| 158 |
+
def clamp_min(x):
|
| 159 |
+
return torch.clamp(self, min=min)
|
| 160 |
+
|
| 161 |
+
If you are writing a new decomposition, consider contributing it
|
| 162 |
+
directly to PyTorch in torch._decomp.decompositions.
|
| 163 |
+
|
| 164 |
+
This API is experimental; we are almost certainly going to extend
|
| 165 |
+
the API when we make decompositions eligible for use in transforms (e.g.,
|
| 166 |
+
autograd) and not just backend tracing, where we then need to know if a
|
| 167 |
+
decomposition can be used to simulate a transform.
|
| 168 |
+
|
| 169 |
+
By default, we also will register it to the Meta key of dispatcher,
|
| 170 |
+
and replace the c++ Meta implementation if there is already one.
|
| 171 |
+
|
| 172 |
+
unsafe kwarg is for reuse of this function for registering non-function
|
| 173 |
+
things
|
| 174 |
+
"""
|
| 175 |
+
|
| 176 |
+
assert type in {"post_autograd", "pre_autograd", "meta"}
|
| 177 |
+
|
| 178 |
+
def decomposition_decorator(fn: Callable[_P, _T]) -> Callable[_P, _T]:
|
| 179 |
+
orig_fn = fn
|
| 180 |
+
if not unsafe:
|
| 181 |
+
fn = _convert_out_params(fn)
|
| 182 |
+
|
| 183 |
+
nonlocal registry
|
| 184 |
+
if registry is None:
|
| 185 |
+
registry = global_decomposition_table[type]
|
| 186 |
+
|
| 187 |
+
def register(op):
|
| 188 |
+
_add_op_to_registry(registry, op, fn)
|
| 189 |
+
|
| 190 |
+
# To handle allowing multiple aten_ops at once
|
| 191 |
+
pytree.tree_map_(register, aten_op)
|
| 192 |
+
return orig_fn
|
| 193 |
+
|
| 194 |
+
return decomposition_decorator
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def get_decompositions(
|
| 198 |
+
aten_ops: Sequence[Union[torch._ops.OperatorBase, OpOverloadPacket]],
|
| 199 |
+
type: str = "post_autograd",
|
| 200 |
+
) -> Dict[torch._ops.OperatorBase, Callable]:
|
| 201 |
+
"""
|
| 202 |
+
Retrieve a dictionary of decompositions corresponding to the list of
|
| 203 |
+
operator overloads and overload packets passed as input. Overload
|
| 204 |
+
packets will include all decomposed overloads in the packet. If there is
|
| 205 |
+
no decomposition for a requested operator, it is silently ignored.
|
| 206 |
+
|
| 207 |
+
This API is experimental; we are almost certainly going to give an alternate,
|
| 208 |
+
more recommended formulation, where a user provides the set of operators
|
| 209 |
+
they know how to implement, and we provide decompositions for everything
|
| 210 |
+
not in this set.
|
| 211 |
+
"""
|
| 212 |
+
assert type in {"post_autograd", "pre_autograd", "meta"}
|
| 213 |
+
|
| 214 |
+
registry = global_decomposition_table[type]
|
| 215 |
+
packets_to_overloads = defaultdict(list)
|
| 216 |
+
for opo in registry:
|
| 217 |
+
if isinstance(opo, (OpOverload, OpOverloadPacket)):
|
| 218 |
+
packets_to_overloads[opo.overloadpacket].append(opo)
|
| 219 |
+
decompositions: Dict[torch._ops.OperatorBase, Callable] = {}
|
| 220 |
+
for op in aten_ops:
|
| 221 |
+
if isinstance(op, OpOverloadPacket) and op in packets_to_overloads:
|
| 222 |
+
for op_overload in packets_to_overloads[op]:
|
| 223 |
+
decompositions[op_overload] = registry[op_overload]
|
| 224 |
+
elif isinstance(op, (torch._ops.OperatorBase)) and op in registry:
|
| 225 |
+
decompositions[op] = registry[op]
|
| 226 |
+
return decompositions
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def remove_decompositions(
|
| 230 |
+
decompositions: Dict[torch._ops.OperatorBase, Callable],
|
| 231 |
+
aten_ops: Sequence[Union[OpOverload, OpOverloadPacket]],
|
| 232 |
+
) -> None:
|
| 233 |
+
"""
|
| 234 |
+
Given a dictionary of decompositions obtained from get_decompositions(), removes
|
| 235 |
+
operators associated with a list of operator overloads and overload packets passed
|
| 236 |
+
as input. If the decomposition dictionary does not contain a decomposition that is
|
| 237 |
+
specified to be removed, it is silently ignored.
|
| 238 |
+
"""
|
| 239 |
+
for op in aten_ops:
|
| 240 |
+
if isinstance(op, OpOverloadPacket):
|
| 241 |
+
for overload_name in op.overloads():
|
| 242 |
+
opo = getattr(op, overload_name)
|
| 243 |
+
decompositions.pop(opo, None)
|
| 244 |
+
elif isinstance(op, OpOverload):
|
| 245 |
+
decompositions.pop(op, None)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
# populate the table
|
| 249 |
+
import torch._decomp.decompositions
|
| 250 |
+
import torch._refs
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
# See NOTE [Core ATen Ops]
|
| 254 |
+
#
|
| 255 |
+
# list was copied from torch/_inductor/decomposition.py
|
| 256 |
+
# excluding decompositions that results in prim ops
|
| 257 |
+
# Resulting opset of decomposition is core aten ops
|
| 258 |
+
def core_aten_decompositions() -> Dict[torch._ops.OperatorBase, Callable]:
|
| 259 |
+
aten = torch.ops.aten
|
| 260 |
+
return get_decompositions(
|
| 261 |
+
[
|
| 262 |
+
aten.addcdiv,
|
| 263 |
+
aten.addcdiv_,
|
| 264 |
+
aten.addcmul,
|
| 265 |
+
aten.addcmul_,
|
| 266 |
+
aten.addr,
|
| 267 |
+
aten.affine_grid_generator,
|
| 268 |
+
aten.alias_copy,
|
| 269 |
+
aten.all,
|
| 270 |
+
aten.aminmax,
|
| 271 |
+
aten.arange.default,
|
| 272 |
+
aten.arange.start,
|
| 273 |
+
aten.avg_pool2d_backward,
|
| 274 |
+
aten.baddbmm,
|
| 275 |
+
aten.binary_cross_entropy,
|
| 276 |
+
aten.binary_cross_entropy_backward,
|
| 277 |
+
aten.binary_cross_entropy_with_logits,
|
| 278 |
+
aten.block_diag,
|
| 279 |
+
aten.celu,
|
| 280 |
+
aten.celu_,
|
| 281 |
+
aten.channel_shuffle,
|
| 282 |
+
aten.clamp_max,
|
| 283 |
+
aten.clamp_min,
|
| 284 |
+
aten.col2im,
|
| 285 |
+
aten.count_nonzero,
|
| 286 |
+
aten.linalg_cross,
|
| 287 |
+
aten.cudnn_batch_norm,
|
| 288 |
+
aten.cudnn_batch_norm_backward,
|
| 289 |
+
aten.miopen_batch_norm_backward,
|
| 290 |
+
aten.deg2rad,
|
| 291 |
+
aten.deg2rad_,
|
| 292 |
+
aten.detach,
|
| 293 |
+
aten.diag_embed,
|
| 294 |
+
aten.diagonal_backward,
|
| 295 |
+
aten.dot,
|
| 296 |
+
aten.vdot,
|
| 297 |
+
aten.elu,
|
| 298 |
+
aten.elu_,
|
| 299 |
+
aten.elu_backward,
|
| 300 |
+
aten._embedding_bag,
|
| 301 |
+
aten.embedding_dense_backward,
|
| 302 |
+
aten.empty_like,
|
| 303 |
+
aten._euclidean_dist.default,
|
| 304 |
+
aten.expand_as,
|
| 305 |
+
aten.expand_copy,
|
| 306 |
+
aten.eye,
|
| 307 |
+
aten.fill,
|
| 308 |
+
aten.fill_,
|
| 309 |
+
aten.floor_divide,
|
| 310 |
+
aten.frac,
|
| 311 |
+
aten.frac_,
|
| 312 |
+
aten._fused_moving_avg_obs_fq_helper,
|
| 313 |
+
aten.gelu_,
|
| 314 |
+
aten.gelu_backward,
|
| 315 |
+
aten.glu,
|
| 316 |
+
aten.glu_backward,
|
| 317 |
+
aten.hardshrink,
|
| 318 |
+
aten.hardsigmoid,
|
| 319 |
+
aten.hardsigmoid_,
|
| 320 |
+
aten.hardsigmoid_backward,
|
| 321 |
+
aten.hardswish,
|
| 322 |
+
aten.hardswish_,
|
| 323 |
+
aten.hardswish_backward,
|
| 324 |
+
aten.hardtanh_,
|
| 325 |
+
aten.hardtanh_backward,
|
| 326 |
+
aten.heaviside,
|
| 327 |
+
aten.heaviside_,
|
| 328 |
+
aten.huber_loss,
|
| 329 |
+
aten.huber_loss_backward,
|
| 330 |
+
aten.im2col,
|
| 331 |
+
aten.index_add,
|
| 332 |
+
aten.index_add_,
|
| 333 |
+
aten.index_copy,
|
| 334 |
+
aten.index_copy_,
|
| 335 |
+
aten.index_fill,
|
| 336 |
+
aten.index_fill_,
|
| 337 |
+
aten.isin,
|
| 338 |
+
aten.isneginf,
|
| 339 |
+
aten.isposinf,
|
| 340 |
+
aten.l1_loss,
|
| 341 |
+
aten._lazy_clone,
|
| 342 |
+
aten._test_parallel_materialize,
|
| 343 |
+
aten.leaky_relu_,
|
| 344 |
+
aten.leaky_relu_backward,
|
| 345 |
+
aten.lerp,
|
| 346 |
+
aten.lerp_,
|
| 347 |
+
aten.linspace,
|
| 348 |
+
aten.logaddexp,
|
| 349 |
+
aten.logaddexp2,
|
| 350 |
+
aten.logit,
|
| 351 |
+
aten.logit_,
|
| 352 |
+
aten.logit_backward,
|
| 353 |
+
aten.log_sigmoid_backward,
|
| 354 |
+
aten.log_sigmoid_forward,
|
| 355 |
+
aten._log_softmax_backward_data,
|
| 356 |
+
aten.logspace,
|
| 357 |
+
aten.logsumexp.default,
|
| 358 |
+
aten.masked_fill,
|
| 359 |
+
aten.masked_fill_,
|
| 360 |
+
aten.mish,
|
| 361 |
+
aten.mish_,
|
| 362 |
+
aten.mse_loss,
|
| 363 |
+
aten.mse_loss_backward,
|
| 364 |
+
aten.multi_margin_loss,
|
| 365 |
+
aten.multilabel_margin_loss_forward,
|
| 366 |
+
aten.mv,
|
| 367 |
+
aten.mvlgamma,
|
| 368 |
+
aten.mvlgamma_,
|
| 369 |
+
aten.nansum,
|
| 370 |
+
aten.nan_to_num,
|
| 371 |
+
aten.nan_to_num_,
|
| 372 |
+
aten.narrow,
|
| 373 |
+
aten.native_batch_norm_backward,
|
| 374 |
+
aten.native_dropout_backward,
|
| 375 |
+
aten.native_group_norm_backward,
|
| 376 |
+
aten.native_layer_norm_backward,
|
| 377 |
+
aten.new_empty,
|
| 378 |
+
aten.new_full,
|
| 379 |
+
aten.new_ones,
|
| 380 |
+
aten.new_zeros,
|
| 381 |
+
aten.nll_loss2d_forward,
|
| 382 |
+
aten.nll_loss2d_backward,
|
| 383 |
+
aten.nll_loss_backward,
|
| 384 |
+
aten.nll_loss_forward,
|
| 385 |
+
aten.norm,
|
| 386 |
+
aten.ones,
|
| 387 |
+
aten.ones_like,
|
| 388 |
+
aten.pixel_shuffle,
|
| 389 |
+
aten.pixel_unshuffle,
|
| 390 |
+
aten._prelu_kernel,
|
| 391 |
+
aten._prelu_kernel_backward,
|
| 392 |
+
aten._reshape_alias,
|
| 393 |
+
aten.rad2deg,
|
| 394 |
+
aten.rad2deg_,
|
| 395 |
+
aten.reflection_pad1d,
|
| 396 |
+
aten.reflection_pad1d_backward,
|
| 397 |
+
aten.reflection_pad2d,
|
| 398 |
+
aten.reflection_pad2d_backward,
|
| 399 |
+
aten.reflection_pad3d,
|
| 400 |
+
aten.reflection_pad3d_backward,
|
| 401 |
+
aten.replication_pad1d,
|
| 402 |
+
aten.replication_pad2d,
|
| 403 |
+
aten.replication_pad3d,
|
| 404 |
+
aten.renorm,
|
| 405 |
+
aten.renorm_,
|
| 406 |
+
aten.replication_pad2d,
|
| 407 |
+
aten.resize_as,
|
| 408 |
+
aten.roll,
|
| 409 |
+
aten.rot90,
|
| 410 |
+
aten.rrelu_with_noise,
|
| 411 |
+
aten.rrelu_with_noise_,
|
| 412 |
+
aten.rsub,
|
| 413 |
+
aten._safe_softmax,
|
| 414 |
+
aten._scaled_dot_product_flash_attention_for_cpu.default,
|
| 415 |
+
aten.select_backward,
|
| 416 |
+
aten.select_scatter,
|
| 417 |
+
aten.sgn,
|
| 418 |
+
aten.sgn_,
|
| 419 |
+
aten.sigmoid_backward,
|
| 420 |
+
aten.silu,
|
| 421 |
+
aten.silu_,
|
| 422 |
+
aten.silu_backward,
|
| 423 |
+
aten.sinc,
|
| 424 |
+
aten.sinc_,
|
| 425 |
+
aten.slice_backward,
|
| 426 |
+
aten.smooth_l1_loss,
|
| 427 |
+
aten.smooth_l1_loss_backward,
|
| 428 |
+
aten.soft_margin_loss,
|
| 429 |
+
aten.soft_margin_loss_backward,
|
| 430 |
+
aten._softmax_backward_data,
|
| 431 |
+
aten.softplus,
|
| 432 |
+
aten.softplus_backward,
|
| 433 |
+
aten.softshrink,
|
| 434 |
+
aten.special_entr,
|
| 435 |
+
aten.special_log_ndtr,
|
| 436 |
+
aten.special_xlog1py,
|
| 437 |
+
aten.split.Tensor,
|
| 438 |
+
aten.split_with_sizes_copy,
|
| 439 |
+
aten.squeeze.default,
|
| 440 |
+
aten.squeeze.dim,
|
| 441 |
+
aten.std,
|
| 442 |
+
aten.std_mean,
|
| 443 |
+
aten.stack,
|
| 444 |
+
aten.sum.default,
|
| 445 |
+
aten.sum.out,
|
| 446 |
+
aten.t,
|
| 447 |
+
aten.t_copy,
|
| 448 |
+
aten.take,
|
| 449 |
+
aten.tanh_backward,
|
| 450 |
+
aten.threshold,
|
| 451 |
+
aten.threshold_,
|
| 452 |
+
aten.threshold_backward,
|
| 453 |
+
aten.trace,
|
| 454 |
+
aten.transpose.int,
|
| 455 |
+
aten.tril,
|
| 456 |
+
aten.tril_,
|
| 457 |
+
aten.triu,
|
| 458 |
+
aten.triu_,
|
| 459 |
+
aten.unbind,
|
| 460 |
+
aten.unfold_backward,
|
| 461 |
+
aten.unfold_copy,
|
| 462 |
+
aten._unsafe_index,
|
| 463 |
+
aten._unsafe_index_put,
|
| 464 |
+
aten._unsafe_masked_index,
|
| 465 |
+
aten._unsafe_masked_index_put_accumulate,
|
| 466 |
+
aten.unsafe_split.Tensor,
|
| 467 |
+
aten.unsafe_split_with_sizes,
|
| 468 |
+
aten.unsqueeze_copy,
|
| 469 |
+
aten._unsafe_view,
|
| 470 |
+
aten.upsample_linear1d,
|
| 471 |
+
aten.upsample_bilinear2d,
|
| 472 |
+
aten.upsample_trilinear3d,
|
| 473 |
+
aten.upsample_nearest2d_backward,
|
| 474 |
+
aten.view_as_complex,
|
| 475 |
+
aten.xlogy,
|
| 476 |
+
aten.xlogy_,
|
| 477 |
+
aten.zero,
|
| 478 |
+
aten.zero_,
|
| 479 |
+
aten.zeros,
|
| 480 |
+
aten.zeros_like,
|
| 481 |
+
aten._chunk_cat,
|
| 482 |
+
aten._weight_norm_interface,
|
| 483 |
+
]
|
| 484 |
+
)
|
vllm/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (13.4 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc
ADDED
|
Binary file (6.73 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc
ADDED
|
Binary file (8.02 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/_decomp/decompositions.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
vllm/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
import inspect
|
| 4 |
+
from typing import Callable, Dict, List, Optional, Tuple
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch._decomp
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
from torch._prims_common.wrappers import _maybe_remove_out_wrapper
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
decomposition_table = torch._decomp.decomposition_table
|
| 13 |
+
decomposition_table_for_jvp: Dict[torch._ops.OperatorBase, Callable] = {}
|
| 14 |
+
register_decomposition = torch._decomp.register_decomposition
|
| 15 |
+
aten = torch.ops.aten
|
| 16 |
+
|
| 17 |
+
# NOTE: [forward-mode AD decompositions mechanism]
|
| 18 |
+
#
|
| 19 |
+
# The mechanism is in VariableType,
|
| 20 |
+
# IF any inputs have forward grad
|
| 21 |
+
# AND there is no forward AD formula implemented
|
| 22 |
+
# AND the functions is actually differentiable
|
| 23 |
+
# run the decomposition
|
| 24 |
+
# See run_jit_decomposition_with_args_for_jvp
|
| 25 |
+
# We currently use python decompositions that we torchscript.
|
| 26 |
+
#
|
| 27 |
+
# Note that we would be building the backward graph at the decomposed level
|
| 28 |
+
# too, but that is OK, because we would've errored out otherwise anyway.
|
| 29 |
+
#
|
| 30 |
+
# TODO: The mechanism we are using to register decompositions doesn't
|
| 31 |
+
# seem to be exclusively used for jvp. So open question here is whether
|
| 32 |
+
# torch/csrc/jit/runtime/decomposition_registry.cpp is being used for other things.
|
| 33 |
+
# If that is the case, we may go down the decomposition path unexpectedly
|
| 34 |
+
# (and possibly produce an unintelligible error) vs erroring out earlier and
|
| 35 |
+
# printing that the forward AD formula is not implemented.
|
| 36 |
+
#
|
| 37 |
+
# The solution to this may be to have a explicitly white list control when
|
| 38 |
+
# to enable the decomposition.
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def maybe_register_decomposition(op):
    """Best-effort variant of ``register_decomposition``.

    Returns a decorator that tries to register *f* as the decomposition for
    *op*; if registration raises for any reason, the function is returned
    unregistered instead of propagating the error.
    """

    def decorator(f):
        registered = f
        try:
            registered = register_decomposition(op)(f)
        except Exception:
            # Registration is optional here; fall back to the raw function.
            pass
        return registered

    return decorator
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# Functions where we need a special decomposition for jvp but there's another version that
# should be used more generally (ex. for jvp we need to recompute the mean and variance for
# the backwards of a normalization function. Without jvp, it should use the saved value).
# NOTE: this rebinds the name declared above; lookups below go through this dict.
decomposition_table_for_jvp = {}
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def register_decomposition_for_jvp(fn):
    """Register *fn* in the jvp-specific decomposition table, used when
    forward-mode AD needs a different decomposition (e.g. one that recomputes
    mean/var) than the generally-registered one."""
    return register_decomposition(fn, registry=decomposition_table_for_jvp)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _register_jit_decomposition_for_jvp(decomp, use_python=False):
    """TorchScript the Python decomposition for *decomp* and register the
    resulting graph with the JIT decomposition registry (used by the
    forward-mode AD fallback; see the note at the top of this file).

    Args:
        decomp: operator overload to register.
        use_python: if True, keep the decomposition as a Python call
            (``torch.jit.ignore``) wrapped in a generated scripted shim,
            instead of scripting the decomposition itself.

    Raises:
        RuntimeError: if no decomposition is registered for *decomp*.
    """
    # Prefer the jvp-specific table; fall back to the general one.
    if decomp in decomposition_table_for_jvp:
        decomposition_table_used = decomposition_table_for_jvp
    elif decomp in decomposition_table:
        decomposition_table_used = decomposition_table
    else:
        raise RuntimeError(f"could not find decomposition for {decomp}")
    decomp_fn = decomposition_table_used[decomp]

    # `out_wrapper` extends a decomposition's signature with
    # an `out` parameter. However jit will use the unwrapped function's
    # signature instead so we need to unwrap here to prevent an error
    decomp_fn = _maybe_remove_out_wrapper(decomp_fn)

    if use_python:
        decomp_fn = torch.jit.ignore(decomp_fn)
        sig = inspect.signature(decomp_fn)

        # Create a string wrapping the function from the signature
        # example output:
        # def wrapped_decomp(x: torch.Tensor, y: int, z: int):
        #   return decomp_fn(x, y, z)
        # Thanks copilot!
        def get_function_def(sig):
            param_def = [f"{param_str}" for param_str in sig.parameters.values()]
            param_use = [f"{param_str}" for param_str in sig.parameters.keys()]

            return f"def wrapped_decomp({', '.join(param_def)}):\n return decomp_fn({', '.join(param_use)})\n"

        f_str = get_function_def(sig)
        graph = torch.jit.CompilationUnit(f_str).wrapped_decomp.graph
    else:
        graph = torch.jit.script(decomp_fn).graph
    torch.jit._register_decomposition(decomp, graph)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
# The only decompositions here are temporary or hacks for the purposes of jvp
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# TODO: do these also belong here?
|
| 101 |
+
@maybe_register_decomposition(aten.trace.default)
def trace(self: Tensor) -> Tensor:
    """Decomposition of aten.trace: the sum of the main diagonal."""
    diagonal_elems = torch.diag(self)
    return torch.sum(diagonal_elems)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@maybe_register_decomposition(aten.log_sigmoid_forward.default)
def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]:
    """Decomposition of aten.log_sigmoid_forward.

    Returns (log_sigmoid(self), buffer); on CUDA the buffer is empty, while
    on other devices it holds exp(-|self|), matching the eager kernel's
    two-output contract.
    """
    zero = self.new_zeros(())
    min_part = torch.minimum(zero, self)
    z = torch.exp(-torch.abs(self))
    buffer = self.new_zeros((0,)) if self.is_cuda else z
    return min_part - torch.log1p(z), buffer
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def recompute_mean_var(
    input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool
):
    """Recompute (mean, rstd) of *input* over *inner_dim_indices*.

    For most norm decompositions the jvp version is identical to the core
    one except here: the statistics are recomputed from *input* so that they
    track gradients through it, instead of reusing the saved forward values.
    The forward's eps is recovered from the saved *rstd* via
    ``eps = 1/rstd**2 - var``.
    """
    mean = input.mean(dim=inner_dim_indices, keepdim=keepdim)
    var = input.var(dim=inner_dim_indices, unbiased=False, keepdim=keepdim)
    # Recover eps used by the forward pass; detach so it carries no grad.
    eps = (torch.pow(1 / rstd, 2) - var).detach()
    rstd = 1 / torch.sqrt(var + eps)
    return mean, rstd
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
@register_decomposition_for_jvp(aten.native_layer_norm_backward)
def native_layer_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    normalized_shape: List[int],
    mean: Tensor,
    rstd: Tensor,
    weight: Optional[Tensor],
    bias: Optional[Tensor],
    output_mask: List[bool],
) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
    """jvp decomposition of aten.native_layer_norm_backward.

    Same math as the generic decomposition, except mean/rstd are recomputed
    from ``input`` (via recompute_mean_var) so they carry gradients for
    forward-mode AD; the saved ``mean``/``rstd`` arguments are only used to
    recover eps.
    """
    input_shape = input.shape
    input_ndim = input.dim()

    # Split dims into outer (batch-like) and inner (normalized) groups.
    axis = input_ndim - len(normalized_shape)
    inner_dims = input_shape[axis:]
    outer_dims = input_shape[:axis]
    inner_dim_indices = list(range(axis, input_ndim))
    outer_dim_indices = list(range(0, axis))

    # N = numel of normalized dims, M = numel of outer dims.
    N = 1
    for i in inner_dims:
        N *= i
    M = 1
    for i in outer_dims:
        M *= i
    # Degenerate (empty) input: return zero grads of the right shapes.
    if M <= 0 or N <= 0:
        return (
            input.new_zeros(input_shape),
            input.new_zeros(input_shape[axis:]),
            input.new_zeros(input_shape[axis:]),
        )

    mean_, rstd_ = recompute_mean_var(input, rstd, inner_dim_indices, keepdim=True)

    x_hat = (input - mean_) * rstd_
    if weight is not None:
        grad_x_hat = grad_out * weight
    else:
        grad_x_hat = grad_out
    a = grad_x_hat * N
    b = torch.sum(grad_x_hat, inner_dim_indices, True)
    c1 = torch.mul(grad_x_hat, x_hat)
    c2 = torch.sum(c1, inner_dim_indices, True)
    c3 = torch.mul(x_hat, c2)
    inner = a - b - c3

    if output_mask[0]:
        d_input: Optional[Tensor] = (rstd_ / N) * inner
    else:
        d_input = torch.zeros_like(input)  # should be None but doesn't work with vjp

    if output_mask[1] and weight is not None:
        if len(outer_dim_indices) > 0:
            d_weight: Optional[Tensor] = torch.sum(
                grad_out * x_hat, outer_dim_indices, False
            )
        else:
            d_weight = grad_out * x_hat
    elif weight is not None:
        d_weight = torch.zeros_like(weight)  # should be None but doesn't work with vjp
    else:
        d_weight = torch.zeros(())  # should be None but doesn't work with vjp

    if output_mask[2] and bias is not None:
        if len(outer_dim_indices) > 0:
            d_bias: Optional[Tensor] = torch.sum(grad_out, outer_dim_indices, False)
        else:
            d_bias = grad_out.clone()
    elif bias is not None:
        d_bias = torch.zeros_like(bias)  # should be None but doesn't work with vjp
    else:
        d_bias = torch.zeros(())  # should be None but doesn't work with vjp

    return (d_input, d_weight, d_bias)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def prod(x: List[int]):
    """Return the product of all elements of *x* (1 for an empty list).

    Kept as an explicit loop (rather than math.prod) since this helper is
    pulled into TorchScript-scripted decompositions.
    """
    result = 1
    for value in x:
        result *= value
    return result
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
@register_decomposition_for_jvp(aten.native_batch_norm_backward)
def native_batch_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    weight: Optional[Tensor],
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_invstd: Optional[Tensor],
    train: bool,
    eps: float,
    output_mask: List[bool],
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
    """jvp decomposition of aten.native_batch_norm_backward.

    In training mode, mean/invstd are recomputed from ``input`` (via
    recompute_mean_var) so they track gradients for forward-mode AD; in eval
    mode the running statistics are used.
    """
    input_shape = input.shape
    input_rank = input.dim()
    assert input_rank >= 2, "rank of the input must be at least 2"

    # Channel dim is axis 1; num_features is the per-channel element count.
    axis = 1
    num_features = prod(input_shape) / input_shape[axis]  # type: ignore[arg-type]
    mean = save_mean
    invstd = save_invstd
    if train:
        assert (
            save_mean is not None and save_invstd is not None
        ), "when train=True, save_mean and save_invstd are required"

        # NOTE(review): local name 'reduciton_dims' is a typo kept as-is.
        reduciton_dims = [0] + list(range(2, input.dim()))
        assert invstd is not None  # for typing
        mean, invstd = recompute_mean_var(input, invstd, reduciton_dims, keepdim=False)
    else:
        assert running_mean is not None and running_var is not None
        mean = running_mean
        invstd = torch.rsqrt(running_var + eps)

    assert invstd is not None and mean is not None

    # Shape used to broadcast per-channel stats against the input.
    broadcast_mask = [1] * input_rank
    broadcast_mask[axis] = input_shape[axis]

    # All dims except the channel dim are reduced over.
    reduction_axes: List[int] = []
    for i in range(input_rank):
        if i != axis:
            reduction_axes.append(i)

    mean = torch.reshape(mean, broadcast_mask)
    norm = 1.0 / num_features
    grad_output_sum = torch.sum(grad_out, reduction_axes)
    dot_p = torch.sum(grad_out * (input - mean), reduction_axes)

    grad_mean = torch.reshape(grad_output_sum * norm, broadcast_mask)
    proj_scale = torch.reshape(torch.mul(dot_p * norm, invstd * invstd), broadcast_mask)

    if weight is None:
        grad_scale = torch.reshape(invstd, broadcast_mask) * 1.0
    else:
        grad_scale = torch.reshape(invstd * weight, broadcast_mask)

    if train:
        # Training: remove the projection onto x_hat and the mean component.
        proj = (input - mean) * proj_scale
        grad_input = ((grad_out - proj) - grad_mean) * grad_scale
    else:
        grad_input = grad_out * grad_scale

    if output_mask[1]:
        grad_weight = dot_p * invstd
    elif weight is not None:
        grad_weight = torch.zeros_like(
            weight
        )  # should be None but doesn't work with vjp
    else:
        grad_weight = torch.zeros(())  # should be None but doesn't work with vjp

    if output_mask[2]:
        grad_bias = grad_output_sum
    else:
        grad_bias = torch.zeros_like(
            grad_output_sum
        )  # should be None but doesn't work with vjp

    return (grad_input, grad_weight, grad_bias)
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
@register_decomposition_for_jvp(aten.batch_norm_backward)
def batch_norm_backward(
    grad_out: Tensor,
    input: Tensor,
    weight: Tensor,
    running_mean: Optional[Tensor],
    running_var: Optional[Tensor],
    save_mean: Optional[Tensor],
    save_var: Optional[Tensor],
    update: bool,
    eps: float,
    output_mask: List[bool],
    reserve: Tensor,
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
    """jvp decomposition of aten.batch_norm_backward.

    Thin adapter over native_batch_norm_backward; ``reserve`` is accepted for
    schema compatibility but not used.
    """
    return native_batch_norm_backward(
        grad_out,
        input,
        weight,
        running_mean,
        running_var,
        save_mean,
        save_var,
        update,
        eps,
        output_mask,
    )
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
# Eagerly torchscript + register the decompositions that the forward-mode AD
# fallback runs when an op has no dedicated forward AD formula (see the note
# at the top of this file).
_register_jit_decomposition_for_jvp(torch.ops.aten.trace.default, use_python=True)
_register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss_backward.default)
_register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss2d_backward.default)
_register_jit_decomposition_for_jvp(torch.ops.aten._log_softmax_backward_data.default)
_register_jit_decomposition_for_jvp(torch.ops.aten._softmax_backward_data.default)
_register_jit_decomposition_for_jvp(torch.ops.aten.log_sigmoid_forward.default)
_register_jit_decomposition_for_jvp(torch.ops.aten.native_layer_norm_backward.default)
_register_jit_decomposition_for_jvp(torch.ops.aten.native_batch_norm_backward.default)
_register_jit_decomposition_for_jvp(torch.ops.aten.cudnn_batch_norm_backward.default)
_register_jit_decomposition_for_jvp(torch.ops.aten.batch_norm_backward.default)
_register_jit_decomposition_for_jvp(torch.ops.aten.miopen_batch_norm_backward.default)
|
vllm/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
import functools
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
from typing import Callable, Dict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch._decomp as decomp
|
| 9 |
+
from torch._decomp import get_decompositions
|
| 10 |
+
from torch._ops import OpOverload
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
aten = torch.ops.aten

# Registry of RNG-op decompositions used for RNG functionalization.
rng_decompositions: Dict[str, Dict[OpOverload, Callable]] = defaultdict(dict)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def register_rng_decomposition(aten_op):
    """Decorator: register a decomposition for *aten_op* in rng_decompositions."""
    return decomp.register_decomposition(aten_op, rng_decompositions)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def throw_on_non_cuda(device):
    """Always raises RuntimeError: only CUDA's Philox/counter-based RNG can
    be functionalized, so any other device type is rejected."""
    dev = device.type
    msg = (
        f"You are trying to functionalize a {dev} RNG operator but {dev} does not "
        f"use Philox/counter-based RNG. Therefore, functionalizing a {dev} RNG operator is "
        "not supported. We are discussing the possibility of a Philox-based RNG implementation for CPU."
    )
    raise RuntimeError(msg)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# TODO - We have to register many more distributions here, and also higher level
|
| 31 |
+
# ops like dropout which have fused implementation and can hide the rand inside.
|
| 32 |
+
@register_rng_decomposition(aten.rand)
def rand(shape, dtype=None, layout=torch.strided, device=None, pin_memory=False):
    """Functional decomposition of aten.rand on top of philox_rand.

    Reads the traced Philox (seed, offset) from PhiloxStateTracker, draws the
    values with the functional philox_rand prim, and advances the tracked
    offset by the amount the prim reports as consumed.
    """
    # Only CUDA uses a Philox/counter-based RNG; anything else is rejected.
    if device and device.type != "cuda":
        throw_on_non_cuda(device)
    seed, offset = PhiloxStateTracker.get_state_as_tuple()
    dtype = dtype or torch.float32
    out, offset_jump = torch.ops.rngprims.philox_rand(
        shape, seed, offset, None, device, dtype
    )
    PhiloxStateTracker.advance_offset(offset_jump)
    return out
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@register_rng_decomposition(aten.rand_like)
def rand_like(
    x: torch.Tensor,
    dtype=None,
    layout=None,
    device=None,
    pin_memory=False,
    memory_format=torch.preserve_format,
):
    """Functional decomposition of aten.rand_like on top of philox_rand.

    Shape/dtype/device default to those of *x*; the tracked Philox offset is
    advanced by the amount philox_rand reports as consumed.
    """
    device = device or x.device
    # Only CUDA uses a Philox/counter-based RNG; anything else is rejected.
    if device.type != "cuda":
        throw_on_non_cuda(device)
    dtype = dtype or x.dtype
    seed, offset = PhiloxStateTracker.get_state_as_tuple()
    out, offset_jump = torch.ops.rngprims.philox_rand(
        x.shape, seed, offset, None, device, dtype
    )
    PhiloxStateTracker.advance_offset(offset_jump)
    return out
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class PhiloxState:
    """
    Mutable Philox RNG state: a (seed, offset) pair where the effective
    offset is ``base_offset + relative_offset``. ``seed`` and ``base_offset``
    capture the rng state just before tracing starts, while
    ``relative_offset`` accumulates the offset consumed at trace time.
    """

    def __init__(self) -> None:
        self.reset()

    def reset(self):
        # Empty tensors mark "not yet recorded"; validate_state checks this.
        self.seed = torch.tensor(())
        self.base_offset = torch.tensor(())
        self.relative_offset = 0
        self.offset_advanced_alteast_once = False

    def validate_state(self):
        assert self.seed.numel() != 0 and self.base_offset.numel() != 0

    def advance_offset(self, consumed_offset):
        self.offset_advanced_alteast_once = True
        self.relative_offset += consumed_offset

    def set_state(self, seed, base_offset, relative_offset=0):
        self.seed = seed
        self.base_offset = base_offset
        self.relative_offset = relative_offset

    def get_state_as_tuple(self):
        self.validate_state()
        return (self.seed, self.base_offset + self.relative_offset)

    def get_state_as_tensor(self):
        # Only needed because get_rng_state is overridden during tracing.
        self.validate_state()
        return torch.stack([self.seed, self.base_offset + self.relative_offset])

    def set_state_from_tensor(self, state):
        # Only needed because set_rng_state is overridden during tracing.
        self.seed, self.base_offset = torch.unbind(state)
        self.relative_offset = 0
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class PhiloxStateTracker:
    """
    Singleton class to track the philox rng state during AOT Autograd tracing.
    For each aot tracing instance, AOT Autograd resets this tracker and keeps
    track of both forward and backward offsets. At runtime, we only care about
    the total consumed forward and backward offsets. For dynamic shapes, these
    offsets are a function of input shapes. Therefore, the AOT generated graphs
    have additional outputs that compute total consumed forward and backward
    offsets.
    """

    running_state: PhiloxState
    fwd_state: PhiloxState
    bwd_state: PhiloxState

    def __enter__(self):
        PhiloxStateTracker.reset()
        return self

    def __exit__(self, exc_type, exc_cal, exc_tb):
        PhiloxStateTracker.reset()

    @classmethod
    def reset(cls):
        cls.running_state = PhiloxState()
        cls.fwd_state = PhiloxState()
        cls.bwd_state = PhiloxState()

    @classmethod
    def mark_beginning_of_forward(cls):
        # Route subsequent state bookkeeping to the forward state.
        cls.running_state = cls.fwd_state

    @classmethod
    def mark_beginning_of_backward(cls):
        # Route subsequent state bookkeeping to the backward state.
        cls.running_state = cls.bwd_state

    @classmethod
    def record_state(cls, seed, offset, mode):
        # Record the seed/offset tensors that the philox_rand functional
        # primitives will be invoked with.
        if mode == "forward":
            cls.fwd_state.set_state(seed, offset)
            cls.mark_beginning_of_forward()
        else:
            assert mode == "backward"
            cls.bwd_state.set_state(seed, offset)

    @classmethod
    def get_state_as_tensor(cls):
        # Exists only because get_rng_state/set_rng_state are overridden
        # during tracing: get_rng_state must hand back one tensor, since a
        # (seed, offset) tuple would upset consumers like ctx.saved_tensors.
        #
        # A bad consequence: if the user saves and restores rng state, the
        # generated code first concats (seed, offset) for get_rng_state and
        # later splits it back apart in set_rng_state.
        #
        # TODO: Investigate whether the tuple could be wrapped in a fake
        # Tensor object and desugared later on.
        return cls.running_state.get_state_as_tensor()

    @classmethod
    def get_state_as_tuple(cls):
        return cls.running_state.get_state_as_tuple()

    @classmethod
    def set_state_from_tensor(cls, x):
        # Counterpart of get_state_as_tensor; only needed because
        # set_rng_state is overridden during tracing.
        cls.running_state.set_state_from_tensor(x)

    @classmethod
    def advance_offset(cls, consumed_offset):
        cls.running_state.advance_offset(consumed_offset)

    @classmethod
    def get_current_relative_offset(cls):
        return cls.running_state.relative_offset

    @staticmethod
    def multiple_of_4(offset):
        # torch's cuda rng state offset must be a multiple of 4. Inductor sums
        # raw numels, which need not be a multiple of 4, so round up here.
        return (offset + 3) // 4 * 4

    @classmethod
    def _updated_offset(cls, state):
        # Shared implementation for get_updated_{fwd,bwd}_offset.
        if not state.offset_advanced_alteast_once:
            # Short circuit: no rand ops were observed.
            return state.base_offset
        return cls.multiple_of_4(state.base_offset + state.relative_offset)

    @classmethod
    def get_updated_fwd_offset(cls):
        return cls._updated_offset(cls.fwd_state)

    @classmethod
    def get_updated_bwd_offset(cls):
        return cls._updated_offset(cls.bwd_state)
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
# Adding more decompositions which eventually use rand_like inside decomps.
|
| 220 |
+
# Adding these in rng_decompositions ensures the functionalization of rand_like
|
| 221 |
+
# ops used in these decomps. The list is copied from inductor codebase, which
|
| 222 |
+
# uses it for similar purpose.
|
| 223 |
+
#
|
| 224 |
+
# Caution - These decomps do not have same accuracy as that of eager. However,
|
| 225 |
+
# we can't just disable them with a config flag like fallback_random, because
|
| 226 |
+
# for functionalization of rng ops, we have to decompose these ops.
|
| 227 |
+
# Decompositions that eventually call rand_like internally; registering them
# here ensures those hidden rand_like calls are functionalized too. The list
# is copied from the inductor codebase. NB: accuracy may differ from eager.
extra_random_decomps = get_decompositions(
    [
        aten.cauchy,
        aten.cauchy_,
        aten.exponential,
        aten.exponential_,
        aten.geometric,
        aten.geometric_,
        aten.native_dropout,
        aten.normal,
        aten.normal_,
        aten.normal_functional,
        aten.log_normal,
        aten.log_normal_,
        aten.rrelu_with_noise,
        aten.rrelu_with_noise_,
        aten.uniform_,
    ]
)
register_extra_random_decomp = functools.partial(
    decomp.register_decomposition, registry=extra_random_decomps
)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
@register_extra_random_decomp([aten.bernoulli_])
def bernoulli_(self, p=0.5):
    """In-place Bernoulli draw built on rand_like; defers to eager on CPU."""
    if self.device == torch.device("cpu"):
        return NotImplemented
    mask = torch.rand_like(self, dtype=torch.float32) < p
    return self.copy_(mask)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
@register_extra_random_decomp([aten.bernoulli.p])
def bernoulli_p(self, p=0.5, *, generator=None):
    """Out-of-place Bernoulli draw built on rand_like; defers to eager on CPU."""
    if self.device == torch.device("cpu"):
        return NotImplemented
    assert generator is None
    samples = torch.rand_like(self, dtype=torch.float32)
    return samples < p
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
# Fold the extra decomps into the rng registry so they are functionalized too.
rng_decompositions.update(extra_random_decomps)  # type: ignore[arg-type]
|
vllm/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (17.9 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/bin/protoc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3390873b2da56c1397adec3728f1588c51e182f15b123d3b4d4f248d31c1f4da
|
| 3 |
+
size 5330888
|
vllm/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3390873b2da56c1397adec3728f1588c51e182f15b123d3b4d4f248d31c1f4da
|
| 3 |
+
size 5330888
|
vllm/lib/python3.10/site-packages/torch/contrib/__init__.py
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (163 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc
ADDED
|
Binary file (5.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import time
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
from functools import partial
|
| 5 |
+
from typing import DefaultDict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Unfortunately it doesn't seem as if there was any way to get TensorBoard to do
|
| 11 |
+
# anything without having TF installed, and so this file has a hard dependency on it
|
| 12 |
+
# as well. It really is a debugging tool, so it doesn't matter.
|
| 13 |
+
try:
|
| 14 |
+
from tensorflow.core.util import event_pb2
|
| 15 |
+
from tensorflow.core.framework import graph_pb2
|
| 16 |
+
from tensorflow.python.summary.writer.writer import FileWriter
|
| 17 |
+
except ImportError:
|
| 18 |
+
raise ImportError("TensorBoard visualization of GraphExecutors requires having "
|
| 19 |
+
"TensorFlow installed") from None
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def dump_tensorboard_summary(graph_executor, logdir):
    """Render *graph_executor* with ``visualize`` and write it to *logdir*
    as a single TensorBoard event."""
    with FileWriter(logdir) as writer:
        pb_graph = visualize(graph_executor)
        event = event_pb2.Event(
            wall_time=time.time(), graph_def=pb_graph.SerializeToString()
        )
        writer.add_event(event)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def visualize(graph, name_prefix='', pb_graph=None, executors_it=None):
    """Visualizes an independent graph, or a graph executor."""
    pb_graph = pb_graph or graph_pb2.GraphDef()
    value_map = {}

    # A GraphExecutorState is delegated entirely to the executor visualizer.
    if isinstance(graph, torch._C.GraphExecutorState):
        visualize_graph_executor(graph, name_prefix, pb_graph,
                                 partial(visualize, pb_graph=pb_graph))
        return pb_graph

    # Set up an input node and name each graph input after it.
    pb_graph.node.add(op='input', name=name_prefix + 'input')
    for idx, out_value in enumerate(graph.param_node().outputs()):
        value_map[out_value.unique()] = name_prefix + 'input:' + str(idx)

    visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it)

    # Gather all outputs into a terminal node.
    return_node = pb_graph.node.add(op='output', name=name_prefix + 'output')
    for in_value in graph.return_node().inputs():
        return_node.input.append(value_map[in_value.unique()])

    return pb_graph
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph):
    """Append the state of a given GraphExecutor to the graph protobuf.

    Args:
        state (GraphExecutor or GraphExecutorState): GraphExecutor to display.
        name_prefix (str): Name prefix of the containing subgraph.
        pb_graph (GraphDef): graph to append to.
        inline_graph (Callable): a function that handles setting up a value_map,
            so that some graphs in here can be inlined. This is necessary, because
            this will simply be `visualize` for the top-level GraphExecutor,
            or `inline_graph` for all nested ones.

            The signature should look like (Graph, name_prefix) -> ().
            It will be called exactly once.

    The strategy is to embed all different configurations as independent subgraphs,
    while inlining the original graph as the one that actually produces the values.
    """
    if state.autograd_fallback_graph is not None:
        visualize(graph=state.autograd_fallback_graph,
                  name_prefix=name_prefix + 'autograd_fallback/',
                  pb_graph=pb_graph,
                  executors_it=iter(state.autograd_fallback.executors()))

    # One independent subgraph per specialized execution plan.
    for i, (arg_spec, plan) in enumerate(state.execution_plans.items()):
        subgraph_name = name_prefix + f'plan{i}/'

        # Create a disconnected node that will keep information regarding the input
        # types of this trace. This is unfortunately a bit too verbose to be included
        # in the subgraph name.
        input_kinds = pb_graph.node.add(op='INPUT_KIND', name=subgraph_name)
        input_kinds.attr['inputs'].s = repr(arg_spec).encode('ascii')

        visualize(plan.graph, subgraph_name, pb_graph, iter(plan.code.executors()))

        # Show gradient as an independent subgraph of this plan
        if plan.grad_executor is not None:
            grad_subgraph_name = subgraph_name + 'grad/'
            visualize(plan.grad_executor, grad_subgraph_name, pb_graph)

    return inline_graph(state.graph, name_prefix + 'original/')
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it=None):
    """Recursive part of visualize (basically skips setting up the input and output nodes)."""
    def inline_graph(subgraph, name, node):
        # Map subgraph inputs to the outer producers of the node's inputs,
        # recurse, then propagate the subgraph's output names back out so the
        # enclosing graph can reference them.
        rec_value_map = {inp.unique(): value_map[val.unique()]
                         for inp, val in zip(subgraph.inputs(), node.inputs())}
        visualize_rec(graph=subgraph,
                      value_map=rec_value_map,
                      name_prefix=name,
                      pb_graph=pb_graph)
        for out, val in zip(subgraph.outputs(), node.outputs()):
            value_map[val.unique()] = rec_value_map[out.unique()]

    # Per-kind counter used to generate unique node names within this prefix.
    op_id_counter: DefaultDict[str, int] = defaultdict(int)

    def name_for(node):
        # Strip the namespace (e.g. 'aten::') and append a running id.
        kind = node.kind()[node.kind().index('::') + 2:]
        op_id_counter[kind] += 1
        return kind, name_prefix + kind + '_' + str(op_id_counter[kind])

    def add_fusion_group(node):
        op, name = name_for(node)
        inline_graph(node.g('Subgraph'), name + '/', node)

    def add_graph_executor(node):
        op, name = name_for(node)
        if executors_it is None:
            # No executor states supplied: render it as an opaque node.
            add_node(node)
        else:
            ge = next(executors_it)
            visualize_graph_executor(ge, name + '/', pb_graph,
                                     partial(inline_graph, node=node))

    def add_node(node):
        if node.kind() == 'prim::FusionGroup':
            return add_fusion_group(node)
        elif node.kind() == 'prim::GraphExecutor':
            return add_graph_executor(node)
        op, name = name_for(node)
        pb_node = pb_graph.node.add(op=op, name=name)
        for value in node.inputs():
            pb_node.input.append(value_map[value.unique()])
        # TODO: handle attrs
        for i, value in enumerate(node.outputs()):
            value_map[value.unique()] = name + ':' + str(i)

    for node in graph.nodes():
        add_node(node)
|
vllm/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (18.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/nested/_internal/__init__.py
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/nested/_internal/ops.py
ADDED
|
@@ -0,0 +1,1675 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
import math
|
| 4 |
+
import operator
|
| 5 |
+
from typing import * # noqa: F403
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn.functional as F
|
| 9 |
+
from torch.fx.operator_schemas import normalize_function
|
| 10 |
+
from torch.nested._internal.sdpa import jagged_scaled_dot_product_attention
|
| 11 |
+
|
| 12 |
+
from .nested_tensor import NestedTensor
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
__all__: List[Any] = []
|
| 16 |
+
|
| 17 |
+
JAGGED_OPS_TABLE: Dict[Any, Any] = {}
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Simplifying assumption: we assume that the batch dim is always the left-most
|
| 21 |
+
# dim, and the ragged dim is always the second dim.
|
| 22 |
+
def _outer_to_inner_dim(ndim, dim):
|
| 23 |
+
assert dim >= 0 and dim < ndim
|
| 24 |
+
return 0 if dim < 2 else dim - 1
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _wrap_jagged_dim(
|
| 28 |
+
ndim, dim, op_name, convert_to_inner_dim=True, allow_batch_dim=False
|
| 29 |
+
):
|
| 30 |
+
from torch._prims_common import canonicalize_dims
|
| 31 |
+
|
| 32 |
+
wrapped = canonicalize_dims(ndim, dim)
|
| 33 |
+
if wrapped == 1:
|
| 34 |
+
raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=1")
|
| 35 |
+
elif wrapped == 0 and not allow_batch_dim:
|
| 36 |
+
raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=0")
|
| 37 |
+
return _outer_to_inner_dim(ndim, wrapped) if convert_to_inner_dim else wrapped
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _wrap_jagged_dims(ndim, dims, op_name, ragged_idx=1):
|
| 41 |
+
"""
|
| 42 |
+
For NestedTensor operators,
|
| 43 |
+
wraps dimensions to non-negative values,
|
| 44 |
+
and returns metadata related to reduction dimension(s).
|
| 45 |
+
"""
|
| 46 |
+
from torch._prims_common import canonicalize_dims
|
| 47 |
+
|
| 48 |
+
assert isinstance(
|
| 49 |
+
dims, (tuple, list)
|
| 50 |
+
), f"_wrap_jagged_dims(): cannot iterate over dimensions of type {type(dims)}"
|
| 51 |
+
|
| 52 |
+
wrapped_dims = [
|
| 53 |
+
canonicalize_dims(ndim, d) for d in dims
|
| 54 |
+
] # convert all indices to non-negative values
|
| 55 |
+
|
| 56 |
+
operate_on_batch = 0 in wrapped_dims
|
| 57 |
+
operate_on_ragged = ragged_idx in wrapped_dims
|
| 58 |
+
operate_on_non_batch = any(d != 0 and d != ragged_idx for d in wrapped_dims)
|
| 59 |
+
|
| 60 |
+
outer_to_inner_dim = tuple(
|
| 61 |
+
_outer_to_inner_dim(ndim, d) for d in wrapped_dims if d != 0
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
return outer_to_inner_dim, operate_on_batch, operate_on_ragged, operate_on_non_batch
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def check_schema(schema_str: str, func, *args, **kwargs) -> None:
    """
    Validate ``args`` against a lightweight comma-separated schema string.

    Each entry is "name: type" where type is one of:
      * "t"      -- a dense (non-nested) tensor
      * "jt"     -- a contiguous jagged-layout NestedTensor
      * "jt_all" -- any jagged-layout NestedTensor
      * "any"    -- anything
    A trailing "?" marks the argument as optional (it may be None or
    omitted); a final "..." entry allows any number of unchecked trailing
    args.

    Raises ValueError on an arity or type mismatch, AssertionError on an
    unknown type name in the schema itself.
    """
    named_arg_types = schema_str.split(", ")
    num_optional_args = sum(x.endswith("?") for x in named_arg_types)
    min_args = len(named_arg_types) - num_optional_args

    # special case: ellipses allows for any number of unchecked args at the end
    if named_arg_types[-1] == "...":
        named_arg_types = named_arg_types[:-1]
    else:
        if not (len(args) >= min_args and len(args) <= len(named_arg_types)):
            raise ValueError(
                f"NestedTensor {func.__name__}({schema_str}): expected at least {min_args} "
                f"arguments and at most {len(named_arg_types)} arguments, but got: "
                f"{len(args)} arguments"
            )

    arg_type_check_fns = {
        "t": lambda x: isinstance(x, torch.Tensor) and not isinstance(x, NestedTensor),
        "jt": lambda x: isinstance(x, NestedTensor)
        and x._lengths is None
        and x._ragged_idx == 1,  # ops with "jt" require contiguous JT only
        "jt_all": lambda x: isinstance(
            x, NestedTensor
        ),  # ops with "jt_all" can accept all kinds of JT
        "any": lambda x: True,
    }
    # Human-readable descriptions keyed by *normalized* type name; the
    # "optional " prefix is added separately for optional args.
    type_to_desc = {
        "t": "tensor",
        "jt": "contiguous jagged layout NestedTensor",
        "jt_all": "jagged layout NestedTensor",
        "any": "<any type>",
    }
    for i, named_arg_type in enumerate(named_arg_types):
        name, arg_type = named_arg_type.split(": ")
        is_optional = arg_type.endswith("?")
        normalized_arg_type = arg_type[:-1] if is_optional else arg_type
        if normalized_arg_type not in arg_type_check_fns:
            raise AssertionError(f"Unknown arg type: {normalized_arg_type}")

        if i >= len(args):
            if not is_optional:
                raise ValueError(
                    f"NestedTensor {func.__name__}({schema_str}) "
                    f"missing required argument: {name}"
                )
            continue

        value = args[i]
        # Optional args accept None unconditionally.
        if is_optional and value is None:
            continue
        if not arg_type_check_fns[normalized_arg_type](value):
            # BUG FIX: previously the description table was indexed with the
            # raw arg_type (e.g. "jt?"), which only had an entry for "t?"
            # among the optional types -- failing checks on "jt?"/"jt_all?"/
            # "any?" raised KeyError instead of the intended ValueError.
            # Build the description from the normalized type instead.
            desc = type_to_desc[normalized_arg_type]
            if is_optional:
                desc = f"optional {desc}"
            raise ValueError(
                f"NestedTensor {func.__name__}({schema_str}): expected {name} to be a "
                f"{desc}"
            )
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def check_ragged_dim_same(
    func, a: NestedTensor, a_name: str, b: NestedTensor, b_name: str
) -> None:
    """Raise if ``a`` and ``b`` do not agree on their ragged dim size."""
    # Calling into .shape here
    ragged_a = a._size[a._ragged_idx]
    ragged_b = b._size[b._ragged_idx]
    if ragged_a != ragged_b:
        raise RuntimeError(
            f"NestedTensor {func.__name__}: expected {a_name} and {b_name} to have the "
            "same exact offsets tensor."
        )
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
# returns True if the raggedness-relevant portions of the NT shape
|
| 143 |
+
# match those of the specified size
|
| 144 |
+
def raggedness_matches(nt, size):
    """Return True if the raggedness-relevant leading portion of ``nt``'s
    shape is compatible with ``size`` (exact match or -1 wildcard)."""
    boundary = nt._ragged_idx + 1
    leading_nt = nt._size[:boundary]
    leading_size = size[:boundary]
    if len(leading_nt) != len(leading_size):
        return False
    return all(s == -1 or ns == s for ns, s in zip(leading_nt, leading_size))
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def squeeze_leading_ones(t):
    # Note: [ Squeezing leading ones ]
    #
    # Drop size-1 dims from the front of t so dense broadcasting against a
    # NestedTensor's values tensor lines up:
    # (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
    # (B, j0, ?, ?) + (1, 1, 1, ?, ?) -> (1, B, j0, ?, ?) (not yet supported)
    #
    # The recipe is:
    # 1) squeeze the extra leading ones here and grab values from the NT
    # 2) broadcast densely: (sum(*), ?, ?) + (?, ?) -> (sum(*), ?, ?)
    # 3) rewrap the result as a nested tensor: (sum(*), ?, ?) -> (B, j0, ?, ?)
    #
    # If unsqueezing on the 0th dim becomes supported, an unsqueeze step would
    # be added after (3) and this function would need to report how many ones
    # it removed.
    while t.dim() > 0 and t.size(0) == 1:
        t = t.squeeze(0)
    return t
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def register_func(tables, aten_ops, schema_str):
    """Decorator factory that registers an op implementation in ``tables``.

    Each registered entry validates its args against ``schema_str`` via
    check_schema and then calls the decorated function with the aten op
    prepended. Single ops/tables may be passed bare instead of as lists.
    """
    aten_ops = aten_ops if isinstance(aten_ops, list) else [aten_ops]
    tables = tables if isinstance(tables, list) else [tables]

    def wrapper(func):
        def make_entry(op):
            # Bind op through a factory so each entry captures its own op
            # (avoids the classic late-binding closure pitfall).
            def entry(*args, **kwargs):
                check_schema(schema_str, func, *args, **kwargs)
                return func(op, *args, **kwargs)

            return entry

        for op in aten_ops:
            entry = make_entry(op)
            for table in tables:
                table[op] = entry
        # Return the original function so decoration is transparent.
        return func

    return wrapper
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
register_jagged_func = functools.partial(register_func, JAGGED_OPS_TABLE)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def lookup_jagged(func, *args, **kwargs) -> Optional[Callable]:
    """Look up the jagged-layout implementation for ``func``.

    Returns a callable from the registration table if one exists; otherwise
    falls back to generic unary/binary pointwise handling for ops tagged
    pointwise. Returns None when no implementation applies.
    """
    dispatch_func = JAGGED_OPS_TABLE.get(func, None)
    if dispatch_func is not None:
        return dispatch_func

    # Handle pointwise fallbacks
    if torch.Tag.pointwise in func.tags:
        # Assume there aren't additional tensors that aren't the "unary/binary" args
        num_tensor_args = sum(isinstance(x, torch.Tensor) for x in args)
        if num_tensor_args == 1:
            # Build up the check schema string. The first tensor arg is assumed to be
            # an NJT and other args are sent through as-is.
            schema_parts = []
            for arg in func._schema.arguments:
                if isinstance(arg.type, torch.TensorType):
                    # Stop at the first tensor arg; trailing args go unchecked
                    # via the "..." entry appended below.
                    schema_parts.append(f"{arg.name}: jt_all")
                    break
                else:
                    schema_parts.append(f"{arg.name}: any")
            schema_parts.append("...")
            check_schema_str = ", ".join(schema_parts)
            check_schema(check_schema_str, func, *args, **kwargs)
            return functools.partial(jagged_unary_pointwise, func)
        elif num_tensor_args == 2:
            # Binary pointwise: both operands are validated inside
            # jagged_binary_pointwise, so only arity is checked here.
            check_schema("lhs: any, rhs: any, ...", func, *args, **kwargs)
            return functools.partial(jagged_binary_pointwise, func)

    return None
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def extract_kwargs(arg):
    """Collect the NJT construction kwargs from an existing NestedTensor so a
    new one can be built sharing the same ragged structure and cache."""
    return {
        "offsets": arg.offsets(),
        "_metadata_cache": arg._metadata_cache,
        "_ragged_idx": arg._ragged_idx,
    }
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def jagged_unary_pointwise(func, *args, **kwargs):
    """Apply a unary pointwise op to the single NJT arg's values and rewrap."""
    # assume if we get here that there is a single NJT input in the args
    njt = next(a for a in args if isinstance(a, NestedTensor))
    unwrapped_args = tuple(a._values if a is njt else a for a in args)
    out_values = func(*unwrapped_args, **kwargs)
    return NestedTensor(out_values, **extract_kwargs(njt))
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def jagged_binary_pointwise(func, *args, **kwargs):
    """Apply a binary pointwise op where at least one operand is an NJT.

    Handles NJT+NJT (raggedness must match), NJT+dense via values-level
    broadcasting when the dense operand has at least 2 fewer effective dims,
    and a slow per-component fallback when the dims are equal.
    Raises RuntimeError on incompatible shapes.
    """
    a, b = args[0], args[1]
    assert isinstance(a, NestedTensor) or isinstance(b, NestedTensor)

    mismatch_error_msg = (
        "cannot call binary pointwise function {} with inputs of shapes {} and {}"
    )
    # a is NT, b is NT
    if isinstance(a, NestedTensor) and isinstance(b, NestedTensor):
        # ex: (B, j0, D) + (B, j0, D)
        # ex: (B, j0, D) + (B, j0, 1)
        if raggedness_matches(a, b._size):
            # Raggedness agrees, so the op can run directly on the values
            # tensors; metadata is taken from a.
            return NestedTensor(
                func(a._values, b._values, *args[2:], **kwargs), **extract_kwargs(a)
            )
        raise RuntimeError(mismatch_error_msg.format(func.__name__, a._size, b._size))
    # either a is NT or b is NT at this point
    a_is_nt = isinstance(a, NestedTensor)
    extracted_kwargs = extract_kwargs(a) if a_is_nt else extract_kwargs(b)

    # === Handle broadcasting across the batch / ragged dims ===

    # Easy case: take advantage of pre-existing broadcasting logic
    # ex: (B, j0, ?, ?) + (?) -> (B, j0, ?, ?)
    # ex: (B, j0, ?, ?) + (?, ?) -> (B, j0, ?, ?)
    # ex: (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
    nt, t = (a, b) if a_is_nt else (b, a)
    # See Note: [ Squeezing leading ones ]
    if t.dim() > nt.dim():
        raise NotImplementedError("NYI: broadcasting NT with T with larger dim")
    t_squeezed = squeeze_leading_ones(t)
    if nt.dim() >= t_squeezed.dim() + 2:
        # Dense operand broadcasts cleanly against the values tensor;
        # preserve the original operand order for non-commutative ops.
        lhs, rhs = (nt._values, t_squeezed) if a_is_nt else (t_squeezed, nt._values)
        return NestedTensor(func(lhs, rhs, *args[2:], **kwargs), **extracted_kwargs)

    # Harder case: do manual broadcasting over unbound components
    # when NT dim == non-NT dim
    # ex: (B, j0, D_0, D_1) + (B, 1, D_0, D_1) -> (B, j0, D_0, D_1)
    if a.dim() == b.dim():
        # ex: (B, j0, D_0, D_1) + (1, 1, D_0, D_1) -> should
        # be (B, j0, D_0, D_1) but not yet supported
        if a.shape[0] != b.shape[0]:
            raise RuntimeError(
                mismatch_error_msg.format(func.__name__, a.shape, b.shape)
            )

        # need to use offsets to broadcast across ragged dim properly
        # NB: inefficient fallback here; Triton codegen can help this
        # TODO: Make this work with autograd
        outputs = []
        for a_comp, b_comp in zip(a.unbind(), b.unbind()):
            outputs.append(func(a_comp, b_comp, *args[2:], **kwargs))
        new_values = torch.cat(outputs, dim=0)
        return NestedTensor(new_values, **extracted_kwargs)

    # ex: (B, j0, D_0, D_1) + (A, B, 1, D_0, D_1) -> error because this breaks the invariant
    # that ragged dim is wrt left-most batch dim
    raise RuntimeError(mismatch_error_msg.format(func.__name__, a.shape, b.shape))
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def jagged_torch_function(func, *args, **kwargs):
    """__torch_function__-level handler for NJT inputs.

    Covers SDPA (routed to the jagged kernel), in-place apply_, and
    flatten() (CompositeImplicit, so it never reaches the aten table).
    Raises NotImplementedError for anything else.
    """
    # SDPA has special kernels that handle nested tensors.
    # Dispatch to the correct implementation here
    if func is torch._C._nn.scaled_dot_product_attention:
        return jagged_scaled_dot_product_attention(*args, **kwargs)

    if func.__name__ == "apply_":
        # In-place: mutate the values buffer and return the NJT itself.
        func(args[0]._values, *args[1:], **kwargs)
        return args[0]

    # Handle flatten() here because it's CompositeImplicit.
    if func.__name__ == "flatten":

        # Dummy signature used only so normalize_function can bind
        # args/kwargs to flatten's parameter names and defaults.
        def _flatten_sig(input, start_dim=0, end_dim=-1):
            pass

        _, new_kwargs = normalize_function(  # type: ignore[misc]
            _flatten_sig, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
        )

        inp = new_kwargs.pop("input")

        # NB: stay in outer dim space because we're going to redispatch on a NT input
        start_dim = _wrap_jagged_dim(
            inp.dim(), new_kwargs["start_dim"], "flatten", convert_to_inner_dim=False
        )
        end_dim = _wrap_jagged_dim(
            inp.dim(), new_kwargs["end_dim"], "flatten", convert_to_inner_dim=False
        )

        if start_dim == end_dim:
            # Flattening a single dim is a no-op.
            return inp

        # Collapse the [start_dim, end_dim] range into one dim and redispatch
        # through reshape on the NJT.
        product = functools.reduce(operator.mul, inp.shape[start_dim : end_dim + 1])
        new_shape = (*inp.shape[:start_dim], product, *inp.shape[end_dim + 1 :])

        return inp.reshape(*new_shape)

    raise NotImplementedError(func)
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
@register_jagged_func(
    [
        torch.ops.aten.is_non_overlapping_and_dense.default,
        torch.ops.aten.sym_size.default,
        torch.ops.aten.dim.default,
        torch.ops.aten.numel.default,
        torch.ops.aten.sym_numel.default,
        torch.ops.aten.sym_stride.default,
        torch.ops.aten.sym_storage_offset.default,
    ],
    "self: jt_all",
)
def tensor_attr_supported_getter(func, *args, **kwargs):
    """Serve basic tensor-attribute queries from cached NJT metadata."""
    if func == torch.ops.aten.is_non_overlapping_and_dense.default:
        return False

    if func == torch.ops.aten.sym_size.default:
        return args[0]._size

    if func == torch.ops.aten.dim.default:
        return len(args[0]._size)

    if func in (torch.ops.aten.sym_numel.default, torch.ops.aten.numel.default):
        if args[0]._lengths is not None:
            # Non-contiguous (narrow'ed) NJT: the values buffer may contain
            # holes, so count only the lengths-covered elements.
            return int(sum(args[0]._lengths) * math.prod(args[0]._size[2:]))
        return args[0]._values.numel()

    if func == torch.ops.aten.sym_stride.default:
        return args[0]._strides

    if func == torch.ops.aten.sym_storage_offset.default:
        return args[0]._values.storage_offset()
    # NOTE(review): any func registered above but not handled here falls
    # through and returns None implicitly.
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
@register_jagged_func(torch.ops.prim.layout.default, "self: jt_all")
def prim_layout_default(func, *args, **kwargs):
    """Jagged-layout NestedTensors always report torch.jagged."""
    return torch.jagged
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
@register_jagged_func(
    [torch.ops.aten.size.default],
    "self: jt_all",
)
def tensor_attr_unsupported_getter(func, *args, **kwargs):
    """Reject attribute getters that NJTs deliberately do not support."""
    if func == torch.ops.aten.size.default:
        raise RuntimeError(
            "NestedTensors does not support directly calling torch.ops.aten.size "
            "please use `nested_tensor.size()` instead."
        )
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
@register_jagged_func(torch.ops.aten.is_contiguous.default, "self: jt_all")
def is_contiguous_general(func, *args, **kwargs):
    """is_contiguous for NJTs: lengths-backed NJTs are never contiguous;
    otherwise defer to the contiguity of the values tensor."""
    from torch._prims_common import is_contiguous_for_memory_format

    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")

    # If created from narrow() check for lengths
    if inp.lengths() is not None:
        return False

    # Default to contiguous_format when the caller did not specify one.
    new_kwargs["memory_format"] = new_kwargs.get(
        "memory_format", torch.contiguous_format
    )
    if new_kwargs["memory_format"] == torch.preserve_format:
        return True
    return is_contiguous_for_memory_format(inp._values, **new_kwargs)


# The memory_format overload shares the same implementation.
register_jagged_func(
    torch.ops.aten.is_contiguous.memory_format, "self: jt_all, memory_format: any?"
)(is_contiguous_general)
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
@register_jagged_func(
    torch.ops.aten.clone.default, "input: jt_all, memory_format: any?"
)
def clone_default(func, *args, **kwargs):
    """clone for NJTs; a contiguous-format clone of a lengths-backed NJT
    compacts the values buffer, otherwise metadata is carried over as-is."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    new_meta = extract_kwargs(inp)

    if inp._lengths is not None:
        if new_kwargs["memory_format"] == torch.contiguous_format:
            # need to copy to remove "holes" non-contiguity / lengths metadata
            # TODO: write a kernel for this
            from .nested_tensor import jagged_from_list

            # TODO: We probably want the output to have the same ragged structure / nested int.
            assert (
                inp._ragged_idx == 1
            ), "NJT with ragged_idx != 1 not supported for contiguous clone"
            contig, _ = jagged_from_list(inp.unbind(), offsets=None)
            return contig
        else:
            # need to preserve any lengths metadata present
            new_meta["lengths"] = inp._lengths

    return NestedTensor(func(inp._values, **new_kwargs), **new_meta)
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
@register_jagged_func(torch.ops.aten.linear.default, "input: jt, weight: t, bias: t?")
def linear_default(func, *args, **kwargs):
    """linear over a contiguous NJT: run on the values tensor and rewrap."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")
    out_values = func(inp._values, **new_kwargs)
    return NestedTensor(out_values, **extract_kwargs(inp))
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
@register_jagged_func(
    torch.ops.aten.linear_backward.default,
    "self: jt, grad_output: jt, weight: t, output_mask: any",
)
def linear_backward_default(func, *args, **kwargs):
    """Backward for linear on an NJT.

    Returns ``(grad_input, grad_weight, grad_bias)``; grad_bias is not yet
    implemented since it would need a reduction over the ragged dimension.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    grad_output = new_kwargs.pop("grad_output")
    weight = new_kwargs.pop("weight")

    # Both NJTs must share a ragged structure for the matmuls below to line up.
    check_ragged_dim_same(func, inp, "self", grad_output, "grad_output")
    # grad_input = grad_output @ W, computed on the packed values buffer
    ds = NestedTensor(
        torch.matmul(grad_output._values, weight), **extract_kwargs(grad_output)
    )
    # grad_weight = grad_output^T @ input over the packed rows
    dw = torch.matmul(grad_output._values.transpose(-2, -1), inp._values)
    db = None  # NYI: gradient for bias, need to reduce over ragged dim
    return (ds, dw, db)
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
@register_jagged_func(torch.ops.aten.to.dtype, "input: jt_all, dtype: any")
def to_dtype(func, *args, **kwargs):
    """dtype conversion: convert the values buffer and rewrap as an NJT."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")

    converted_values = func(nt._values, **new_kwargs)
    return NestedTensor(converted_values, **extract_kwargs(nt))
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
@register_jagged_func(torch.ops.aten._to_copy.default, "self: jt_all")
def to_copy_default(func, *args, **kwargs):
    """_to_copy for NJTs.

    Copies the values buffer (dtype/device/etc. changes) and moves the offsets
    to the new device, taking care to preserve the nested-int association for
    the new offsets tensor so the result reports the same ragged size symbol.
    """
    from .nested_tensor import _tensor_symint_registry

    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    # don't change layout
    new_kwargs.pop("layout")

    new_values = func(inp._values, **new_kwargs)
    # offsets must live on the same device as the values buffer
    new_offsets = inp._offsets.to(device=new_values.device)

    from torch._subclasses.fake_tensor import FakeTensor
    from torch._subclasses.functional_tensor import (
        FunctionalTensor,
        mb_unwrap_functional_tensor,
    )

    if isinstance(new_offsets, (FakeTensor, FunctionalTensor)):
        # Temporary hack until we have the union find
        tgt = mb_unwrap_functional_tensor(new_offsets)
        src = mb_unwrap_functional_tensor(inp._offsets)
        tgt.nested_int_memo = src.nested_int_memo
    else:
        # Carry the registered nested int from the old offsets to the new ones.
        _tensor_symint_registry[new_offsets] = _tensor_symint_registry[inp._offsets]
    inp_kwargs = extract_kwargs(inp)
    inp_kwargs["offsets"] = new_offsets

    return NestedTensor(new_values, **inp_kwargs)
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
@register_jagged_func(
    torch.ops.aten.copy_.default, "self: jt_all, src: jt_all, non_blocking: any?"
)
def copy_default(func, *args, **kwargs):
    """In-place copy_ between two NJTs with matching structure.

    Only ``_size`` is compared here; the ragged entry of ``_size`` is a nested
    int, so equal sizes are presumed to imply the same ragged structure /
    offsets (NOTE(review): relies on nested-int identity semantics — confirm).
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")
    src = new_kwargs.pop("src")
    if inp._size != src._size:
        raise RuntimeError(
            "copy_ only supports Nested Tensors that have same size and the exact same offset tensor."
        )
    # Structures match, so a flat copy of the packed values buffers suffices.
    inp.values().copy_(src.values())
    return inp
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
# detach() maps elementwise onto the values buffer, so the generic unary
# pointwise handler covers it for all NJT variants.
register_jagged_func(torch.ops.aten.detach.default, "self: jt_all")(
    jagged_unary_pointwise
)
|
| 558 |
+
|
| 559 |
+
|
| 560 |
+
@register_jagged_func(
    [
        torch.ops.aten.empty_like.default,
        torch.ops.aten.ones_like.default,
        torch.ops.aten.zeros_like.default,
        torch.ops.aten.randn_like.default,
    ],
    "self: jt_all",
)
def like_factory_default(func, *args, **kwargs):
    """*_like factory ops: run the factory on the values buffer and rewrap."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")

    # Default layout is technically torch.strided but only jagged is supported here.
    # Rather than force users to specify the layout, assume jagged.
    # This should be set to strided for redispatching on values.
    new_kwargs["layout"] = torch.strided

    like_values = func(nt._values, **new_kwargs)
    return NestedTensor(like_values, **extract_kwargs(nt))
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
@register_jagged_func(torch.ops.aten.zero_.default, "self: jt_all")
def zero__default(func, *args, **kwargs):
    """In-place zero: mutate the values buffer and return the same NJT."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")
    func(nt._values)  # zero_ mutates in place; its return value is unused
    return nt
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
@register_jagged_func(
    torch.ops.aten._softmax.default, "self: jt_all, dim: any, half_to_float: any"
)
def _softmax_default(func, *args, **kwargs):
    """softmax over a single dimension of an NJT.

    Reducing over the ragged dim requires a pad-to-dense / softmax /
    convert-back-to-jagged round trip (padding with -inf so padded slots
    contribute zero probability); any other dim applies softmax directly on
    the packed values buffer.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    if isinstance(new_kwargs["dim"], tuple):
        raise RuntimeError(
            "softmax(): not supported for dimensions of type 'tuple' for NestedTensor"
        )

    inp = new_kwargs.pop("input")

    # Classify the single reduction dim against the batch/ragged dims.
    (
        new_kwargs["dim"],
        reduce_on_batch,
        reduce_on_ragged,
        reduce_on_non_batch,
    ) = _wrap_jagged_dims(
        inp.dim(),
        (new_kwargs["dim"],),
        "softmax",
        inp._ragged_idx,
    )

    if reduce_on_batch:
        raise RuntimeError(
            "softmax(): not supported when reducing across the batch dimension for NestedTensor"
        )

    if reduce_on_ragged and inp._ragged_idx > 1:
        raise RuntimeError(
            "softmax(): not supported when reducing along the ragged dimension for ragged_idx > 1 for NestedTensor"
        )

    if reduce_on_ragged and inp._lengths is not None:
        raise RuntimeError(
            "softmax(): not supported where lengths is not None "
            + "if reducing across the ragged dimension for NestedTensor"
        )

    new_kwargs["dim"] = new_kwargs["dim"][
        0
    ]  # torch.softmax takes in the reduction dimension as an integer

    if reduce_on_ragged:
        padded_softmax_values = torch.nn.functional.softmax(
            torch.ops.aten._jagged_to_padded_dense_forward(
                inp._values.reshape(
                    inp._values.shape[0], -1
                ),  # values are required to be 2D tensors for j2pd
                [inp._offsets],
                max_lengths=[inp._max_seqlen],  # max length of ragged dimension
                padding_value=float("-inf"),  # e^-inf = 0
            ),
            dim=inp._ragged_idx,
        )

        softmax_values = torch.ops.aten._padded_dense_to_jagged_forward(
            padded_softmax_values,
            [inp._offsets],
            total_L=inp._values.shape[
                0
            ],  # providing this parameter helps avoid a GPU/CPU sync
        ).reshape(
            -1, *inp._values.shape[1:]
        )  # expand softmax_values back to original shape (inp._values.shape)

        return NestedTensor(softmax_values, **extract_kwargs(inp))

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
@register_jagged_func(
    torch.ops.aten._softmax_backward_data.default,
    "grad_output: jt, output: jt, dim: any, input_dtype: any",
)
def _softmax_backward(func, *args, **kwargs):
    """Softmax backward: run the dense kernel on both values buffers."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    grad = new_kwargs.pop("grad_output")
    fwd_out = new_kwargs.pop("output")
    grad_values = func(grad._values, fwd_out._values, **new_kwargs)
    return NestedTensor(grad_values, **extract_kwargs(grad))
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
@register_jagged_func(
    torch.ops.aten.native_dropout.default, "self: jt, float: any, train: any?"
)
def native_dropout_default(func, *args, **kwargs):
    """Dropout on an NJT: run the kernel on values; wrap both output and mask."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")
    dropped_values, mask_values = func(nt._values, **new_kwargs)
    return (
        NestedTensor(dropped_values, **extract_kwargs(nt)),
        NestedTensor(mask_values, **extract_kwargs(nt)),
    )
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
@register_jagged_func(
    torch.ops.aten.native_dropout_backward.default,
    "grad_output: jt, mask: jt, scale: any",
)
def native_dropout_backward_default(func, *args, **kwargs):
    """Dropout backward: apply the saved mask to the gradient's values buffer."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    grad = new_kwargs.pop("grad_output")
    keep_mask = new_kwargs.pop("mask")

    grad_values = func(grad._values, keep_mask._values, **new_kwargs)
    return NestedTensor(grad_values, **extract_kwargs(grad))
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
@register_jagged_func(torch.ops.aten.prod.dim_int, "self: jt, dim: any, keepdim: any?")
def prod_dim_int(func, *args, **kwargs):
    """prod over a single non-ragged dimension of an NJT.

    keepdim=True is required so the result keeps the jagged structure.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    # TODO: Figure out how to handle this better
    # keep_dim is required to keep it in jagged format
    if not new_kwargs["keepdim"]:
        raise RuntimeError("prod(): keepdim=True must be set for NestedTensor")
    dim = new_kwargs["dim"]
    new_kwargs["dim"] = _wrap_jagged_dim(len(inp._size), dim, "prod")

    # Use the normalized `inp` rather than `args[0]`: if `self` was passed by
    # keyword, `args` is empty and `args[0]` would raise IndexError. This also
    # matches the convention used by every other op in this file.
    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
|
| 732 |
+
|
| 733 |
+
|
| 734 |
+
@register_jagged_func(
    torch.ops.aten.split.Tensor, "self: jt, split_size: any, dim: any"
)
def split_tensor(func, *args, **kwargs):
    """Split an NJT along a non-ragged dim by splitting the values buffer."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")

    new_kwargs["dim"] = _wrap_jagged_dim(nt.dim(), new_kwargs["dim"], "split")

    pieces = func(nt._values, **new_kwargs)
    return tuple(NestedTensor(values=piece, **extract_kwargs(nt)) for piece in pieces)
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
@register_jagged_func(
    torch.ops.aten.split_with_sizes.default, "self: jt, split_sizes: any, dim: any"
)
def split_with_sizes_default(func, *args, **kwargs):
    """split_with_sizes along a non-ragged dim, applied to the values buffer."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")

    new_kwargs["dim"] = _wrap_jagged_dim(
        nt.dim(), new_kwargs["dim"], "split_with_sizes"
    )

    pieces = func(nt._values, **new_kwargs)
    return [NestedTensor(values=piece, **extract_kwargs(nt)) for piece in pieces]
|
| 770 |
+
|
| 771 |
+
|
| 772 |
+
@register_jagged_func(
    torch.ops.aten.narrow.default, "self: jt, dim: any, start: any, length: any"
)
def narrow(func, *args, **kwargs):
    """Narrow an NJT along a non-ragged dimension."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    nt = new_kwargs.pop("input")

    inner_dim = _wrap_jagged_dim(nt.dim(), new_kwargs["dim"], "narrow")
    narrowed_values = func(
        nt._values,
        dim=inner_dim,
        start=new_kwargs["start"],
        length=new_kwargs["length"],
    )
    return NestedTensor(narrowed_values, **extract_kwargs(nt))
|
| 789 |
+
|
| 790 |
+
|
| 791 |
+
@register_jagged_func(torch.ops.aten.chunk.default, "self: jt, chunks: any, dim: any?")
def chunk_default(func, *args, **kwargs):
    """chunk() for NJTs.

    Chunking along the batch dim (dim=0) splits both the offsets and the
    packed values buffer so each piece is a standalone NJT; chunking along any
    other (non-ragged) dim simply chunks the values buffer.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    new_kwargs["dim"] = _wrap_jagged_dim(
        inp.dim(), new_kwargs["dim"], "chunk", allow_batch_dim=True
    )

    if new_kwargs["dim"] == 0:
        chunks = new_kwargs["chunks"]

        # get _offsets of the chunks: rebuild per-chunk offsets from the
        # per-component lengths
        lengths = inp._offsets.diff()
        chunked_lengths = lengths.chunk(chunks)
        chunked_offsets = [torch.cumsum(x, dim=0) for x in chunked_lengths]
        chunked_offsets = [F.pad(x, (1, 0), value=0) for x in chunked_offsets]  # type: ignore[arg-type]
        nested_kwargs = [
            {"offsets": per_offsets, "_ragged_idx": inp._ragged_idx}
            for per_offsets in chunked_offsets
        ]

        # get _values of the chunks
        split_sizes = [x.sum().item() for x in chunked_lengths]
        chunk_values = inp._values.split(split_sizes)

        # Iterate over the number of chunks actually produced: tensor.chunk()
        # may return fewer than `chunks` pieces. The previous bound,
        # `range(0, chunk_size)` (the per-chunk size along dim 0), was not the
        # chunk count and could under-produce chunks or index out of bounds.
        return [
            NestedTensor(values=chunk_values[i], **(nested_kwargs[i]))
            for i in range(len(chunked_lengths))
        ]
    else:
        return [
            NestedTensor(values=x, **extract_kwargs(inp))
            for x in func(inp._values, **new_kwargs)
        ]
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
@register_jagged_func(torch.ops.aten.unbind.int, "self: jt_all, dim: any?")
def unbind_int(func, *args, **kwargs):
    """Unbind an NJT into per-batch-item dense tensors (dim=0 only).

    Without holes (lengths is None) this is a simple split over the offsets;
    with holes each component is a narrow() window into the values buffer.
    """
    # Note that this specializes on the length of the offsets
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    dim = new_kwargs["dim"]
    if dim != 0:
        raise RuntimeError("unbind(): only supported for NestedTensor on dim=0")

    inp = new_kwargs.pop("input")
    values = inp.values()
    offsets = inp.offsets()
    lengths = inp.lengths()
    ragged_idx = inp._ragged_idx

    if lengths is None:
        # Contiguous case: component i spans offsets[i]:offsets[i+1].
        return torch.split(values, offsets.diff().tolist(), dim=(ragged_idx - 1))

    if ragged_idx <= 0:
        raise RuntimeError(
            "unbind(): nested tensor ragged_idx out of bounds (should be >= 1)"
        )
    # Validate that each (offset, length) window fits inside the values buffer.
    for i in range(lengths.shape[0]):
        if offsets[i] + lengths[i] > values.shape[ragged_idx - 1]:
            raise RuntimeError(
                "unbind(): nested tensor offsets and lengths do not match ragged_idx dimension"
            )
    return [
        torch.narrow(values, dim=(ragged_idx - 1), start=offsets[i], length=lengths[i])
        for i in range(lengths.shape[0])
    ]
|
| 866 |
+
|
| 867 |
+
|
| 868 |
+
@register_jagged_func(torch.ops.aten.squeeze.dim, "self: jt, dim: any")
def squeeze_dim(func, *args, **kwargs):
    """Squeeze a non-batch, non-ragged dim of an NJT."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")

    new_kwargs["dim"] = _wrap_jagged_dim(len(nt._size), new_kwargs["dim"], "squeeze")
    squeezed_values = func(nt._values, **new_kwargs)
    return NestedTensor(squeezed_values, **extract_kwargs(nt))
|
| 879 |
+
|
| 880 |
+
|
| 881 |
+
@register_jagged_func(torch.ops.aten.unsqueeze.default, "self: jt, dim: any")
def unsqueeze_default(func, *args, **kwargs):
    """Unsqueeze an NJT at a non-batch, non-ragged position."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")

    # Account for collapsed jagged dim: the output has one more outer dim than
    # the input, so wrap against len(_size) + 1.
    outer_dim = new_kwargs["dim"]
    new_kwargs["dim"] = _wrap_jagged_dim(len(nt._size) + 1, outer_dim, "unsqueeze")
    return NestedTensor(func(nt._values, **new_kwargs), **extract_kwargs(nt))
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
@register_jagged_func(torch.ops.aten.cat.default, "tensors: any, dim: any")
def cat_default(func, *args, **kwargs):
    """Concatenate a mix of NJTs and dense tensors along a non-ragged dim."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    all_tensors = new_kwargs.pop("tensors")

    # Promote any dense inputs to nested by expanding them like the first NJT
    nested_only = [t for t in all_tensors if t.is_nested]
    assert len(nested_only) > 0
    template = nested_only[0]
    promoted = [t if t.is_nested else t.expand_as(template) for t in all_tensors]

    # Account for collapsed jagged dim
    new_kwargs["dim"] = _wrap_jagged_dim(len(template.shape), new_kwargs["dim"], "cat")

    cat_values = func([t._values for t in promoted], **new_kwargs)
    return NestedTensor(cat_values, **extract_kwargs(promoted[0]))
|
| 917 |
+
|
| 918 |
+
|
| 919 |
+
@register_jagged_func(torch.ops.aten.matmul.default, "self: jt, other: any")
def matmul_default(func, *args, **kwargs):
    """matmul for NJT x dense and NJT x NJT (matching raggedness) cases."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    lhs = new_kwargs.pop("input")
    rhs = new_kwargs.pop("other")

    if lhs.is_nested:
        if not rhs.is_nested:
            # NJT @ dense: apply directly on the packed values buffer
            return NestedTensor(
                func(lhs._values, rhs, **new_kwargs), **extract_kwargs(lhs)
            )
        # BMM with equivalent ragged dims between the two inputs
        if lhs.dim() > 3 and rhs.dim() > 3 and raggedness_matches(lhs, rhs._size):
            return NestedTensor(func(lhs._values, rhs._values), **extract_kwargs(lhs))

    raise RuntimeError(
        f"matmul(): not supported between inputs of shapes {lhs._size} and {rhs.shape}"
    )
|
| 940 |
+
|
| 941 |
+
|
| 942 |
+
@register_jagged_func(
    torch.ops.aten.expand.default, "self: jt, size: any, implicit: any?"
)
def expand_default(func, *args, **kwargs):
    """Expand an NJT's non-ragged trailing dims; batch/ragged dims must match."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")
    size = new_kwargs["size"]

    assert ("implicit" not in new_kwargs) or (not new_kwargs.pop("implicit"))
    if not raggedness_matches(nt, size):
        raise RuntimeError(f"expand(): cannot expand shape {nt._size} -> {size}")

    # -1 leaves the packed ragged dim of the values buffer untouched
    values_expand_arg = [-1, *size[2:]]
    return NestedTensor(func(nt._values, values_expand_arg), **extract_kwargs(nt))
|
| 959 |
+
|
| 960 |
+
|
| 961 |
+
@register_jagged_func(torch.ops.aten.expand_as.default, "self: t, other: jt")
def expand_as_default(func, *args, **kwargs):
    """Expand a dense tensor to match an NJT by expanding against its values."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    dense = new_kwargs.pop("input")
    nt = new_kwargs.pop("other")

    expanded_values = func(dense, nt._values)
    return NestedTensor(expanded_values, **extract_kwargs(nt))
|
| 971 |
+
|
| 972 |
+
|
| 973 |
+
@register_jagged_func(torch.ops.aten.where.self, "condition: jt, self: jt, other: jt")
def where_self(func, *args, **kwargs):
    """Elementwise where() over three NJTs with identical sizes."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    cond = new_kwargs.pop("condition")
    nt = new_kwargs.pop("input")
    alt = new_kwargs.pop("other")

    assert cond._size == alt._size == nt._size

    selected_values = func(cond._values, nt._values, alt._values, **new_kwargs)
    return NestedTensor(selected_values, **extract_kwargs(cond))
|
| 989 |
+
|
| 990 |
+
|
| 991 |
+
@register_jagged_func(torch.ops.aten._pin_memory.default, "self: jt, device: any?")
def _pin_memory_default(func, *args, **kwargs):
    """Pin the values buffer; NJT metadata is carried over unchanged."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")

    pinned_values = func(nt._values, **new_kwargs)
    return NestedTensor(pinned_values, **extract_kwargs(nt))
|
| 1000 |
+
|
| 1001 |
+
|
| 1002 |
+
@register_jagged_func(torch.ops.aten.is_pinned.default, "self: jt, device: any?")
def is_pinned_default(func, *args, **kwargs):
    """An NJT is pinned iff its values buffer is pinned; returns a plain bool."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")
    return func(nt._values, **new_kwargs)
|
| 1011 |
+
|
| 1012 |
+
|
| 1013 |
+
@register_jagged_func(
    torch.ops.aten.is_same_size.default, "self: jt_all, other: jt_all"
)
def is_same_size_default(func, *args, **kwargs):
    """Two NJTs are the same size iff their (symbolic) _size tuples match."""
    lhs, rhs = args[0], args[1]
    return lhs._size == rhs._size
|
| 1018 |
+
|
| 1019 |
+
|
| 1020 |
+
@register_jagged_func(
    torch.ops.aten.sum.dim_IntList,
    "self: jt_all, dim: any?, keepdim: any?, dtype: any?",
)
def sum_dim_IntList(func, *args, **kwargs):
    """
    Performs a sum along the provided tensor dimension.
    Returns a dense tensor if the ragged dimension is reduced away, else returns a nested tensor.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")

    # Classify the requested dims against the batch / ragged dims.
    (
        new_kwargs["dim"],
        reduce_on_batch,
        reduce_on_ragged,
        reduce_on_non_batch,
    ) = _wrap_jagged_dims(
        inp.dim(),
        new_kwargs["dim"],
        "sum",
        inp._ragged_idx,
    )

    if reduce_on_ragged and inp._lengths is not None:
        raise RuntimeError(
            "sum(): not supported where lengths is not None "
            + "if reducing across the ragged dimension for NestedTensor"
        )

    if reduce_on_ragged:  # raggedness reduced away --> return dense tensor
        if (
            reduce_on_batch
        ):  # reduction cases: (batch, ragged), (batch, ragged, non-batch), etc.
            out = func(
                inp._values, **new_kwargs
            )  # no need to read offsets --> apply sum directly on values
        else:
            if (
                reduce_on_non_batch
            ):  # invalid reduction cases: (ragged, non-batch), etc.
                raise RuntimeError(
                    "sum(): not supported along a ragged and non-batch dimension for NestedTensor"
                )
            # reduction cases: (ragged)
            values_ragged_dim_outer = inp._values.permute(
                inp._ragged_idx - 1,  # outer dimension
                *range(0, inp._ragged_idx - 1),
                *range(inp._ragged_idx, inp.dim() - 1),
            )  # shift reduction dimension of values backward to outer dimension

            # _jagged_to_padded_dense_forward requires values to be a 2D tensor
            # with the ragged dimension as the 0th dimension
            padded = torch.ops.aten._jagged_to_padded_dense_forward(
                values_ragged_dim_outer.reshape(values_ragged_dim_outer.shape[0], -1),
                [inp._offsets],
                max_lengths=[inp._max_seqlen],
            )

            padded_ragged_dim_original = padded.view(
                padded.shape[0],
                inp._max_seqlen,
                *values_ragged_dim_outer.shape[
                    1:
                ],  # expand non-batch dimensions of padded tensor
            ).permute(
                0,
                *range(2, inp._ragged_idx + 1),
                1,
                *range(inp._ragged_idx + 1, inp.dim()),
            )  # shift reduction dimension of padded tensor forward to original ragged dimension

            out = torch.sum(
                padded_ragged_dim_original,
                dim=inp._ragged_idx,
            )  # need to read offsets --> pad jagged dimension and apply sum

        if new_kwargs["keepdim"]:
            # TODO: Fix this; it's a bug. should be unsqueezing on ragged_idx
            out = out.unsqueeze(0)
        return out
    else:  # raggedness preserved --> return nested tensor
        if (
            reduce_on_batch
        ):  # invalid reduction cases: (batch), (batch, non-batch), etc.
            raise RuntimeError(
                "sum(): not supported along the batch dimension but not the ragged dimension for NestedTensor"
            )
        # reduction cases: (non-batch), (non-batch, non-batch), etc.
        return NestedTensor(
            func(inp._values, **new_kwargs), **extract_kwargs(inp)
        )  # apply sum directly on values
|
| 1114 |
+
|
| 1115 |
+
|
| 1116 |
+
@register_jagged_func(
    torch.ops.aten.transpose.int, "self: jt_all, dim0: any, dim1: any"
)
def transpose_int(func, *args, **kwargs):
    """Transpose two dims of an NJT.

    If one of the dims is the ragged dim, the result is a new NJT whose
    _ragged_idx points at the other dim, with the values buffer transposed
    accordingly; otherwise the transpose applies directly to the values.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    from torch._prims_common import canonicalize_dims

    inp = new_kwargs.pop("input")
    dim0, dim1 = canonicalize_dims(inp.dim(), (new_kwargs["dim0"], new_kwargs["dim1"]))

    if inp._lengths is not None:
        raise ValueError(
            "transpose(): not supported on jagged layout nested tensor with holes"
        )

    # To support the SDPA API, inputs need to have the ragged idx transposed to dim 2
    # instead of 1, although the internal Flash and mem-effn implementations will
    # use the inputs with raggedness in dim 1.
    if dim0 == inp._ragged_idx or dim1 == inp._ragged_idx:
        if dim0 == 0 or dim1 == 0:
            raise ValueError(
                "Transpose is not supported on the batch dimension for jagged NT"
            )
        # Record where the ragged dim ends up after the transpose.
        if dim0 == inp._ragged_idx:
            to_dim = dim1
        else:
            to_dim = dim0
        inp_kwargs = extract_kwargs(inp)
        inp_kwargs["_ragged_idx"] = to_dim
        return NestedTensor(
            inp.values().transpose(
                _outer_to_inner_dim(len(inp._size), dim0),
                _outer_to_inner_dim(len(inp._size), dim1),
            ),
            **inp_kwargs,
        )

    # Neither dim is ragged: transpose directly on the values buffer.
    new_kwargs["dim0"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim0"], "transpose")
    new_kwargs["dim1"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim1"], "transpose")

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
|
| 1160 |
+
|
| 1161 |
+
|
| 1162 |
+
@register_jagged_func(torch.ops.aten.permute.default, "self: jt_all, dims: any")
def permute_default(func, *args, **kwargs):
    """Permute the dims of an NJT; the batch dim must stay first, and the
    output's _ragged_idx tracks where the ragged dim lands."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")
    dims = new_kwargs.pop("dims")
    inp_kwargs = extract_kwargs(inp)
    inp_dim = len(inp._size)

    # The first two checks are the same as the checks in the normal permute implementation
    if inp_dim != len(dims):
        raise ValueError(
            f"permute(): number of dimensions in the tensor input ({inp_dim}) "
            + f"does not match the length of the desired ordering of dimensions ({len(dims)}).",
        )

    from torch._prims_common import canonicalize_dims

    canonicalized_dims = canonicalize_dims(inp_dim, dims)

    if len(canonicalized_dims) != len(set(canonicalized_dims)):
        raise ValueError("permute(): duplicate dims are not allowed.")

    if inp._lengths is not None:
        raise ValueError(
            "permute(): not supported on jagged layout nested tensor with holes"
        )
    if canonicalized_dims[0] != 0:
        raise ValueError(
            "Permute is not supported on the batch dimension for jagged NT"
        )
    # The ragged dim moves to wherever the permutation sends it.
    inp_kwargs["_ragged_idx"] = canonicalized_dims.index(inp._ragged_idx)
    # Drop the batch dim and shift to inner (values-buffer) dim indices.
    inner_dims = [_outer_to_inner_dim(inp_dim, dim) for dim in canonicalized_dims[1:]]
    new_kwargs["dims"] = inner_dims
    return NestedTensor(func(inp._values, **new_kwargs), **inp_kwargs)
|
| 1198 |
+
|
| 1199 |
+
|
| 1200 |
+
@register_jagged_func(
    [torch.ops.aten.view.default, torch.ops.aten._unsafe_view.default],
    "self: jt_all, size: any",
)
def view_default(func, *args, **kwargs):
    """View a jagged NT as ``size`` by viewing the underlying values buffer.

    The requested size must preserve the batch and ragged dims
    (checked via ``raggedness_matches``); only dense dims may be reshaped.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    size = new_kwargs.pop("size")

    if inp._ragged_idx != 1 and tuple(inp._size) != tuple(size):
        raise RuntimeError(
            f"view(): does not support ragged_idx != 1 except when inp._size == size. "
            f"inp._size is ({inp._size}) and size is ({size})."
        )

    # Ensure specified size still includes batch and ragged dims
    if len(size) < 3 or not raggedness_matches(inp, size):
        raise RuntimeError(f"view(): cannot view shape {inp._size} as {size}")

    # outer size: the size of the NT, e.g. [3, j0, 10]
    # inner size: the size of the values, e.g. [8, 10] (e.g. for offsets = [0, 3, 5, 8])
    # this function gets inner_size[inner_idx] for a given inner_idx.
    #
    # example: for outer size [a, b, c, j0, d, e, f]
    # assume that j0 is ragged, other are concrete integers
    # and ragged_idx=3
    # inner size will be [b, c, inp._values.size(ragged_idx), d, e, f]
    # therefore:
    # inner_size[0] = outer_size[1]
    # inner_size[1] = outer_size[2]
    # inner_size[2] = inp._values.size(ragged_idx - 1)
    # inner_size[3] = outer_size[4]
    # inner_size[4] = outer_size[5]
    def get_inner_size(inner_idx):
        nonlocal inp, size
        if inner_idx == inp._ragged_idx - 1:
            # ragged position: take the packed length from the values buffer
            return inp._values.size(inner_idx)
        else:
            # dense position: shift by one to skip the batch dim
            return size[inner_idx + 1]

    inner_size = [get_inner_size(i) for i in range(len(size) - 1)]

    return NestedTensor(func(inp._values, inner_size), **extract_kwargs(inp))
|
| 1246 |
+
|
| 1247 |
+
|
| 1248 |
+
@register_jagged_func(
    torch.ops.aten.native_layer_norm.default,
    "input: jt_all, normalized_shape: any, weight: any?, bias: any?, eps: any",
)
def native_layer_norm_default(func, *args, **kwargs):
    """Layer norm over a jagged NT; returns ``(output NT, mean, std)``.

    When the ragged dim is part of ``normalized_shape``, normalization is done
    by padding to a dense tensor, masking out padding, and converting back.
    Otherwise the aten op runs directly on the values buffer.

    NOTE(review): the ragged path returns ``std = sqrt(var + eps)``, whereas
    aten.native_layer_norm returns rstd (reciprocal) — confirm callers expect this.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    if inp.dim() <= 2:
        raise RuntimeError(
            "layer_norm(): not supported for NestedTensor objects with 2 or fewer dimensions"
        )

    normalized_shape = new_kwargs["normalized_shape"]
    ragged_size = inp.shape[inp._ragged_idx]

    num_dims_not_normalized = inp.dim() - len(normalized_shape)

    if (
        num_dims_not_normalized == 0
    ):  # error if trying to normalize over the batch dimension
        raise RuntimeError(
            "layer_norm(): not supported when normalizing over the batch dimension for NestedTensor"
        )

    if ragged_size in normalized_shape and inp._lengths is not None:
        raise RuntimeError(
            "layer_norm(): not supported where lengths is not None if operating on the ragged dimension for NestedTensor"
        )

    if (
        ragged_size in normalized_shape
    ):  # special handling for normalizing over the ragged dimension
        padded_input = torch.ops.aten._jagged_to_padded_dense_forward(
            inp._values.flatten(
                start_dim=inp._ragged_idx
            ),  # _jagged_to_padded_dense_forward requires values to be a 2D tensor
            [inp._offsets],
            max_lengths=[inp._max_seqlen],  # max length of ragged dimension
        )

        padded_mask = torch.ops.aten._jagged_to_padded_dense_forward(
            torch.ones((inp._values.shape[0], 1), device=inp.device, dtype=inp.dtype),
            [inp._offsets],
            max_lengths=[inp._max_seqlen],  # max length of ragged dimension
        ).expand(
            padded_input.shape
        )  # mask elements outside of the ragged dimension and expand to the same shape as padded input (3D dense tensor)

        ragged_lengths = (
            inp._offsets.diff().unsqueeze(1).unsqueeze(1) * padded_input.shape[2]
        )  # ragged dim * inner dim, since we sum over dims (1, 2) (the layer on which we normalize)

        mean = (
            torch.sum(
                padded_input,
                dim=(1, 2),
                keepdim=True,
            )
            / ragged_lengths
        )  # a sum over (1, 2) ensures layer norm, whereas a sum over (1) would be an instance norm

        padded_normalized = (
            padded_input - mean
        ) * padded_mask  # mask elements outside of the ragged dimension size for correct variance calculation

        variance = (
            torch.sum(
                torch.square(padded_normalized),
                dim=(1, 2),
                keepdim=True,
            )
            / ragged_lengths
        )  # a sum over (1, 2) ensures layer norm, whereas a sum over (1) would be an instance norm

        std = torch.sqrt(variance + new_kwargs["eps"])
        padded_layer_norm = padded_normalized / std

        jagged_layer_norm_values = torch.ops.aten._padded_dense_to_jagged_forward(
            padded_layer_norm,
            [inp._offsets],
            total_L=inp._values.shape[
                0
            ],  # providing this parameter helps avoid a GPU/CPU sync
        ).unflatten(
            -1, inp.shape[inp._ragged_idx + 1 :]
        )  # unflatten last dimension back into original nested tensor shape, e.g. (B, *, WH) --> (B, *, W, H)

        return (
            NestedTensor(jagged_layer_norm_values, **extract_kwargs(inp)),
            mean,
            std,
        )

    # Ragged dim not normalized over: run the aten op on the values buffer directly.
    output, mean, std = func(inp._values, **new_kwargs)
    return (NestedTensor(output, **extract_kwargs(inp)), mean, std)
|
| 1347 |
+
|
| 1348 |
+
|
| 1349 |
+
@register_jagged_func(
    torch.ops.aten.native_layer_norm_backward.default,
    "grad_out: jt, input: jt, normalized_shape: any, mean: any, rstd: any, weight: any?, bias: any?, output_mask: any",
)
def native_layer_norm_backward_default(func, *args, **kwargs):
    """Layer norm backward for jagged NTs: compute on values, rewrap the input grad."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    grad_output = new_kwargs.pop("grad_out")
    nt_inp = new_kwargs.pop("input")
    grad_inp, grad_gamma, grad_beta = func(
        grad_output._values, nt_inp._values, **new_kwargs
    )
    # grad_inp may be None depending on output_mask; only wrap it when present
    if grad_inp is None:
        return (None, grad_gamma, grad_beta)

    return (NestedTensor(grad_inp, **extract_kwargs(nt_inp)), grad_gamma, grad_beta)
|
| 1364 |
+
|
| 1365 |
+
|
| 1366 |
+
@register_jagged_func(torch.ops.aten.select.int, "self: jt, dim: any, index: any")
def select_int(func, *args, **kwargs):
    """Select a single index along one dim of a jagged NT."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")
    wrapped_dim = _wrap_jagged_dim(
        nt.dim(), new_kwargs["dim"], "select", allow_batch_dim=True
    )
    new_kwargs["dim"] = wrapped_dim

    # handle batch dim slicing via unbind() for now
    # TODO: make this more efficient
    if wrapped_dim == 0:
        return nt.unbind()[new_kwargs["index"]]

    return NestedTensor(func(nt._values, **new_kwargs), **extract_kwargs(nt))
|
| 1383 |
+
|
| 1384 |
+
|
| 1385 |
+
@register_jagged_func(
    torch.ops.aten.slice.Tensor,
    "self: jt, dim: any?, start: any?, end: any?, step: any?",
)
def slice_tensor(func, *args, **kwargs):
    """Slice a jagged NT along a dense (non-batch, non-ragged) dim via its values."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")
    new_kwargs["dim"] = _wrap_jagged_dim(nt.dim(), new_kwargs["dim"], "slice")
    sliced_values = func(nt._values, **new_kwargs)

    return NestedTensor(sliced_values, **extract_kwargs(nt))
|
| 1398 |
+
|
| 1399 |
+
|
| 1400 |
+
@register_jagged_func(
    torch.ops.aten.convolution.default,
    "input: jt, weight: t, bias: t?, stride: any, padding: any, "
    "dilation: any, transposed: any, output_padding: any, groups: any",
)
def convolution_default(func, *args, **kwargs):
    """Convolution over a jagged NT: run the aten op on the values buffer and rewrap."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")
    conv_out = func(nt._values, **new_kwargs)

    return NestedTensor(conv_out, **extract_kwargs(nt))
|
| 1413 |
+
|
| 1414 |
+
|
| 1415 |
+
@register_jagged_func(
    torch.ops.aten.mean.dim, "self: jt_all, dim: any?, keepdim: any?, dtype: any?"
)
def mean_dim(func, *args, **kwargs):
    """
    Performs a mean along the provided tensor dimension.
    Returns a dense tensor if the ragged dimension is reduced away, else returns a nested tensor.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    # NOTE(review): dim is declared optional in the schema but len() is taken
    # unconditionally — a dim=None call would raise TypeError; confirm callers
    # always pass an explicit dim list.
    if len(new_kwargs["dim"]) > 1:
        raise RuntimeError(
            "mean(): not supported across multiple dimensions for NestedTensor"
        )

    inp = new_kwargs.pop("input")

    # _wrap_jagged_dims canonicalizes the dims and reports which kinds of dims
    # (batch / ragged / other) are being reduced over.
    (
        new_kwargs["dim"],
        reduce_on_batch,
        reduce_on_ragged,
        reduce_on_non_batch,
    ) = _wrap_jagged_dims(
        inp.dim(),
        new_kwargs["dim"],
        "mean",
        inp._ragged_idx,
    )

    if reduce_on_batch:
        raise RuntimeError(
            "mean(): not supported along the batch dimension but not the ragged dimension for NestedTensor"
        )

    if reduce_on_ragged and inp._lengths is not None:
        raise RuntimeError(
            "mean(): not supported where lengths is not None "
            + "if reducing across the ragged dimension for NestedTensor"
        )

    if not new_kwargs["keepdim"]:
        raise RuntimeError("mean(): not supported when keepdim=False for NestedTensor")

    if reduce_on_ragged:  # raggedness reduced away
        # mean = sum over the ragged dim divided by each row's true length
        torch_sum = torch.sum(inp, dim=inp._ragged_idx, keepdim=new_kwargs["keepdim"])

        # for every non-batch dimension,
        # unsqueeze lengths into the same shape as the PyTorch sum,
        # as the extra dimensions must all be divided by the same length
        lengths = inp._offsets.diff()
        for _ in range(inp.dim() - 2):
            lengths = lengths.unsqueeze(-1)

        return torch_sum / lengths.broadcast_to(torch_sum.shape)

    return NestedTensor(
        func(inp._values, **new_kwargs), **extract_kwargs(inp)
    )  # raggedness preserved
|
| 1475 |
+
|
| 1476 |
+
|
| 1477 |
+
@register_jagged_func(torch.ops.aten.stack.default, "tensors: any, dim: any")
def stack_default(func, *args, **kwargs):
    """Stack jagged NTs with identical nested structure along a new dim."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    # guaranteed this is non-empty if we got here
    nts = new_kwargs.pop("tensors")
    first = nts[0]
    for nt in nts:
        if not isinstance(nt, NestedTensor):
            raise RuntimeError("stack(): expected all nested tensors inputs")

        if nt.dim() != first.dim():
            raise RuntimeError(
                "stack(): expected all nested tensors to have the same dim"
            )

        if not raggedness_matches(nt, first.shape):
            raise RuntimeError(
                "stack(): expected all nested tensors to have the same nested structure"
            )

    # dim() + 1 because stacking introduces a new output dimension
    new_kwargs["dim"] = _wrap_jagged_dim(
        first.dim() + 1, new_kwargs["dim"], "stack"
    )

    stacked_values = func([nt._values for nt in nts], **new_kwargs)
    return NestedTensor(stacked_values, **extract_kwargs(first))
|
| 1506 |
+
|
| 1507 |
+
|
| 1508 |
+
@register_jagged_func(
    torch.ops.aten.embedding.default,
    "weight: t, indices: jt, padding_idx: any?, scale_grad_by_freq: any?, sparse: any?",
)
def embedding_default(func, *args, **kwargs):
    """Embedding lookup with jagged indices: look up on the values buffer, rewrap."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    # guaranteed this is non-empty if we got here
    idx_nt = new_kwargs.pop("indices")
    table = new_kwargs.pop("weight")

    looked_up = func(table, idx_nt._values, **new_kwargs)
    return NestedTensor(looked_up, **extract_kwargs(idx_nt))
|
| 1524 |
+
|
| 1525 |
+
|
| 1526 |
+
@register_jagged_func(
    [
        torch.ops.aten.values.default,
        torch.ops.aten._nested_get_values.default,
    ],
    "self: jt_all",
)
def values_default(func, *args, **kwargs):
    """Return the jagged NT's underlying values buffer, detached."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")

    # TODO: Handle inference mode properly.
    # See https://github.com/pytorch/pytorch/issues/112024#issuecomment-1779554292
    return nt._values.detach()
|
| 1543 |
+
|
| 1544 |
+
|
| 1545 |
+
@register_jagged_func(torch.ops.aten.all.default, "self: jt_all")
def all_default(func, *args, **kwargs):
    """Reduce all() over every element of the jagged NT's values buffer."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    nt = new_kwargs.pop("input")
    return func(nt._values)
|
| 1554 |
+
|
| 1555 |
+
|
| 1556 |
+
@register_jagged_func(
    torch.ops.aten._nested_view_from_jagged.default,
    "values: t, offsets: t, dummy: jt_all, lengths: t?, ragged_idx: any?, min_seqlen: t?, max_seqlen: t?",
)
def _nested_view_from_jagged_default(func, *args, **kwargs):
    """Build a jagged NT view from a values buffer, offsets, and optional metadata."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    # only cache the seqlen bounds that were actually supplied
    seqlen_cache = {
        name: new_kwargs[name]
        for name in ("min_seqlen", "max_seqlen")
        if new_kwargs[name] is not None
    }

    return NestedTensor(
        new_kwargs["input"],
        new_kwargs["offsets"],
        lengths=new_kwargs["lengths"],
        _ragged_idx=new_kwargs["ragged_idx"],
        _metadata_cache=seqlen_cache,
    )
|
| 1586 |
+
|
| 1587 |
+
|
| 1588 |
+
@register_jagged_func(torch.ops.aten._nested_get_offsets.default, "self: jt_all")
def _nested_get_offsets(func, *args, **kwargs):
    """Return the offsets tensor backing the jagged NT."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    return new_kwargs.pop("input")._offsets
|
| 1596 |
+
|
| 1597 |
+
|
| 1598 |
+
@register_jagged_func(torch.ops.aten._nested_get_lengths.default, "self: jt_all")
def _nested_get_lengths(func, *args, **kwargs):
    """Return the lengths tensor of the jagged NT (None for hole-free layouts)."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    return new_kwargs.pop("input")._lengths
|
| 1606 |
+
|
| 1607 |
+
|
| 1608 |
+
@register_jagged_func(torch.ops.aten._nested_get_ragged_idx.default, "self: jt_all")
def _nested_get_ragged_idx(func, *args, **kwargs):
    """Return the index of the ragged dimension of the jagged NT."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    return new_kwargs.pop("input")._ragged_idx
|
| 1616 |
+
|
| 1617 |
+
|
| 1618 |
+
@register_jagged_func(torch.ops.aten._nested_get_min_seqlen.default, "self: jt_all")
def _nested_get_min_seqlen(func, *args, **kwargs):
    """Return the cached min seqlen of the jagged NT, or None if not cached."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    return new_kwargs.pop("input")._metadata_cache.get("min_seqlen", None)
|
| 1626 |
+
|
| 1627 |
+
|
| 1628 |
+
@register_jagged_func(torch.ops.aten._nested_get_max_seqlen.default, "self: jt_all")
def _nested_get_max_seqlen(func, *args, **kwargs):
    """Return the cached max seqlen of the jagged NT, or None if not cached."""
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    return new_kwargs.pop("input")._metadata_cache.get("max_seqlen", None)
|
| 1636 |
+
|
| 1637 |
+
|
| 1638 |
+
# If a section of the Nested Tensor is fully masked out we still retain the section with a length of 0
@register_jagged_func(torch.ops.aten.masked_select.default, "self: jt", "mask: any")
def masked_select_default(func, *args, **kwargs):
    """masked_select on a 2-D jagged NT.

    Keeps values where ``mask`` is True and rebuilds the offsets from a
    prefix-sum over the mask, so fully-masked-out rows survive as
    zero-length rows.

    Raises:
        RuntimeError: if the input has more than 2 dims or the mask's shape
            does not match the input's.
    """
    _, new_kwargs = normalize_function(  # type: ignore[misc]
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")
    mask = new_kwargs.pop("mask")

    if inp.ndim > 2:
        raise RuntimeError("masked_select only support 2-D selections currently")
    elif inp.shape != mask.shape:
        raise RuntimeError(
            f"Mask with shape {mask.shape} is not compatible with input's shape {inp.shape}"
        )
    res_values = inp._values.masked_select(mask.values())
    # prefix-sum of kept elements (padded with a leading 0); indexing it with
    # the old offsets gives the new offsets
    mask_cumsum = F.pad(mask.values().cumsum(dim=0), (1, 0))  # type: ignore[arg-type]

    # fix: don't shadow the *args parameter with the rebuilt NT kwargs
    nt_kwargs = extract_kwargs(inp)
    nt_kwargs["offsets"] = mask_cumsum[inp._offsets]
    return NestedTensor(
        values=res_values,
        **nt_kwargs,
    )
|
| 1662 |
+
|
| 1663 |
+
|
| 1664 |
+
# Make the dummy available on the C++ side.
@register_jagged_func(torch.ops.aten._nested_get_jagged_dummy.default, "self: any")
def _nested_get_jagged_dummy(func, *args, **kwargs):
    """Return the module's dummy jagged NT via ``_nt_view_dummy()``."""
    # local import; presumably avoids an import cycle at module load — TODO confirm
    from torch.nested._internal.nested_tensor import _nt_view_dummy

    return _nt_view_dummy()
|
| 1670 |
+
|
| 1671 |
+
|
| 1672 |
+
# Register the Python dummy getter as the aten kernel for CPU, CUDA, and Meta
# dispatch keys so calls from C++ dispatch into _nested_get_jagged_dummy.
with torch.library._scoped_library("aten", "IMPL") as aten:
    aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CPU")
    aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CUDA")
    aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "Meta")
|
vllm/lib/python3.10/site-packages/torch/nested/_internal/sdpa.py
ADDED
|
@@ -0,0 +1,871 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
from typing import Optional, Tuple
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
from torch.backends.cuda import (
|
| 9 |
+
can_use_efficient_attention,
|
| 10 |
+
can_use_flash_attention,
|
| 11 |
+
flash_sdp_enabled,
|
| 12 |
+
math_sdp_enabled,
|
| 13 |
+
mem_efficient_sdp_enabled,
|
| 14 |
+
SDPAParams,
|
| 15 |
+
)
|
| 16 |
+
from torch.nn.attention import SDPBackend
|
| 17 |
+
|
| 18 |
+
from .nested_tensor import NestedTensor
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
log = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _validate_sdpa_input(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
    dropout_p=0.0,
    is_causal=False,
    scale=None,
):
    """Validate SDPA inputs for the jagged layout.

    Checks that query/key/value are all jagged NestedTensors with matching
    dtype, device, and ragged dim, each at least 3-D, and that no attn_mask
    was passed (masks are unsupported for this layout).

    Raises:
        ValueError: on any violation.
    """
    if (
        not isinstance(query, NestedTensor)
        or not isinstance(key, NestedTensor)
        or not isinstance(value, NestedTensor)
    ):
        raise ValueError(
            f"Expected query, key, and value to be nested tensors, "
            f"but got query.is_nested: {query.is_nested}, key.is_nested: {key.is_nested}, "
            f"and value.is_nested: {value.is_nested} instead."
        )
    if query.dtype != key.dtype or query.dtype != value.dtype:
        raise ValueError(
            f"Expected query, key, and value to have the same dtype, "
            f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, "
            f"and value.dtype: {value.dtype} instead."
        )
    if query.device != key.device or query.device != value.device:
        raise ValueError(
            f"Expected query, key, and value to have the same device type, "
            f"but got query.device: {query.device}, key.device: {key.device}, "
            f"and value.device: {value.device} instead."
        )
    if query.dim() < 3 or key.dim() < 3 or value.dim() < 3:
        raise ValueError(
            f"Expected query, key, and value to all be at least 3 dimensional, but got query.dim: "
            f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead."
        )
    if query._ragged_idx != key._ragged_idx or query._ragged_idx != value._ragged_idx:
        raise ValueError(
            f"Expected query, key, and value to all be ragged on the same dimension, but got ragged "
            f"dims {query._ragged_idx}, {key._ragged_idx}, and {value._ragged_idx}, respectively."
        )
    if attn_mask is not None:
        # TODO: Figure out whether masks are actually supported for this layout or not
        # NOTE: an attn_mask dtype-compatibility check used to follow this raise;
        # it was unreachable dead code and has been removed.
        raise ValueError("Masks are not yet supported!")
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _check_batch_size_nested(params: SDPAParams, debug=False) -> bool:
|
| 76 |
+
# This is expected to be called after check_tensor_shapes ensuring that the
|
| 77 |
+
# size() calls won't error since the inputs are all 4 dimensional
|
| 78 |
+
q_batch_size = params.query.size(0)
|
| 79 |
+
k_batch_size = params.key.size(0)
|
| 80 |
+
v_batch_size = params.value.size(0)
|
| 81 |
+
|
| 82 |
+
# num_heads logic for nested input is checked in
|
| 83 |
+
# check_for_seq_len_0_nested_tensor as there is handling there to make sure
|
| 84 |
+
# num_heads is not ragged
|
| 85 |
+
return q_batch_size == k_batch_size and q_batch_size == v_batch_size
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def _check_head_dim_size_flash_nested(params: SDPAParams, debug=False) -> bool:
|
| 89 |
+
max_size = 256
|
| 90 |
+
query_size_last = params.query.size(-1)
|
| 91 |
+
key_size_last = params.key.size(-1)
|
| 92 |
+
value_size_last = params.value.size(-1)
|
| 93 |
+
same_head_dim_size = (
|
| 94 |
+
query_size_last == key_size_last and query_size_last == value_size_last
|
| 95 |
+
)
|
| 96 |
+
if not (
|
| 97 |
+
same_head_dim_size
|
| 98 |
+
and (query_size_last % 8 == 0)
|
| 99 |
+
and (query_size_last <= max_size)
|
| 100 |
+
):
|
| 101 |
+
if debug:
|
| 102 |
+
log.warning(
|
| 103 |
+
"For NestedTensor inputs, Flash attention requires q,k,v to have the same "
|
| 104 |
+
"last dimension and to be a multiple of 8 and less than or equal to 256. "
|
| 105 |
+
"Got Query.size(-1): %d, Key.size(-1): %d, Value.size(-1): %d instead.",
|
| 106 |
+
query_size_last,
|
| 107 |
+
key_size_last,
|
| 108 |
+
value_size_last,
|
| 109 |
+
)
|
| 110 |
+
return False
|
| 111 |
+
return True
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
    param: torch.Tensor, param_name: str, debug=False
) -> bool:
    """False if the jagged NT is ragged in num_heads or has a zero-length seq."""
    assert isinstance(param, NestedTensor), "param should be a jagged NT"

    failure_msg = None
    if param._ragged_idx == 1:
        # num_head_dims is ragged
        failure_msg = (
            "Fused kernels do not support ragged num_head_dims, %s has a ragged num_heads."
        )
    # This is being called inside sdp with shape [batch, heads, {seq_len}, dim]
    elif param._get_min_seqlen() == 0:
        failure_msg = (
            "Fused kernels do not support seq_len == 0, %s has a seq len of 0."
        )

    if failure_msg is None:
        return True
    if debug:
        log.warning(failure_msg, param_name)
    return False
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def _try_broadcast_param_size(q_size, k_size, v_size, param_name, debug=False) -> bool:
|
| 141 |
+
max_size = max(q_size, k_size, v_size)
|
| 142 |
+
if (
|
| 143 |
+
(q_size != max_size and q_size != 1)
|
| 144 |
+
or (k_size != max_size and k_size != 1)
|
| 145 |
+
or (v_size != max_size and v_size != 1)
|
| 146 |
+
):
|
| 147 |
+
if debug:
|
| 148 |
+
log.warning(
|
| 149 |
+
"Both fused kernels require query, key and value to have broadcastable %s, "
|
| 150 |
+
"got Query %s %d, Key %s %d, Value %s %d instead.",
|
| 151 |
+
param_name,
|
| 152 |
+
param_name,
|
| 153 |
+
q_size,
|
| 154 |
+
param_name,
|
| 155 |
+
k_size,
|
| 156 |
+
param_name,
|
| 157 |
+
v_size,
|
| 158 |
+
)
|
| 159 |
+
return False
|
| 160 |
+
return True
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def _check_for_seq_len_0_nested(params: SDPAParams, debug=False) -> bool:
    """Check each nested input for zero-length sequences / ragged num_heads,
    then verify num_heads agree (or are broadcastable at inference time).

    When this function is called we are assured that the nt is dim==4.
    """
    for tensor, name in (
        (params.query, "query"),
        (params.key, "key"),
        (params.value, "value"),
    ):
        # Short circuit on the first unsafe input; dense inputs are
        # trivially safe.
        if tensor.is_nested:
            if not _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
                tensor, name, debug
            ):
                return False

    # None of the inputs have ragged num_heads at this point, so .size(1)
    # is well-defined.
    q_num_heads = params.query.size(1)
    k_num_heads = params.key.size(1)
    v_num_heads = params.value.size(1)
    if q_num_heads == k_num_heads and q_num_heads == v_num_heads:
        return True

    # num_heads differ: broadcasting is only allowed at inference time.
    if (
        params.query.requires_grad
        or params.key.requires_grad
        or params.value.requires_grad
    ):
        if debug:
            log.warning(
                "Both fused kernels do not support training with broadcasted NT inputs."
            )
        return False
    return _try_broadcast_param_size(
        q_num_heads, k_num_heads, v_num_heads, "num heads", debug
    )
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def _can_use_flash_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
    """True iff every jagged-layout constraint for flash attention holds.

    ``all`` short-circuits, so later checks are skipped once one fails —
    same behavior as an early-return loop.
    """
    return all(
        check(params, debug)
        for check in (
            _check_batch_size_nested,
            _check_head_dim_size_flash_nested,
            _check_for_seq_len_0_nested,
        )
    )
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def _can_use_efficient_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
    """True iff every jagged-layout constraint for mem-efficient attention
    holds; checks short-circuit on the first failure."""
    return all(
        check(params, debug)
        for check in (
            _check_batch_size_nested,
            _check_for_seq_len_0_nested,
        )
    )
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def _can_use_math_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
    """The math fallback needs q/k/v contiguous after a (1, 2) transpose
    and does not support is_causal with nested inputs."""
    transposed_contiguous = (
        params.query.transpose(1, 2).is_contiguous()
        and params.key.transpose(1, 2).is_contiguous()
        and params.value.transpose(1, 2).is_contiguous()
    )
    if not transposed_contiguous:
        if debug:
            log.warning(
                "If inputs are nested tensors they must be contiguous after transposing."
            )
        return False
    if params.is_causal:
        if debug:
            log.warning(
                "Nested tensors for query / key are not supported when is_causal=True."
            )
        return False
    return True
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def _select_sdp_backend(query, key, value, attn_mask, dropout, is_causal, enable_gqa):
    """Choose an SDPA backend for jagged inputs.

    Preference order is flash > mem-efficient > math. When nothing is
    usable, every check is re-run in debug mode so the reasons get logged,
    and SDPBackend.ERROR is returned.
    """
    if not (flash_sdp_enabled() or mem_efficient_sdp_enabled() or math_sdp_enabled()):
        return SDPBackend.ERROR

    params = SDPAParams(query, key, value, attn_mask, dropout, is_causal, enable_gqa)

    if can_use_flash_attention(params) and _can_use_flash_sdpa_jagged(params):
        return SDPBackend.FLASH_ATTENTION
    if can_use_efficient_attention(params) and _can_use_efficient_sdpa_jagged(params):
        return SDPBackend.EFFICIENT_ATTENTION
    if math_sdp_enabled() and _can_use_math_sdpa_jagged(params):
        return SDPBackend.MATH

    # No backend matched — replay all checks with debug=True so each one
    # logs why it rejected the inputs.
    log.warning("Memory efficient kernel not used because:")
    can_use_efficient_attention(params, debug=True)
    _can_use_efficient_sdpa_jagged(params, debug=True)
    log.warning("Flash attention kernel not used because:")
    can_use_flash_attention(params, debug=True)
    _can_use_flash_sdpa_jagged(params, debug=True)
    log.warning("Math attention kernel not used because:")
    _can_use_math_sdpa_jagged(params, debug=True)
    return SDPBackend.ERROR
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _cumulative_and_max_seq_len_nnz(qkv: torch.Tensor) -> Tuple[torch.Tensor, int, int]:
    """Compute flash/efficient-attention metadata for a jagged NT.

    Returns a tuple of:
      * cumulative sequence lengths over the batch (int32, on qkv's device),
      * the maximum sequence length,
      * the total number of elements (the last cumulative value).

    Raises:
        ValueError: if ``qkv`` is not a NestedTensor.
    """
    if not isinstance(qkv, NestedTensor):
        raise ValueError("QKV must be nested for flash cumulative_seq_len calculation.")

    if qkv.lengths() is None:
        # Contiguous jagged NT: the offsets already are the cumulative
        # sequence lengths.
        # TODO: Explore performance impact of copying
        cumulative_seqlen = qkv.offsets().to(dtype=torch.int32, device=qkv.device)
        max_seqlen = qkv._get_max_seqlen()
        n_elem = qkv.values().shape[0]
    else:
        # Non-contiguous (holds lengths): derive the cumulative sums.
        # TODO: Explore performance impact of copying
        cumulative_seqlen = (
            qkv.lengths().cumsum(0).to(dtype=torch.int32, device=qkv.device)
        )
        max_seqlen = qkv._get_max_seqlen()
        # TODO: Explore performance impact when compiling
        n_elem = int(cumulative_seqlen[-1].item())
    return cumulative_seqlen, max_seqlen, n_elem
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def _is_safe_to_get_storage_as_tensor(tensor: torch.Tensor):
    """Decide whether a (transposed) jagged NT's storage can be used directly
    by the flash / efficient-attention kernels without calling contiguous().

    Expected layout after the transpose: [bsz, {seq_len}, num_heads, dim].
    Returns True when the strides from index 1 on are strictly decreasing
    (or when there are fewer than two constituent tensors).
    NOTE(review): only strides are inspected here; the offsets fetched below
    are used solely to count tensors — confirm no offset-pattern check is
    actually needed.
    """
    assert isinstance(tensor, NestedTensor)
    offsets = tensor.offsets()
    strides = tensor._strides

    if offsets.size(0) - 1 <= 1:
        # Zero or one constituent tensor is always safe.
        return True

    # Strides after the batch dim must be strictly decreasing; otherwise a
    # later stride would exceed the seq_len stride.
    for earlier, later in zip(strides[1:], strides[2:]):
        if earlier <= later:
            return False

    return True
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def _view_as_dense(
|
| 365 |
+
tensor: torch.Tensor, Nnz: int, num_heads: int, head_dim: int
|
| 366 |
+
) -> torch.Tensor:
|
| 367 |
+
if tensor.is_nested:
|
| 368 |
+
return tensor.values()
|
| 369 |
+
return tensor.view(Nnz, num_heads, head_dim)
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
# TODO: Next iteration should add test cases and check it works
|
| 373 |
+
# def _sdpa_nested_preprocessing_with_broadcast(query, key, value):
|
| 374 |
+
# # Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
|
| 375 |
+
# # Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
|
| 376 |
+
# # Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
|
| 377 |
+
# q_batch_size = query.size(0)
|
| 378 |
+
# k_batch_size = key.size(0)
|
| 379 |
+
# v_batch_size = value.size(0)
|
| 380 |
+
|
| 381 |
+
# output_batch_size = max(q_batch_size, k_batch_size, v_batch_size)
|
| 382 |
+
|
| 383 |
+
# q_num_heads = query.size(1)
|
| 384 |
+
# k_num_heads = key.size(1)
|
| 385 |
+
# v_num_heads = value.size(1)
|
| 386 |
+
|
| 387 |
+
# output_num_heads = max(q_num_heads, k_num_heads, v_num_heads)
|
| 388 |
+
|
| 389 |
+
# head_dim_qk = query.size(3)
|
| 390 |
+
# head_dim_v = value.size(3)
|
| 391 |
+
|
| 392 |
+
# q_t = query.transpose(1, 2)
|
| 393 |
+
# k_t = key.transpose(1, 2)
|
| 394 |
+
# v_t = value.transpose(1, 2)
|
| 395 |
+
|
| 396 |
+
# # Checks in sdp_utils ensure that if {*}_batch_size/{*}_num_heads !=
|
| 397 |
+
# # output_batch_size/num_heads then they are 1
|
| 398 |
+
# q_batch_size_needs_broadcast = q_batch_size != output_batch_size
|
| 399 |
+
# k_batch_size_needs_broadcast = k_batch_size != output_batch_size
|
| 400 |
+
# v_batch_size_needs_broadcast = v_batch_size != output_batch_size
|
| 401 |
+
|
| 402 |
+
# # If {*}_batch_size_needs_broadcast, then
|
| 403 |
+
# # (1) max_seqlen_batch_{*} is given by {*}_t.size(1)
|
| 404 |
+
# # this is because needs_broadcast indicates that the batch_size is 1
|
| 405 |
+
# # and hence there is only 1 value for seq_len
|
| 406 |
+
# # (2) The cum_seq_lens are given by [0, {*}_t.size(1), 2 * {*}_t.size(1),
|
| 407 |
+
# # ..., outut_batch_size * {*}_t.size(1)]
|
| 408 |
+
# # (3) Nnz_{*} is given by output_batch_size * {*}_t.size(1)
|
| 409 |
+
|
| 410 |
+
# if q_batch_size_needs_broadcast or not q_t.is_nested:
|
| 411 |
+
# max_seqlen_batch_q = q_t.size(1)
|
| 412 |
+
# cumulative_sequence_length_q = torch.arange(
|
| 413 |
+
# 0,
|
| 414 |
+
# (output_batch_size + 1) * max_seqlen_batch_q,
|
| 415 |
+
# max_seqlen_batch_q,
|
| 416 |
+
# device=q_t.device,
|
| 417 |
+
# dtype=torch.int32,
|
| 418 |
+
# )
|
| 419 |
+
# Nnz_q = output_batch_size * max_seqlen_batch_q
|
| 420 |
+
# else:
|
| 421 |
+
# (
|
| 422 |
+
# cumulative_sequence_length_q,
|
| 423 |
+
# max_seqlen_batch_q,
|
| 424 |
+
# Nnz_q,
|
| 425 |
+
# ) = _cumulative_and_max_seq_len_nnz(q_t)
|
| 426 |
+
|
| 427 |
+
# if k_batch_size_needs_broadcast and v_batch_size_needs_broadcast:
|
| 428 |
+
# assert k_t.size(1) == v_t.size(1)
|
| 429 |
+
# max_seqlen_batch_kv = k_t.size(1)
|
| 430 |
+
# cumulative_sequence_length_kv = torch.arange(
|
| 431 |
+
# 0,
|
| 432 |
+
# (output_batch_size + 1) * max_seqlen_batch_kv,
|
| 433 |
+
# max_seqlen_batch_kv,
|
| 434 |
+
# device=k_t.device,
|
| 435 |
+
# dtype=torch.int32,
|
| 436 |
+
# )
|
| 437 |
+
# Nnz_kv = output_batch_size * max_seqlen_batch_kv
|
| 438 |
+
# else:
|
| 439 |
+
# cumulative_sequence_length_kv, max_seqlen_batch_kv, Nnz_kv = (
|
| 440 |
+
# _cumulative_and_max_seq_len_nnz(v_t)
|
| 441 |
+
# if k_batch_size_needs_broadcast
|
| 442 |
+
# else _cumulative_and_max_seq_len_nnz(k_t)
|
| 443 |
+
# )
|
| 444 |
+
|
| 445 |
+
# q_num_heads_needs_broadcast = q_num_heads != output_num_heads
|
| 446 |
+
# k_num_heads_needs_broadcast = k_num_heads != output_num_heads
|
| 447 |
+
# v_num_heads_needs_broadcast = v_num_heads != output_num_heads
|
| 448 |
+
|
| 449 |
+
# if not q_t.is_nested:
|
| 450 |
+
# query_buffer_reshaped = q_t.expand(
|
| 451 |
+
# output_batch_size, q_t.size(1), output_num_heads, head_dim_qk
|
| 452 |
+
# )
|
| 453 |
+
# query_buffer_reshaped = query_buffer_reshaped.reshape(
|
| 454 |
+
# Nnz_q, output_num_heads, head_dim_qk
|
| 455 |
+
# )
|
| 456 |
+
# else:
|
| 457 |
+
# if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
|
| 458 |
+
# q_t = q_t.contiguous()
|
| 459 |
+
# # If we are broadcasting then Nnz_q will be the output_batch_size since
|
| 460 |
+
# # seq_len is 1
|
| 461 |
+
# effective_batch_size_q = (
|
| 462 |
+
# output_batch_size if q_batch_size_needs_broadcast else Nnz_q
|
| 463 |
+
# )
|
| 464 |
+
# query_buffer_reshaped = _view_as_dense(
|
| 465 |
+
# q_t, effective_batch_size_q, output_num_heads, head_dim_qk
|
| 466 |
+
# )
|
| 467 |
+
|
| 468 |
+
# # If the physical layout of the NestedTensor's storage
|
| 469 |
+
# # is not: batch, {seq_len}, num_heads, head_dim then we need
|
| 470 |
+
# # to call contiguous
|
| 471 |
+
# if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
|
| 472 |
+
# k_t = k_t.contiguous()
|
| 473 |
+
# if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
|
| 474 |
+
# v_t = v_t.contiguous()
|
| 475 |
+
|
| 476 |
+
# effective_batch_size_k = (
|
| 477 |
+
# output_batch_size if k_batch_size_needs_broadcast else Nnz_kv
|
| 478 |
+
# )
|
| 479 |
+
# key_buffer_reshaped = _view_as_dense(
|
| 480 |
+
# k_t, effective_batch_size_k, output_num_heads, head_dim_qk
|
| 481 |
+
# )
|
| 482 |
+
|
| 483 |
+
# effective_batch_size_v = (
|
| 484 |
+
# output_batch_size if v_batch_size_needs_broadcast else Nnz_kv
|
| 485 |
+
# )
|
| 486 |
+
# value_buffer_reshaped = _view_as_dense(
|
| 487 |
+
# v_t, effective_batch_size_v, output_num_heads, head_dim_v
|
| 488 |
+
# )
|
| 489 |
+
|
| 490 |
+
# if not q_batch_size_needs_broadcast:
|
| 491 |
+
# output_shape = q_t._size
|
| 492 |
+
# if head_dim_v != head_dim_qk:
|
| 493 |
+
# output_shape[-1] = head_dim_v
|
| 494 |
+
# if q_num_heads_needs_broadcast:
|
| 495 |
+
# output_shape[1] = output_num_heads
|
| 496 |
+
# else:
|
| 497 |
+
# output_shape = torch.empty(3, dtype=torch.int64, device=torch.device("cpu"))
|
| 498 |
+
# output_shape[0] = q_t.size(1)
|
| 499 |
+
# output_shape[1] = output_num_heads
|
| 500 |
+
# output_shape[2] = head_dim_v
|
| 501 |
+
|
| 502 |
+
# return (
|
| 503 |
+
# query_buffer_reshaped,
|
| 504 |
+
# key_buffer_reshaped,
|
| 505 |
+
# value_buffer_reshaped,
|
| 506 |
+
# cumulative_sequence_length_q,
|
| 507 |
+
# cumulative_sequence_length_kv,
|
| 508 |
+
# max_seqlen_batch_q,
|
| 509 |
+
# max_seqlen_batch_kv,
|
| 510 |
+
# output_shape,
|
| 511 |
+
# )
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
def _sdpa_nested_preprocessing(query, key, value):
    """Prepare jagged q/k/v for the fused attention kernels.

    Inputs are [Batch, Num_heads, {seq_len}, Dim_per_head]. Returns dense
    [Nnz, num_heads, head_dim] buffers for q/k/v plus cumulative / max
    sequence-length metadata and the info needed to rebuild the output NT.

    Raises:
        RuntimeError: if batch sizes or head counts differ across q/k/v
            (broadcasting is not implemented for jagged layout).
    """
    same_batch_size = query.size(0) == key.size(0) == value.size(0)
    same_num_heads = query.size(1) == key.size(1) == value.size(1)
    if not (same_batch_size and same_num_heads):
        raise RuntimeError(
            "This path is currently not implemented for jagged layout NT."
        )
    # return _sdpa_nested_preprocessing_with_broadcast(query, key, value)

    num_heads = query.size(1)
    head_dim_qk = query.size(3)
    head_dim_v = value.size(3)
    # Re-layout to [Batch, {seq_len}, Num_heads, Dim_per_head].
    q_t = query.transpose(1, 2)
    k_t = key.transpose(1, 2)
    v_t = value.transpose(1, 2)

    (
        cumulative_sequence_length_q,
        max_seqlen_batch_q,
        Nnz_q,
    ) = _cumulative_and_max_seq_len_nnz(q_t)
    (
        cumulative_sequence_length_kv,
        max_seqlen_batch_kv,
        Nnz_kv,
    ) = _cumulative_and_max_seq_len_nnz(k_t)

    # [TODO] K and V have to have the same Nnz, should probably torch_check
    # assume in order to not iterate over v

    # Materialize any buffer whose physical storage layout is not
    # batch, {seq_len}, num_heads, head_dim.
    if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
        q_t = q_t.contiguous()
    if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
        k_t = k_t.contiguous()
    if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
        v_t = v_t.contiguous()

    query_buffer_reshaped = _view_as_dense(q_t, Nnz_q, num_heads, head_dim_qk)
    key_buffer_reshaped = _view_as_dense(k_t, Nnz_kv, num_heads, head_dim_qk)
    value_buffer_reshaped = _view_as_dense(v_t, Nnz_kv, num_heads, head_dim_v)

    # Metadata used by callers to rebuild a jagged NT from the kernel output.
    output_nt_info = {
        "offsets": q_t.offsets(),
        "_max_seqlen": q_t._get_max_seqlen(),
        "_min_seqlen": q_t._get_min_seqlen(),
    }

    return (
        query_buffer_reshaped,
        key_buffer_reshaped,
        value_buffer_reshaped,
        cumulative_sequence_length_q,
        cumulative_sequence_length_kv,
        max_seqlen_batch_q,
        max_seqlen_batch_kv,
        output_nt_info,
    )
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
def _pad_last_dim(
|
| 588 |
+
tensor: torch.Tensor, alignment_size: int, slice: bool
|
| 589 |
+
) -> torch.Tensor:
|
| 590 |
+
# FlashAttentionV2 requires that head dimension be a multiple of 8
|
| 591 |
+
# This was previously done within the kernel, however
|
| 592 |
+
# This causes the kernel to maybe alias query, key, value
|
| 593 |
+
# So instead we pad the head_dimensions to be a multiple of 8
|
| 594 |
+
# in the composite region
|
| 595 |
+
last_dim_size = tensor.size(-1)
|
| 596 |
+
if last_dim_size % alignment_size == 0:
|
| 597 |
+
return tensor
|
| 598 |
+
pad_count = alignment_size - (last_dim_size % alignment_size)
|
| 599 |
+
tensor = torch.nn.functional.pad(tensor, [0, pad_count])
|
| 600 |
+
if slice:
|
| 601 |
+
return tensor[..., 0:last_dim_size]
|
| 602 |
+
return tensor
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
# TODO: coalesce with torch/nn/utils/attention.py
|
| 606 |
+
def _calculate_scale(query, scale):
|
| 607 |
+
# TODO: Investigate why math.sqrt() isn't properly handled by Dynamo?
|
| 608 |
+
softmax_scale = scale if scale is not None else torch.sym_sqrt(1.0 / query.size(-1))
|
| 609 |
+
return softmax_scale
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
def _post_process_flash_output(out: torch.Tensor, og_size):
|
| 613 |
+
if not out.is_nested and out.size(-1) != og_size:
|
| 614 |
+
out = out[..., 0:og_size]
|
| 615 |
+
return out
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
def _is_computing_meta_flops(x):
|
| 619 |
+
# Note: there's a use case of using meta tensors & the dispatch-based flop counter.
|
| 620 |
+
# We can use this function to check for this scenario in order to handle it specially.
|
| 621 |
+
if not torch.jit.is_scripting() and x.device.type == "meta":
|
| 622 |
+
torch_dispatch_mode_stack = (
|
| 623 |
+
torch.utils._python_dispatch._get_current_dispatch_mode_stack()
|
| 624 |
+
)
|
| 625 |
+
return any(
|
| 626 |
+
type(x) == torch.utils.flop_counter.FlopCounterMode
|
| 627 |
+
for x in torch_dispatch_mode_stack
|
| 628 |
+
)
|
| 629 |
+
return False
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
def _autocast(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_mask: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
    """
    [Autocasting SDPA for NJT]

    Normal autocasting doesn't work for NJT+SDPA: NJT intercepts the
    __torch_function__ call for scaled_dot_product_attention before any aten
    op or dispatcher logic runs, so autocasting that aten op never fires; and
    autocasting `_flash_attention_forward` instead would run AFTER kernel
    selection (jagged_scaled_dot_product_attention), making the backend
    choice based on the un-autocasted dtypes while the actual computation
    happens in a different dtype.

    Performing the conversion manually here, before backend selection,
    guarantees the dtypes used for selecting a kernel match the dtypes the
    kernel actually computes in.
    """
    device_type = query.device.type
    # meta device is not supported by autocast, so break early for it
    if _is_computing_meta_flops(query) or not torch.is_autocast_enabled(device_type):
        return query, key, value, attn_mask

    target_dtype = torch.get_autocast_dtype(device_type)

    def maybe_convert(t):
        # None masks and non-float / already-converted / float64 tensors
        # pass through untouched.
        if t is None:
            return t
        if (
            not t.dtype.is_floating_point
            or t.dtype == target_dtype
            or t.dtype == torch.float64
        ):
            return t
        return t.to(target_dtype)

    return (
        maybe_convert(query),
        maybe_convert(key),
        maybe_convert(value),
        maybe_convert(attn_mask),
    )
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
def jagged_scaled_dot_product_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
    dropout_p=0.0,
    is_causal=False,
    scale=None,
    enable_gqa=False,
):
    """SDPA entry point for jagged-layout NestedTensors.

    Manually autocasts and validates the inputs, selects a backend
    (flash > mem-efficient > math), runs the corresponding kernel on dense
    buffers derived from the jagged layout, and rebuilds a jagged NT of the
    same [B, num_heads, {seq_len}, head_dim] layout as the query.

    Raises RuntimeError when no backend is viable.
    """
    query, key, value, attn_mask = _autocast(query, key, value, attn_mask)
    _validate_sdpa_input(query, key, value, attn_mask, dropout_p, is_causal, scale)
    # for mypy, ugh
    assert (
        isinstance(query, NestedTensor)
        and isinstance(key, NestedTensor)
        and isinstance(value, NestedTensor)
    )
    from torch.nested._internal.nested_tensor import nested_view_from_values_offsets

    # Special path for non-ragged sequence length (e.g. for SAM where we have a ragged
    # second batch dim instead). For this case, we can just send the dense buffers through
    # vanilla SDPA.
    if query.dim() > 3 and key.dim() > 3 and value.dim() > 3 and query._ragged_idx == 1:
        output = F.scaled_dot_product_attention(
            query.values(),
            key.values(),
            value.values(),
            attn_mask=(
                attn_mask.values() if isinstance(attn_mask, NestedTensor) else attn_mask
            ),
            dropout_p=dropout_p,
            is_causal=is_causal,
            scale=scale,
        )
        return nested_view_from_values_offsets(output, query.offsets())

    # logsumexp is only needed when a backward pass may follow.
    compute_logsumexp = query.requires_grad or key.requires_grad or value.requires_grad

    backend_choice = _select_sdp_backend(
        query, key, value, attn_mask, dropout_p, is_causal, enable_gqa
    )

    if _is_computing_meta_flops(query):
        # Backend choice will probably not be correct if we have a meta device,
        # because backend choice is device-aware. In this case, we mostly just
        # want to avoid using math backend (which does a .item() call).
        # Arbitrarily choose flash attention.
        backend_choice = SDPBackend.FLASH_ATTENTION

    if backend_choice == SDPBackend.FLASH_ATTENTION:
        # Flash requires head_dim % 8 == 0, so pad up front (see _pad_last_dim)
        # and trim the output back to og_size at the end.
        og_size = query.size(-1)
        query_padded = _pad_last_dim(query, 8, False)
        key_padded = _pad_last_dim(key, 8, False)
        value_padded = _pad_last_dim(value, 8, False)
        # We need to calculate the scale based off the OG head dim size
        og_scale = _calculate_scale(query, scale)
        (
            query_buffer_reshaped,
            key_buffer_reshaped,
            value_buffer_reshaped,
            cumulative_sequence_length_q,
            cumulative_sequence_length_kv,
            max_seqlen_batch_q,
            max_seqlen_batch_kv,
            output_nt_info,
        ) = _sdpa_nested_preprocessing(query_padded, key_padded, value_padded)

        (
            attention,
            logsumexp,
            philox_seed,
            philox_offset,
            debug_attn_mask,
        ) = torch.ops.aten._flash_attention_forward(
            query_buffer_reshaped,
            key_buffer_reshaped,
            value_buffer_reshaped,
            cumulative_sequence_length_q,
            cumulative_sequence_length_kv,
            max_seqlen_batch_q,
            max_seqlen_batch_kv,
            dropout_p,
            is_causal,
            False,
            scale=og_scale,
        )

        # Reshape output to convert nnz to batch_size and seq_len
        attention = nested_view_from_values_offsets(
            attention,  # output from flash_attn is [total_q, num_heads, head_size_og]
            output_nt_info["offsets"],
            min_seqlen=output_nt_info["_min_seqlen"],
            max_seqlen=output_nt_info["_max_seqlen"],
        ).transpose(1, 2)
        return _post_process_flash_output(attention, og_size)
    elif backend_choice == SDPBackend.EFFICIENT_ATTENTION:
        (
            query_reshaped,
            key_reshaped,
            value_reshaped,
            cumulative_sequence_length_q,
            cumulative_sequence_length_kv,
            max_seqlen_batch_q,
            max_seqlen_batch_kv,
            output_nt_info,
        ) = _sdpa_nested_preprocessing(query, key, value)
        # NOTE(review): max_seqlen_q is unused and max_seqlen_batch_kv is
        # rebound by this unpacking — presumably intentional, verify upstream.
        (
            attention,
            log_sumexp,
            seed,
            offset,
            max_seqlen_q,
            max_seqlen_batch_kv,
        ) = torch.ops.aten._efficient_attention_forward(
            query_reshaped.unsqueeze(0),
            key_reshaped.unsqueeze(0),
            value_reshaped.unsqueeze(0),
            None,
            cumulative_sequence_length_q,
            cumulative_sequence_length_kv,
            max_seqlen_batch_q,
            max_seqlen_batch_kv,
            dropout_p,
            int(is_causal),
            compute_logsumexp,
            scale=scale,
        )

        # Reshape output to convert nnz to batch_size and seq_len
        return nested_view_from_values_offsets(
            attention.squeeze(0),
            output_nt_info["offsets"],
            min_seqlen=output_nt_info["_min_seqlen"],
            max_seqlen=output_nt_info["_max_seqlen"],
        ).transpose(1, 2)
    elif backend_choice == SDPBackend.MATH:
        # save the offsets and shape of the inputs, so we can reshape the final output
        # query @ key = attn: [B, D1, j0, D'] @ [B, D1, D' j1] = [B, D1, j0, j1]
        # attn @ value = out: [B, D1, j0, j1] @ [B, D1, j1, D2] = [B, D1, j0, D2]
        offsets = query.offsets()
        d1 = query._size[1]
        d2 = value._size[-1]

        min_seqlen_tensor = query._metadata_cache.get(
            "min_seqlen", None
        )  # type: ignore[attr-defined]
        max_seqlen_tensor = query._metadata_cache.get(
            "max_seqlen", None
        )  # type: ignore[attr-defined]

        # convert jagged layout Nested Tensor to strided layout Nested Tensor
        # which support the math implementation of SDPA
        def get_strided_layout_nested_tensor(jagged_layout_nt):
            lengths = jagged_layout_nt._offsets[1:] - jagged_layout_nt._offsets[:-1]
            transpose = torch.transpose(jagged_layout_nt, 1, 2)
            tensor_list = transpose.values().split(list(lengths), dim=0)
            strided_nt = torch.nested.as_nested_tensor(list(tensor_list))
            strided_nt = strided_nt.transpose(1, 2).contiguous()
            return strided_nt

        query = get_strided_layout_nested_tensor(query)
        key = get_strided_layout_nested_tensor(key)
        value = get_strided_layout_nested_tensor(value)

        attn_out = torch._scaled_dot_product_attention_math(
            query, key, value, attn_mask, dropout_p, is_causal, scale=scale
        )[0]

        from torch.nested._internal.nested_tensor import _load_val_from_tensor

        # convert strided layout Nested Tensor back to jagged layout Nested Tensor
        attn_out = attn_out.transpose(1, 2).contiguous().values()
        attn_out = attn_out.view(-1, d1, d2)
        attn_out = nested_view_from_values_offsets(
            attn_out,
            offsets,
            min_seqlen=(
                None
                if min_seqlen_tensor is None
                else _load_val_from_tensor(min_seqlen_tensor)
            ),
            max_seqlen=(
                None
                if max_seqlen_tensor is None
                else _load_val_from_tensor(max_seqlen_tensor)
            ),
        ).transpose(1, 2)

        return attn_out
    else:
        raise RuntimeError(
            "No viable backend for scaled_dot_product_attention was found."
        )
|
vllm/lib/python3.10/site-packages/torch/optim/__init__.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
:mod:`torch.optim` is a package implementing various optimization algorithms.
|
| 3 |
+
|
| 4 |
+
Most commonly used methods are already supported, and the interface is general
|
| 5 |
+
enough, so that more sophisticated ones can also be easily integrated in the
|
| 6 |
+
future.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from torch.optim import lr_scheduler as lr_scheduler, swa_utils as swa_utils
|
| 10 |
+
from torch.optim._adafactor import Adafactor as Adafactor
|
| 11 |
+
from torch.optim.adadelta import Adadelta as Adadelta
|
| 12 |
+
from torch.optim.adagrad import Adagrad as Adagrad
|
| 13 |
+
from torch.optim.adam import Adam as Adam
|
| 14 |
+
from torch.optim.adamax import Adamax as Adamax
|
| 15 |
+
from torch.optim.adamw import AdamW as AdamW
|
| 16 |
+
from torch.optim.asgd import ASGD as ASGD
|
| 17 |
+
from torch.optim.lbfgs import LBFGS as LBFGS
|
| 18 |
+
from torch.optim.nadam import NAdam as NAdam
|
| 19 |
+
from torch.optim.optimizer import Optimizer as Optimizer
|
| 20 |
+
from torch.optim.radam import RAdam as RAdam
|
| 21 |
+
from torch.optim.rmsprop import RMSprop as RMSprop
|
| 22 |
+
from torch.optim.rprop import Rprop as Rprop
|
| 23 |
+
from torch.optim.sgd import SGD as SGD
|
| 24 |
+
from torch.optim.sparse_adam import SparseAdam as SparseAdam
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
Adafactor.__module__ = "torch.optim"
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
del adadelta # type: ignore[name-defined] # noqa: F821
|
| 31 |
+
del adagrad # type: ignore[name-defined] # noqa: F821
|
| 32 |
+
del adam # type: ignore[name-defined] # noqa: F821
|
| 33 |
+
del adamw # type: ignore[name-defined] # noqa: F821
|
| 34 |
+
del sparse_adam # type: ignore[name-defined] # noqa: F821
|
| 35 |
+
del adamax # type: ignore[name-defined] # noqa: F821
|
| 36 |
+
del asgd # type: ignore[name-defined] # noqa: F821
|
| 37 |
+
del sgd # type: ignore[name-defined] # noqa: F821
|
| 38 |
+
del radam # type: ignore[name-defined] # noqa: F821
|
| 39 |
+
del rprop # type: ignore[name-defined] # noqa: F821
|
| 40 |
+
del rmsprop # type: ignore[name-defined] # noqa: F821
|
| 41 |
+
del optimizer # type: ignore[name-defined] # noqa: F821
|
| 42 |
+
del nadam # type: ignore[name-defined] # noqa: F821
|
| 43 |
+
del lbfgs # type: ignore[name-defined] # noqa: F821
|
| 44 |
+
|
| 45 |
+
__all__ = [
|
| 46 |
+
"Adafactor",
|
| 47 |
+
"Adadelta",
|
| 48 |
+
"Adagrad",
|
| 49 |
+
"Adam",
|
| 50 |
+
"Adamax",
|
| 51 |
+
"AdamW",
|
| 52 |
+
"ASGD",
|
| 53 |
+
"LBFGS",
|
| 54 |
+
"lr_scheduler",
|
| 55 |
+
"NAdam",
|
| 56 |
+
"Optimizer",
|
| 57 |
+
"RAdam",
|
| 58 |
+
"RMSprop",
|
| 59 |
+
"Rprop",
|
| 60 |
+
"SGD",
|
| 61 |
+
"SparseAdam",
|
| 62 |
+
"swa_utils",
|
| 63 |
+
]
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.54 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc
ADDED
|
Binary file (2.08 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc
ADDED
|
Binary file (11.1 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc
ADDED
|
Binary file (17 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc
ADDED
|
Binary file (11.4 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc
ADDED
|
Binary file (17 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc
ADDED
|
Binary file (9.44 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc
ADDED
|
Binary file (71.9 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc
ADDED
|
Binary file (35.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc
ADDED
|
Binary file (16.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc
ADDED
|
Binary file (12.9 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc
ADDED
|
Binary file (12.9 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc
ADDED
|
Binary file (6.64 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc
ADDED
|
Binary file (16.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/optim/_functional.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
r"""Functional interface."""
|
| 3 |
+
import math
|
| 4 |
+
from typing import List
|
| 5 |
+
|
| 6 |
+
from torch import Tensor
|
| 7 |
+
|
| 8 |
+
from .adadelta import adadelta # type: ignore[attr-defined] # noqa: F401
|
| 9 |
+
from .adagrad import _make_sparse, adagrad # type: ignore[attr-defined] # noqa: F401
|
| 10 |
+
from .adam import adam # type: ignore[attr-defined] # noqa: F401
|
| 11 |
+
from .adamax import adamax # type: ignore[attr-defined] # noqa: F401
|
| 12 |
+
from .adamw import adamw # type: ignore[attr-defined] # noqa: F401
|
| 13 |
+
from .asgd import asgd # type: ignore[attr-defined] # noqa: F401
|
| 14 |
+
from .nadam import nadam # type: ignore[attr-defined] # noqa: F401
|
| 15 |
+
from .radam import radam # type: ignore[attr-defined] # noqa: F401
|
| 16 |
+
from .rmsprop import rmsprop # type: ignore[attr-defined] # noqa: F401
|
| 17 |
+
from .rprop import rprop # type: ignore[attr-defined] # noqa: F401
|
| 18 |
+
from .sgd import sgd # type: ignore[attr-defined] # noqa: F401
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# TODO: use foreach API in optim._functional to do all the computation
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def sparse_adam(
|
| 25 |
+
params: List[Tensor],
|
| 26 |
+
grads: List[Tensor],
|
| 27 |
+
exp_avgs: List[Tensor],
|
| 28 |
+
exp_avg_sqs: List[Tensor],
|
| 29 |
+
state_steps: List[int],
|
| 30 |
+
*,
|
| 31 |
+
eps: float,
|
| 32 |
+
beta1: float,
|
| 33 |
+
beta2: float,
|
| 34 |
+
lr: float,
|
| 35 |
+
maximize: bool,
|
| 36 |
+
):
|
| 37 |
+
r"""Functional API that performs Sparse Adam algorithm computation.
|
| 38 |
+
|
| 39 |
+
See :class:`~torch.optim.SparseAdam` for details.
|
| 40 |
+
"""
|
| 41 |
+
for i, param in enumerate(params):
|
| 42 |
+
grad = grads[i]
|
| 43 |
+
grad = grad if not maximize else -grad
|
| 44 |
+
grad = grad.coalesce() # the update is non-linear so indices must be unique
|
| 45 |
+
grad_indices = grad._indices()
|
| 46 |
+
grad_values = grad._values()
|
| 47 |
+
if grad_values.numel() == 0:
|
| 48 |
+
# Skip update for empty grad
|
| 49 |
+
continue
|
| 50 |
+
size = grad.size()
|
| 51 |
+
|
| 52 |
+
exp_avg = exp_avgs[i]
|
| 53 |
+
exp_avg_sq = exp_avg_sqs[i]
|
| 54 |
+
step = state_steps[i]
|
| 55 |
+
|
| 56 |
+
def make_sparse(values):
|
| 57 |
+
constructor = grad.new
|
| 58 |
+
if grad_indices.dim() == 0 or values.dim() == 0:
|
| 59 |
+
return constructor().resize_as_(grad)
|
| 60 |
+
return constructor(grad_indices, values, size)
|
| 61 |
+
|
| 62 |
+
# Decay the first and second moment running average coefficient
|
| 63 |
+
# old <- b * old + (1 - b) * new
|
| 64 |
+
# <==> old += (1 - b) * (new - old)
|
| 65 |
+
old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
|
| 66 |
+
exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
|
| 67 |
+
exp_avg.add_(make_sparse(exp_avg_update_values))
|
| 68 |
+
old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
|
| 69 |
+
exp_avg_sq_update_values = (
|
| 70 |
+
grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
|
| 71 |
+
)
|
| 72 |
+
exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))
|
| 73 |
+
|
| 74 |
+
# Dense addition again is intended, avoiding another sparse_mask
|
| 75 |
+
numer = exp_avg_update_values.add_(old_exp_avg_values)
|
| 76 |
+
exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
|
| 77 |
+
denom = exp_avg_sq_update_values.sqrt_().add_(eps)
|
| 78 |
+
del exp_avg_update_values, exp_avg_sq_update_values
|
| 79 |
+
|
| 80 |
+
bias_correction1 = 1 - beta1**step
|
| 81 |
+
bias_correction2 = 1 - beta2**step
|
| 82 |
+
step_size = lr * math.sqrt(bias_correction2) / bias_correction1
|
| 83 |
+
|
| 84 |
+
param.add_(make_sparse(-step_size * numer.div_(denom)))
|
vllm/lib/python3.10/site-packages/torch/optim/adam.py
ADDED
|
@@ -0,0 +1,803 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
from typing import cast, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch import Tensor
|
| 7 |
+
|
| 8 |
+
from .optimizer import (
|
| 9 |
+
_capturable_doc,
|
| 10 |
+
_default_to_fused_or_foreach,
|
| 11 |
+
_device_dtype_check_for_fused,
|
| 12 |
+
_differentiable_doc,
|
| 13 |
+
_disable_dynamo_if_unsupported,
|
| 14 |
+
_foreach_doc,
|
| 15 |
+
_fused_doc,
|
| 16 |
+
_get_capturable_supported_devices,
|
| 17 |
+
_get_scalar_dtype,
|
| 18 |
+
_get_value,
|
| 19 |
+
_maximize_doc,
|
| 20 |
+
_stack_if_compiling,
|
| 21 |
+
_use_grad_for_differentiable,
|
| 22 |
+
_view_as_real,
|
| 23 |
+
DeviceDict,
|
| 24 |
+
Optimizer,
|
| 25 |
+
ParamsT,
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
__all__ = ["Adam", "adam"]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class Adam(Optimizer):
|
| 33 |
+
def __init__(
|
| 34 |
+
self,
|
| 35 |
+
params: ParamsT,
|
| 36 |
+
lr: Union[float, Tensor] = 1e-3,
|
| 37 |
+
betas: Tuple[float, float] = (0.9, 0.999),
|
| 38 |
+
eps: float = 1e-8,
|
| 39 |
+
weight_decay: float = 0,
|
| 40 |
+
amsgrad: bool = False,
|
| 41 |
+
*,
|
| 42 |
+
foreach: Optional[bool] = None,
|
| 43 |
+
maximize: bool = False,
|
| 44 |
+
capturable: bool = False,
|
| 45 |
+
differentiable: bool = False,
|
| 46 |
+
fused: Optional[bool] = None,
|
| 47 |
+
):
|
| 48 |
+
if isinstance(lr, Tensor):
|
| 49 |
+
if foreach and not capturable:
|
| 50 |
+
raise ValueError(
|
| 51 |
+
"lr as a Tensor is not supported for capturable=False and foreach=True"
|
| 52 |
+
)
|
| 53 |
+
if lr.numel() != 1:
|
| 54 |
+
raise ValueError("Tensor lr must be 1-element")
|
| 55 |
+
if not 0.0 <= lr:
|
| 56 |
+
raise ValueError(f"Invalid learning rate: {lr}")
|
| 57 |
+
if not 0.0 <= eps:
|
| 58 |
+
raise ValueError(f"Invalid epsilon value: {eps}")
|
| 59 |
+
if not 0.0 <= betas[0] < 1.0:
|
| 60 |
+
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
|
| 61 |
+
if not 0.0 <= betas[1] < 1.0:
|
| 62 |
+
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
|
| 63 |
+
if not 0.0 <= weight_decay:
|
| 64 |
+
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
|
| 65 |
+
|
| 66 |
+
defaults = dict(
|
| 67 |
+
lr=lr,
|
| 68 |
+
betas=betas,
|
| 69 |
+
eps=eps,
|
| 70 |
+
weight_decay=weight_decay,
|
| 71 |
+
amsgrad=amsgrad,
|
| 72 |
+
maximize=maximize,
|
| 73 |
+
foreach=foreach,
|
| 74 |
+
capturable=capturable,
|
| 75 |
+
differentiable=differentiable,
|
| 76 |
+
fused=fused,
|
| 77 |
+
)
|
| 78 |
+
super().__init__(params, defaults)
|
| 79 |
+
|
| 80 |
+
if fused:
|
| 81 |
+
if differentiable:
|
| 82 |
+
raise RuntimeError("`fused` does not support `differentiable`")
|
| 83 |
+
self._step_supports_amp_scaling = True
|
| 84 |
+
# TODO(crcrpar): [low prec params & their higher prec copy]
|
| 85 |
+
# Support AMP with FP16/BF16 model params which would need
|
| 86 |
+
# higher prec copy of params to do update math in higher prec to
|
| 87 |
+
# alleviate the loss of information.
|
| 88 |
+
if foreach:
|
| 89 |
+
raise RuntimeError("`fused` and `foreach` cannot be `True` together.")
|
| 90 |
+
|
| 91 |
+
def __setstate__(self, state):
|
| 92 |
+
super().__setstate__(state)
|
| 93 |
+
for group in self.param_groups:
|
| 94 |
+
group.setdefault("amsgrad", False)
|
| 95 |
+
group.setdefault("maximize", False)
|
| 96 |
+
group.setdefault("foreach", None)
|
| 97 |
+
group.setdefault("capturable", False)
|
| 98 |
+
group.setdefault("differentiable", False)
|
| 99 |
+
fused = group.setdefault("fused", None)
|
| 100 |
+
for p in group["params"]:
|
| 101 |
+
p_state = self.state.get(p, [])
|
| 102 |
+
if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
|
| 103 |
+
step_val = float(p_state["step"])
|
| 104 |
+
p_state["step"] = (
|
| 105 |
+
torch.tensor(
|
| 106 |
+
step_val,
|
| 107 |
+
dtype=_get_scalar_dtype(is_fused=fused),
|
| 108 |
+
device=p.device,
|
| 109 |
+
)
|
| 110 |
+
if group["capturable"] or group["fused"]
|
| 111 |
+
else torch.tensor(step_val, dtype=_get_scalar_dtype())
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
def _init_group(
|
| 115 |
+
self,
|
| 116 |
+
group,
|
| 117 |
+
params_with_grad,
|
| 118 |
+
grads,
|
| 119 |
+
exp_avgs,
|
| 120 |
+
exp_avg_sqs,
|
| 121 |
+
max_exp_avg_sqs,
|
| 122 |
+
state_steps,
|
| 123 |
+
):
|
| 124 |
+
has_complex = False
|
| 125 |
+
for p in group["params"]:
|
| 126 |
+
if p.grad is not None:
|
| 127 |
+
has_complex |= torch.is_complex(p)
|
| 128 |
+
params_with_grad.append(p)
|
| 129 |
+
if p.grad.is_sparse:
|
| 130 |
+
raise RuntimeError(
|
| 131 |
+
"Adam does not support sparse gradients, please consider SparseAdam instead"
|
| 132 |
+
)
|
| 133 |
+
grads.append(p.grad)
|
| 134 |
+
|
| 135 |
+
state = self.state[p]
|
| 136 |
+
# Lazy state initialization
|
| 137 |
+
if len(state) == 0:
|
| 138 |
+
if group["fused"]:
|
| 139 |
+
_device_dtype_check_for_fused(p)
|
| 140 |
+
# note(crcrpar): [special device hosting for step]
|
| 141 |
+
# Deliberately host `step` on CPU if both capturable and fused are off.
|
| 142 |
+
# This is because kernel launches are costly on CUDA and XLA.
|
| 143 |
+
state["step"] = (
|
| 144 |
+
torch.zeros(
|
| 145 |
+
(),
|
| 146 |
+
dtype=_get_scalar_dtype(is_fused=group["fused"]),
|
| 147 |
+
device=p.device,
|
| 148 |
+
)
|
| 149 |
+
if group["capturable"] or group["fused"]
|
| 150 |
+
else torch.tensor(0.0, dtype=_get_scalar_dtype())
|
| 151 |
+
)
|
| 152 |
+
# Exponential moving average of gradient values
|
| 153 |
+
state["exp_avg"] = torch.zeros_like(
|
| 154 |
+
p, memory_format=torch.preserve_format
|
| 155 |
+
)
|
| 156 |
+
# Exponential moving average of squared gradient values
|
| 157 |
+
state["exp_avg_sq"] = torch.zeros_like(
|
| 158 |
+
p, memory_format=torch.preserve_format
|
| 159 |
+
)
|
| 160 |
+
if group["amsgrad"]:
|
| 161 |
+
# Maintains max of all exp. moving avg. of sq. grad. values
|
| 162 |
+
state["max_exp_avg_sq"] = torch.zeros_like(
|
| 163 |
+
p, memory_format=torch.preserve_format
|
| 164 |
+
)
|
| 165 |
+
|
| 166 |
+
exp_avgs.append(state["exp_avg"])
|
| 167 |
+
exp_avg_sqs.append(state["exp_avg_sq"])
|
| 168 |
+
|
| 169 |
+
if group["amsgrad"]:
|
| 170 |
+
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
|
| 171 |
+
if group["differentiable"] and state["step"].requires_grad:
|
| 172 |
+
raise RuntimeError(
|
| 173 |
+
"`requires_grad` is not supported for `step` in differentiable mode"
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
# Foreach without capturable does not support a tensor lr
|
| 177 |
+
if (
|
| 178 |
+
group["foreach"]
|
| 179 |
+
and torch.is_tensor(group["lr"])
|
| 180 |
+
and not group["capturable"]
|
| 181 |
+
):
|
| 182 |
+
raise RuntimeError(
|
| 183 |
+
"lr as a Tensor is not supported for capturable=False and foreach=True"
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
state_steps.append(state["step"])
|
| 187 |
+
return has_complex
|
| 188 |
+
|
| 189 |
+
@_use_grad_for_differentiable
|
| 190 |
+
def step(self, closure=None):
|
| 191 |
+
"""Perform a single optimization step.
|
| 192 |
+
|
| 193 |
+
Args:
|
| 194 |
+
closure (Callable, optional): A closure that reevaluates the model
|
| 195 |
+
and returns the loss.
|
| 196 |
+
"""
|
| 197 |
+
self._cuda_graph_capture_health_check()
|
| 198 |
+
|
| 199 |
+
loss = None
|
| 200 |
+
if closure is not None:
|
| 201 |
+
with torch.enable_grad():
|
| 202 |
+
loss = closure()
|
| 203 |
+
|
| 204 |
+
for group in self.param_groups:
|
| 205 |
+
params_with_grad: List[Tensor] = []
|
| 206 |
+
grads: List[Tensor] = []
|
| 207 |
+
exp_avgs: List[Tensor] = []
|
| 208 |
+
exp_avg_sqs: List[Tensor] = []
|
| 209 |
+
max_exp_avg_sqs: List[Tensor] = []
|
| 210 |
+
state_steps: List[Tensor] = []
|
| 211 |
+
beta1, beta2 = group["betas"]
|
| 212 |
+
|
| 213 |
+
has_complex = self._init_group(
|
| 214 |
+
group,
|
| 215 |
+
params_with_grad,
|
| 216 |
+
grads,
|
| 217 |
+
exp_avgs,
|
| 218 |
+
exp_avg_sqs,
|
| 219 |
+
max_exp_avg_sqs,
|
| 220 |
+
state_steps,
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
adam(
|
| 224 |
+
params_with_grad,
|
| 225 |
+
grads,
|
| 226 |
+
exp_avgs,
|
| 227 |
+
exp_avg_sqs,
|
| 228 |
+
max_exp_avg_sqs,
|
| 229 |
+
state_steps,
|
| 230 |
+
amsgrad=group["amsgrad"],
|
| 231 |
+
has_complex=has_complex,
|
| 232 |
+
beta1=beta1,
|
| 233 |
+
beta2=beta2,
|
| 234 |
+
lr=group["lr"],
|
| 235 |
+
weight_decay=group["weight_decay"],
|
| 236 |
+
eps=group["eps"],
|
| 237 |
+
maximize=group["maximize"],
|
| 238 |
+
foreach=group["foreach"],
|
| 239 |
+
capturable=group["capturable"],
|
| 240 |
+
differentiable=group["differentiable"],
|
| 241 |
+
fused=group["fused"],
|
| 242 |
+
grad_scale=getattr(self, "grad_scale", None),
|
| 243 |
+
found_inf=getattr(self, "found_inf", None),
|
| 244 |
+
)
|
| 245 |
+
|
| 246 |
+
return loss
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
Adam.__doc__ = (
|
| 250 |
+
r"""Implements Adam algorithm.
|
| 251 |
+
|
| 252 |
+
.. math::
|
| 253 |
+
\begin{aligned}
|
| 254 |
+
&\rule{110mm}{0.4pt} \\
|
| 255 |
+
&\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2
|
| 256 |
+
\text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)} \\
|
| 257 |
+
&\hspace{13mm} \lambda \text{ (weight decay)}, \: \textit{amsgrad},
|
| 258 |
+
\:\textit{maximize} \\
|
| 259 |
+
&\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
|
| 260 |
+
v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex]
|
| 261 |
+
&\rule{110mm}{0.4pt} \\
|
| 262 |
+
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
|
| 263 |
+
|
| 264 |
+
&\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
|
| 265 |
+
&\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
|
| 266 |
+
&\hspace{5mm}\textbf{else} \\
|
| 267 |
+
&\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
|
| 268 |
+
&\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\
|
| 269 |
+
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
|
| 270 |
+
&\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
|
| 271 |
+
&\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
|
| 272 |
+
&\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
|
| 273 |
+
&\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
|
| 274 |
+
&\hspace{5mm}\textbf{if} \: amsgrad \\
|
| 275 |
+
&\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
|
| 276 |
+
\widehat{v_t}) \\
|
| 277 |
+
&\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
|
| 278 |
+
\big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
|
| 279 |
+
&\hspace{5mm}\textbf{else} \\
|
| 280 |
+
&\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
|
| 281 |
+
\big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
|
| 282 |
+
&\rule{110mm}{0.4pt} \\[-1.ex]
|
| 283 |
+
&\bf{return} \: \theta_t \\[-1.ex]
|
| 284 |
+
&\rule{110mm}{0.4pt} \\[-1.ex]
|
| 285 |
+
\end{aligned}
|
| 286 |
+
|
| 287 |
+
For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
|
| 288 |
+
"""
|
| 289 |
+
+ rf"""
|
| 290 |
+
Args:
|
| 291 |
+
params (iterable): iterable of parameters to optimize or dicts defining
|
| 292 |
+
parameter groups
|
| 293 |
+
lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
|
| 294 |
+
is not yet supported for all our implementations. Please use a float
|
| 295 |
+
LR if you are not also specifying fused=True or capturable=True.
|
| 296 |
+
betas (Tuple[float, float], optional): coefficients used for computing
|
| 297 |
+
running averages of gradient and its square (default: (0.9, 0.999))
|
| 298 |
+
eps (float, optional): term added to the denominator to improve
|
| 299 |
+
numerical stability (default: 1e-8)
|
| 300 |
+
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
|
| 301 |
+
amsgrad (bool, optional): whether to use the AMSGrad variant of this
|
| 302 |
+
algorithm from the paper `On the Convergence of Adam and Beyond`_
|
| 303 |
+
(default: False)
|
| 304 |
+
{_foreach_doc}
|
| 305 |
+
{_maximize_doc}
|
| 306 |
+
{_capturable_doc}
|
| 307 |
+
{_differentiable_doc}
|
| 308 |
+
{_fused_doc}
|
| 309 |
+
.. Note::
|
| 310 |
+
A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`.
|
| 311 |
+
.. _Adam\: A Method for Stochastic Optimization:
|
| 312 |
+
https://arxiv.org/abs/1412.6980
|
| 313 |
+
.. _On the Convergence of Adam and Beyond:
|
| 314 |
+
https://openreview.net/forum?id=ryQu7f-RZ
|
| 315 |
+
|
| 316 |
+
"""
|
| 317 |
+
)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def _single_tensor_adam(
|
| 321 |
+
params: List[Tensor],
|
| 322 |
+
grads: List[Tensor],
|
| 323 |
+
exp_avgs: List[Tensor],
|
| 324 |
+
exp_avg_sqs: List[Tensor],
|
| 325 |
+
max_exp_avg_sqs: List[Tensor],
|
| 326 |
+
state_steps: List[Tensor],
|
| 327 |
+
grad_scale: Optional[Tensor],
|
| 328 |
+
found_inf: Optional[Tensor],
|
| 329 |
+
*,
|
| 330 |
+
amsgrad: bool,
|
| 331 |
+
has_complex: bool,
|
| 332 |
+
beta1: float,
|
| 333 |
+
beta2: float,
|
| 334 |
+
lr: Union[float, Tensor],
|
| 335 |
+
weight_decay: float,
|
| 336 |
+
eps: float,
|
| 337 |
+
maximize: bool,
|
| 338 |
+
capturable: bool,
|
| 339 |
+
differentiable: bool,
|
| 340 |
+
):
|
| 341 |
+
assert grad_scale is None and found_inf is None
|
| 342 |
+
|
| 343 |
+
if torch.jit.is_scripting():
|
| 344 |
+
# this assert is due to JIT being dumb and not realizing that the ops below
|
| 345 |
+
# have overloads to handle both float and Tensor lrs, so we just assert it's
|
| 346 |
+
# a float since most people using JIT are using floats
|
| 347 |
+
assert isinstance(lr, float)
|
| 348 |
+
|
| 349 |
+
for i, param in enumerate(params):
|
| 350 |
+
grad = grads[i] if not maximize else -grads[i]
|
| 351 |
+
exp_avg = exp_avgs[i]
|
| 352 |
+
exp_avg_sq = exp_avg_sqs[i]
|
| 353 |
+
step_t = state_steps[i]
|
| 354 |
+
|
| 355 |
+
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
|
| 356 |
+
if not torch._utils.is_compiling() and capturable:
|
| 357 |
+
capturable_supported_devices = _get_capturable_supported_devices()
|
| 358 |
+
assert (
|
| 359 |
+
param.device.type == step_t.device.type
|
| 360 |
+
and param.device.type in capturable_supported_devices
|
| 361 |
+
), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
|
| 362 |
+
|
| 363 |
+
# update step
|
| 364 |
+
step_t += 1
|
| 365 |
+
|
| 366 |
+
if weight_decay != 0:
|
| 367 |
+
grad = grad.add(param, alpha=weight_decay)
|
| 368 |
+
|
| 369 |
+
if torch.is_complex(param):
|
| 370 |
+
grad = torch.view_as_real(grad)
|
| 371 |
+
exp_avg = torch.view_as_real(exp_avg)
|
| 372 |
+
exp_avg_sq = torch.view_as_real(exp_avg_sq)
|
| 373 |
+
if amsgrad:
|
| 374 |
+
max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
|
| 375 |
+
param = torch.view_as_real(param)
|
| 376 |
+
|
| 377 |
+
# Decay the first and second moment running average coefficient
|
| 378 |
+
exp_avg.lerp_(grad, 1 - beta1)
|
| 379 |
+
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
|
| 380 |
+
|
| 381 |
+
if capturable or differentiable:
|
| 382 |
+
step = step_t
|
| 383 |
+
|
| 384 |
+
bias_correction1 = 1 - beta1**step
|
| 385 |
+
bias_correction2 = 1 - beta2**step
|
| 386 |
+
|
| 387 |
+
step_size = lr / bias_correction1
|
| 388 |
+
step_size_neg = step_size.neg()
|
| 389 |
+
|
| 390 |
+
bias_correction2_sqrt = bias_correction2.sqrt()
|
| 391 |
+
|
| 392 |
+
if amsgrad:
|
| 393 |
+
# Maintains the maximum of all 2nd moment running avg. till now
|
| 394 |
+
if differentiable:
|
| 395 |
+
max_exp_avg_sq = max_exp_avg_sqs[i].clone()
|
| 396 |
+
else:
|
| 397 |
+
max_exp_avg_sq = max_exp_avg_sqs[i]
|
| 398 |
+
|
| 399 |
+
max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))
|
| 400 |
+
|
| 401 |
+
# Uses the max. for normalizing running avg. of gradient
|
| 402 |
+
# Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write
|
| 403 |
+
# (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor)
|
| 404 |
+
denom = (
|
| 405 |
+
max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)
|
| 406 |
+
).add_(eps / step_size_neg)
|
| 407 |
+
else:
|
| 408 |
+
denom = (
|
| 409 |
+
exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)
|
| 410 |
+
).add_(eps / step_size_neg)
|
| 411 |
+
|
| 412 |
+
param.addcdiv_(exp_avg, denom)
|
| 413 |
+
else:
|
| 414 |
+
step = _get_value(step_t)
|
| 415 |
+
|
| 416 |
+
bias_correction1 = 1 - beta1**step
|
| 417 |
+
bias_correction2 = 1 - beta2**step
|
| 418 |
+
|
| 419 |
+
step_size = lr / bias_correction1
|
| 420 |
+
|
| 421 |
+
bias_correction2_sqrt = bias_correction2**0.5
|
| 422 |
+
|
| 423 |
+
if amsgrad:
|
| 424 |
+
# Maintains the maximum of all 2nd moment running avg. till now
|
| 425 |
+
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
|
| 426 |
+
|
| 427 |
+
# Use the max. for normalizing running avg. of gradient
|
| 428 |
+
denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
|
| 429 |
+
else:
|
| 430 |
+
denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)
|
| 431 |
+
|
| 432 |
+
param.addcdiv_(exp_avg, denom, value=-step_size)
|
| 433 |
+
|
| 434 |
+
# Lastly, switch back to complex view
|
| 435 |
+
if amsgrad and torch.is_complex(params[i]):
|
| 436 |
+
max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def _multi_tensor_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
):
    """Multi-tensor (foreach) implementation of the Adam update.

    Groups tensors by (device, dtype) and applies the Adam step to each group
    with batched ``torch._foreach_*`` ops instead of a per-parameter loop.
    State tensors (``exp_avgs``, ``exp_avg_sqs``, ``max_exp_avg_sqs``,
    ``state_steps``) and ``params`` are updated in place.

    Raises:
        RuntimeError: if ``lr`` is a Tensor while ``capturable=False`` —
            that combination is unsupported on the foreach path.
    """
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError(
            "lr as a Tensor is not supported for capturable=False and foreach=True"
        )

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    # grad scaling / inf tracking is only handled by the fused path
    assert grad_scale is None and found_inf is None

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]  # type: ignore[list-item]
    )
    for (
        device_params_,
        device_grads_,
        device_exp_avgs_,
        device_exp_avg_sqs_,
        device_max_exp_avg_sqs_,
        device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(List[Tensor], device_state_steps_)

        # Handle complex parameters
        if has_complex:
            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                _view_as_real(
                    device_params,
                    device_grads,
                    device_exp_avgs,
                    device_exp_avg_sqs,
                    device_max_exp_avg_sqs,
                )
            else:
                _view_as_real(
                    device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
                )

        if maximize:
            # out-of-place: device_grads becomes a fresh intermediate list
            device_grads = torch._foreach_neg(device_grads)  # type: ignore[assignment]

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (device_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(  # type: ignore[assignment]
                    device_grads, device_params, alpha=weight_decay
                )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)

        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            device_exp_avg_sqs, device_grads, device_grads, 1 - beta2
        )

        # Delete the local intermediate since it won't be used anymore to save on peak memory
        del device_grads

        bias_correction1: Union[Tuple[Tensor, ...], List[Tensor]]
        bias_correction2: Union[Tuple[Tensor, ...], List[Tensor]]
        bias_correction2_sqrt: Union[Tuple[Tensor, ...], List[Tensor]]

        if capturable:
            # Capturable path: every scalar of the update is kept as a tensor so
            # the whole step can be captured in a CUDA graph.
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            # we do not negate bias_correction1 as it'll need to be negated later anyway
            torch._foreach_neg_(bias_correction2)

            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)

            torch._foreach_sqrt_(bias_correction2)

            # Re-assign for clarity as we maintain minimal intermediates: we'll have
            # step_size = - lr / (1 - beta1 ^ t) where t = num_steps
            # bias_correction2_sqrt = sqrt(1 - beta2 ^ t)
            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)  # type: ignore[assignment]

                # Set intermediate to the max. for normalizing running avg. of gradient when amsgrad
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)

            # at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            # Non-capturable path: step counts are plain Python scalars, so the
            # bias corrections can be computed on the host.
            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in device_state_steps
            ]
            bias_correction2 = [
                1 - beta2 ** _get_value(step) for step in device_state_steps
            ]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])

            bias_correction2_sqrt = [bc**0.5 for bc in bias_correction2]  # type: ignore[arg-type]

            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)

                # Use the max. for normalizing running avg. of gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(
                device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size  # type: ignore[arg-type]
            )
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
def _fused_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,  # Needed for consistency.
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # Needed for consistency.
    differentiable: bool,
) -> None:
    """Fused-kernel implementation of the Adam update.

    Dispatches each (device, dtype) group to ``torch._fused_adam_``, which
    performs the whole Adam step in a single kernel. ``grad_scale`` /
    ``found_inf`` come from AMP grad scaling; when ``found_inf`` fires, the
    step increment is rolled back so the skipped step is not counted.

    Raises:
        RuntimeError: if ``differentiable=True`` (unsupported with fused).
    """
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    # Per-device caches so each scaler tensor is copied to a device at most once.
    grad_scale_dict: DeviceDict = (
        {grad_scale.device: grad_scale} if grad_scale is not None else {}
    )
    found_inf_dict: DeviceDict = (
        {found_inf.device: found_inf} if found_inf is not None else {}
    )

    # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer
    # treating it as a scalar.
    lr_dict: Optional[DeviceDict] = (
        {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
    )
    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]  # type: ignore[list-item]
    )
    for (device, _), (
        (
            device_params_,
            device_grads_,
            device_exp_avgs_,
            device_exp_avg_sqs_,
            device_max_exp_avg_sqs,
            device_state_steps_,
        ),
        _,
    ) in grouped_tensors.items():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(List[Tensor], device_state_steps_)

        # NOTE(review): MPS fused kernel presumably lacks scaler support — confirm.
        if device.type == "mps":  # type: ignore[union-attr]
            assert found_inf is None and grad_scale is None

        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            device_grad_scale = grad_scale_dict.setdefault(
                device, grad_scale.to(device, non_blocking=True)
            )
        if found_inf is not None:
            device_found_inf = found_inf_dict.setdefault(
                device, found_inf.to(device, non_blocking=True)
            )
        if lr_dict is not None and device not in lr_dict:
            lr_dict[device] = lr.to(device=device, non_blocking=True)  # type: ignore[union-attr]
            lr = lr_dict[device]
        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adam_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,  # type: ignore[arg-type]
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,  # type: ignore[arg-type]
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            # Undo the step increment for params whose grads contained inf/nan.
            torch._foreach_sub_(
                device_state_steps, [device_found_inf] * len(device_state_steps)
            )
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adam)
def adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs Adam algorithm computation.

    See :class:`~torch.optim.Adam` for details.
    """
    # Resolve the implementation flags. A default is only picked when the user
    # specified neither `fused` nor `foreach`. We deliberately default to the
    # foreach path (use_fused=False) to give the fused kernel bake-in time
    # before it becomes the default, even though it is typically faster.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )
        # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    fused = False if fused is None else fused
    foreach = False if foreach is None else foreach

    # this check is slow during compilation, so we skip it
    # if it's strictly needed we can add this check back in dynamo
    if not torch._utils.is_compiling():
        steps_are_tensors = all(isinstance(t, torch.Tensor) for t in state_steps)
        if not steps_are_tensors:
            raise RuntimeError(
                "API has changed, `state_steps` argument must contain a list of singleton tensors"
            )

    # The batched implementations cannot be scripted; reject them up front.
    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    # Dispatch; TorchScript always takes the single-tensor path.
    if fused and not torch.jit.is_scripting():
        func = _fused_adam
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adam
    else:
        func = _single_tensor_adam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        has_complex=has_complex,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
    )
|
vllm/lib/python3.10/site-packages/torch/optim/adamax.py
ADDED
|
@@ -0,0 +1,473 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
from typing import cast, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch import Tensor
|
| 7 |
+
|
| 8 |
+
from .optimizer import (
|
| 9 |
+
_capturable_doc,
|
| 10 |
+
_default_to_fused_or_foreach,
|
| 11 |
+
_differentiable_doc,
|
| 12 |
+
_disable_dynamo_if_unsupported,
|
| 13 |
+
_foreach_doc,
|
| 14 |
+
_get_capturable_supported_devices,
|
| 15 |
+
_get_scalar_dtype,
|
| 16 |
+
_get_value,
|
| 17 |
+
_maximize_doc,
|
| 18 |
+
_use_grad_for_differentiable,
|
| 19 |
+
_view_as_real,
|
| 20 |
+
Optimizer,
|
| 21 |
+
ParamsT,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
__all__ = ["Adamax", "adamax"]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class Adamax(Optimizer):
    # Full algorithm documentation is attached below via ``Adamax.__doc__ = ...``
    # so it can interpolate the shared optimizer-option docstrings.
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 2e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        foreach: Optional[bool] = None,
        *,
        maximize: bool = False,
        differentiable: bool = False,
        capturable: bool = False,
    ):
        """Validate the hyperparameters and register them as group defaults."""
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        """Restore pickled state, backfilling options added after older checkpoints.

        Also converts legacy scalar ``step`` entries into tensors (on the
        param's device when capturable, else a CPU tensor).
        """
        super().__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may predate these options; restore their defaults.
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps
    ):
        """Collect params with grads and their (lazily created) optimizer state.

        Appends into the caller-provided lists and returns True if any
        parameter in the group is complex.
        """
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adamax does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                # step lives on the param's device when capturable so the whole
                # update can be CUDA-graph captured; otherwise it stays on CPU.
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.tensor(0.0, dtype=_get_scalar_dtype())
                )
                state["exp_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                state["exp_inf"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )

            exp_avgs.append(state["exp_avg"])
            exp_infs.append(state["exp_inf"])
            state_steps.append(state["step"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            # Re-enable grad: the decorator may have disabled it.
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_infs: List[Tensor] = []
            state_steps: List[Tensor] = []

            beta1, beta2 = group["betas"]
            eps = group["eps"]
            lr = group["lr"]
            weight_decay = group["weight_decay"]
            foreach = group["foreach"]
            maximize = group["maximize"]
            differentiable = group["differentiable"]
            capturable = group["capturable"]

            has_complex = self._init_group(
                group, params_with_grad, grads, exp_avgs, exp_infs, state_steps
            )

            # Delegate the actual math to the functional API.
            adamax(
                params_with_grad,
                grads,
                exp_avgs,
                exp_infs,
                state_steps,
                eps=eps,
                beta1=beta1,
                beta2=beta2,
                lr=lr,
                weight_decay=weight_decay,
                foreach=foreach,
                maximize=maximize,
                differentiable=differentiable,
                capturable=capturable,
                has_complex=has_complex,
            )

        return loss
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
# The docstring is assigned at import time (rather than written inline) so the
# shared option descriptions (_foreach_doc, _maximize_doc, ...) can be
# interpolated consistently across all optimizers.
Adamax.__doc__ = (
    r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt} \\
            &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)},
                \: \lambda \text{ (weight decay)}, \\
            &\hspace{13mm} \epsilon \text{ (epsilon)} \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
                u_0 \leftarrow 0 \text{ ( infinity norm)} \\[-1.ex]
            &\rule{110mm}{0.4pt} \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
            &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm}if \: \lambda \neq 0 \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
            &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
            &\hspace{5mm}u_t \leftarrow \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon) \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\
            &\rule{110mm}{0.4pt} \\[-1.ex]
            &\bf{return} \: \theta_t \\[-1.ex]
            &\rule{110mm}{0.4pt} \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """
)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def _single_tensor_adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    *,
    eps: float,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    """Reference per-parameter (single-tensor) implementation of the Adamax step.

    For each param: m_t = lerp(m_{t-1}, g, 1-beta1);
    u_t = max(beta2 * u_{t-1}, |g| + eps); param -= lr * m_t / ((1 - beta1^t) * u_t).
    ``exp_avgs``, ``exp_infs``, ``state_steps`` and ``params`` are updated in place.
    """
    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        exp_avg = exp_avgs[i]
        exp_inf = exp_infs[i]
        step_t = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        # Operate on real views of complex tensors; results land in the same storage.
        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_inf = torch.view_as_real(exp_inf)

        # Update biased first moment estimate.
        exp_avg.lerp_(grad, 1 - beta1)
        # Update the exponentially weighted infinity norm.
        # NOTE: exp_inf.mul_(beta2) mutates exp_inf in place *before* the max is taken.
        if not differentiable:
            torch.maximum(
                exp_inf.mul_(beta2),
                grad.abs().add_(eps),
                out=exp_inf,
            )
        else:
            # `out=` writes are not differentiable; stack and reduce instead.
            norm_buf = torch.cat(
                [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)],
                0,
            )
            exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False))

        if capturable:
            # why jump through extra hoops and negate bias_correction? check out #121238
            # once fixed, we should use bias_correction with addcdiv value=-1 for readability
            neg_bias_correction = beta1**step_t - 1
            neg_bias_correction.div_(lr)
            denom = exp_inf * neg_bias_correction
            param.addcdiv_(exp_avg, denom)
        else:
            bias_correction = 1 - beta1 ** _get_value(step_t)
            clr = lr / bias_correction

            param.addcdiv_(exp_avg, exp_inf, value=-clr)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def _multi_tensor_adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    *,
    eps: float,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    """Multi-tensor (foreach) implementation of the Adamax update.

    Parameters are processed in device/dtype groups so each group is updated
    with a small number of fused ``torch._foreach_*`` launches instead of one
    kernel launch per parameter.
    """
    assert not differentiable, "_foreach ops don't support autograd"

    if len(params) == 0:
        return

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_infs, state_steps]  # type: ignore[list-item]
    )
    for (
        device_params_,
        device_grads_,
        device_exp_avgs_,
        device_exp_infs_,
        device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
        device_exp_infs = cast(List[Tensor], device_exp_infs_)
        device_state_steps = cast(List[Tensor], device_state_steps_)

        if has_complex:
            # Operate on the real views of complex tensors in place.
            _view_as_real(
                device_params, device_grads, device_exp_avgs, device_exp_infs
            )

        if maximize:
            # The negation allocates fresh tensors, so the in-place ops below
            # never touch the caller's gradients.
            device_grads = torch._foreach_neg(device_grads)  # type: ignore[assignment]

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            if maximize:
                # Reuse the intermediate memory already allocated by the
                # maximize negation above.
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(  # type: ignore[assignment]
                    device_grads, device_params, alpha=weight_decay
                )

        # Biased first moment: exp_avg <- lerp(exp_avg, grad, 1 - beta1).
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)

        # Exponentially weighted infinity norm:
        #   exp_inf <- max(beta2 * exp_inf, |grad| + eps)
        torch._foreach_mul_(device_exp_infs, beta2)

        # The grads may only be mutated in place if they are already private
        # copies (from maximize/weight_decay); otherwise copy via abs here.
        if not maximize and weight_decay == 0:
            device_grads = torch._foreach_abs(device_grads)  # type: ignore[assignment]
        else:
            torch._foreach_abs_(device_grads)

        torch._foreach_add_(device_grads, eps)
        torch._foreach_maximum_(device_exp_infs, device_grads)

        bias_corrections: Union[Tuple[Tensor, ...], List[Tensor]]
        if capturable:
            # Graph-capture-safe path: keep everything as tensor ops on steps.
            bias_corrections = torch._foreach_pow(beta1, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_corrections, 1)
            torch._foreach_div_(bias_corrections, lr)

            denom = torch._foreach_mul(device_exp_infs, bias_corrections)
            torch._foreach_addcdiv_(device_params, device_exp_avgs, denom)
        else:
            bias_corrections = [
                1 - beta1 ** _get_value(step) for step in device_state_steps
            ]
            step_size = [(_get_value(lr) / bc) * -1 for bc in bias_corrections]
            torch._foreach_addcdiv_(
                device_params, device_exp_avgs, device_exp_infs, step_size
            )
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamax)
def adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    eps: float,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
):
    r"""Functional API that performs adamax algorithm computation.

    See :class:`~torch.optim.Adamax` for details.
    """
    # Steps must already be singleton tensors (legacy int steps are rejected).
    if not torch._utils.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    # Pick an implementation when the caller expressed no preference.
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamax
    else:
        func = _single_tensor_adamax

    func(
        params,
        grads,
        exp_avgs,
        exp_infs,
        state_steps,
        eps=eps,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
        capturable=capturable,
    )
|
vllm/lib/python3.10/site-packages/torch/optim/asgd.py
ADDED
|
@@ -0,0 +1,465 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
from typing import cast, List, Optional, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch import Tensor
|
| 7 |
+
|
| 8 |
+
from .optimizer import (
|
| 9 |
+
_capturable_doc,
|
| 10 |
+
_default_to_fused_or_foreach,
|
| 11 |
+
_differentiable_doc,
|
| 12 |
+
_disable_dynamo_if_unsupported,
|
| 13 |
+
_foreach_doc,
|
| 14 |
+
_get_capturable_supported_devices,
|
| 15 |
+
_get_scalar_dtype,
|
| 16 |
+
_get_value,
|
| 17 |
+
_maximize_doc,
|
| 18 |
+
_use_grad_for_differentiable,
|
| 19 |
+
_view_as_real,
|
| 20 |
+
Optimizer,
|
| 21 |
+
ParamsT,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
__all__ = ["ASGD", "asgd"]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class ASGD(Optimizer):
    """Averaged Stochastic Gradient Descent optimizer (full docs in ``ASGD.__doc__``)."""

    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-2,
        lambd: float = 1e-4,
        alpha: float = 0.75,
        t0: float = 1e6,
        weight_decay: float = 0,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
        capturable: bool = False,
    ):
        # Validate hyperparameters up front, before the base class sees them.
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            lambd=lambd,
            alpha=alpha,
            t0=t0,
            weight_decay=weight_decay,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        """Restore optimizer state, upgrading checkpoints written by older versions."""
        super().__setstate__(state)
        for group in self.param_groups:
            # Options introduced after the checkpoint was written get defaults.
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0:
                    # Older checkpoints stored step/eta/mu as Python scalars;
                    # the implementation now expects 0-dim tensors on p's device.
                    if not torch.is_tensor(p_state["step"]):
                        step_val = float(p_state["step"])
                        p_state["step"] = torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                    if not torch.is_tensor(p_state["eta"]):
                        p_state["eta"] = torch.tensor(
                            p_state["eta"], dtype=_get_scalar_dtype(), device=p.device
                        )
                    if not torch.is_tensor(p_state["mu"]):
                        p_state["mu"] = torch.tensor(
                            p_state["mu"], dtype=_get_scalar_dtype(), device=p.device
                        )

    def _init_group(self, group, params_with_grad, grads, mus, axs, etas, state_steps):
        """Gather tensors and per-parameter state for one group.

        Appends to the caller-supplied lists and returns True when any
        participating parameter is complex.
        """
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("ASGD does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]
            # Lazily initialize state the first time this parameter is seen.
            if len(state) == 0:
                state["step"] = torch.zeros(
                    (), device=p.device, dtype=_get_scalar_dtype()
                )
                state["eta"] = (
                    torch.as_tensor(
                        group["lr"], device=p.device, dtype=_get_scalar_dtype()
                    )
                    .clone()
                    .detach()
                )
                state["mu"] = torch.ones(
                    (), device=p.device, dtype=_get_scalar_dtype()
                )
                state["ax"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )

            mus.append(state["mu"])
            axs.append(state["ax"])
            etas.append(state["eta"])
            state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            # Re-enable grad so the closure can do a fresh forward/backward.
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            mus: List[Tensor] = []
            axs: List[Tensor] = []
            etas: List[Tensor] = []
            state_steps: List[Tensor] = []

            has_complex = self._init_group(
                group, params_with_grad, grads, mus, axs, etas, state_steps
            )

            asgd(
                params_with_grad,
                grads,
                axs,
                mus,
                etas,
                state_steps,
                lambd=group["lambd"],
                lr=group["lr"],
                t0=group["t0"],
                alpha=group["alpha"],
                weight_decay=group["weight_decay"],
                foreach=group["foreach"],
                maximize=group["maximize"],
                differentiable=group["differentiable"],
                capturable=group["capturable"],
                has_complex=has_complex,
            )

        return loss
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
ASGD.__doc__ = rf"""Implements Averaged Stochastic Gradient Descent.
|
| 172 |
+
|
| 173 |
+
It has been proposed in `Acceleration of stochastic approximation by
|
| 174 |
+
averaging`_.
|
| 175 |
+
|
| 176 |
+
Args:
|
| 177 |
+
params (iterable): iterable of parameters to optimize or dicts defining
|
| 178 |
+
parameter groups
|
| 179 |
+
lr (float, Tensor, optional): learning rate (default: 1e-2)
|
| 180 |
+
lambd (float, optional): decay term (default: 1e-4)
|
| 181 |
+
alpha (float, optional): power for eta update (default: 0.75)
|
| 182 |
+
t0 (float, optional): point at which to start averaging (default: 1e6)
|
| 183 |
+
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
|
| 184 |
+
{_foreach_doc}
|
| 185 |
+
{_maximize_doc}
|
| 186 |
+
{_differentiable_doc}
|
| 187 |
+
{_capturable_doc}
|
| 188 |
+
|
| 189 |
+
.. _Acceleration of stochastic approximation by averaging:
|
| 190 |
+
https://dl.acm.org/citation.cfm?id=131098
|
| 191 |
+
|
| 192 |
+
"""
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def _single_tensor_asgd(
    params: List[Tensor],
    grads: List[Tensor],
    axs: List[Tensor],
    mus: List[Tensor],
    etas: List[Tensor],
    state_steps: List[Tensor],
    *,
    lambd: float,
    lr: float,
    t0: float,
    alpha: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    """Reference per-tensor implementation of the ASGD update."""
    for param, grad, ax, mu, eta, step_t in zip(
        params, grads, axs, mus, etas, state_steps
    ):
        if maximize:
            # Gradient ascent: flip the gradient (out of place, so the
            # caller's gradient tensor is untouched).
            grad = -grad

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type
                == mu.device.type
                == eta.device.type
                == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), (
                f"If capturable=True, params, mus, etas, and state_steps must be "
                f"on supported devices: {capturable_supported_devices}."
            )

        if torch.is_complex(param):
            # Work on the real view; in-place updates reach the complex storage.
            grad = torch.view_as_real(grad)
            param = torch.view_as_real(param)
            ax = torch.view_as_real(ax)

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if capturable:
            # Tensor-valued eta keeps this path CUDA-graph capturable.
            param.mul_(1 - lambd * eta)
            param.addcmul_(grad, eta, value=-1)  # update parameter
        else:
            eta_value = _get_value(eta)
            param.mul_(1 - lambd * eta_value)  # decay term
            param.add_(grad, alpha=-eta_value)  # update parameter

        # averaging: ax <- ax + mu * (param - ax); with mu == 1 that is a copy
        if capturable or mu.item() != 1:
            ax.add_(param.sub(ax).mul_(mu))
        else:
            ax.copy_(param)

        if capturable:
            eta.copy_(lr / ((1 + lambd * lr * step_t) ** alpha))
            mu.copy_(1 / torch.maximum(step_t - t0, torch.ones_like(step_t)))
        else:
            step = _get_value(step_t)
            new_eta = torch.as_tensor(lr / ((1 + lambd * lr * step) ** alpha))
            eta.copy_(new_eta)
            new_mu = torch.as_tensor(1 / max(1, step - t0))
            mu.copy_(new_mu)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def _multi_tensor_asgd(
    params: List[Tensor],
    grads: List[Tensor],
    axs: List[Tensor],
    mus: List[Tensor],
    etas: List[Tensor],
    state_steps: List[Tensor],
    *,
    lambd: float,
    lr: float,
    t0: float,
    alpha: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    """Multi-tensor (foreach) implementation of the ASGD update."""
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == mu.device.type == eta.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, mu, eta, step in zip(params, mus, etas, state_steps)
        ), f"If capturable=True, params, mus, etas, and state_steps must be on supported devices: {capturable_supported_devices}."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, axs, mus, etas, state_steps]  # type: ignore[list-item]
    )
    for (device, _), (
        (
            device_params_,
            device_grads_,
            device_axs_,
            device_mus_,
            device_etas_,
            device_state_steps_,
        ),
        _,
    ) in grouped_tensors.items():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_axs = cast(List[Tensor], device_axs_)
        device_mus = cast(List[Tensor], device_mus_)
        device_etas = cast(List[Tensor], device_etas_)
        device_state_steps = cast(List[Tensor], device_state_steps_)

        if has_complex:
            _view_as_real(device_params, device_grads, device_axs)

        if maximize:
            device_grads = torch._foreach_neg(device_grads)  # type: ignore[assignment]

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        # intermediate = grad + weight_decay * param + lambd * param
        intermediate: Union[Tuple[Tensor, ...], List[Tensor]]
        if weight_decay != 0:
            if maximize:
                # The maximize negation already produced private grads, so the
                # weight-decay term can be folded in without a fresh copy.
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
                intermediate = device_grads
            else:
                intermediate = torch._foreach_add(
                    device_grads, device_params, alpha=weight_decay
                )

            torch._foreach_add_(intermediate, device_params, alpha=lambd)
        else:
            intermediate = torch._foreach_add(
                device_grads, device_params, alpha=lambd
            )

        # update param
        #   param * (1 - lambd * eta) - eta * grad
        # = param - param * lambd * eta - eta * grad
        # = param - eta * intermediate
        torch._foreach_addcmul_(device_params, intermediate, device_etas, value=-1)
        del intermediate

        # averaging: ax <- ax + mu * (param - ax)
        # lerp cannot be used here because it requires a float64 weight, and
        # the grouping code requires matching dtypes within a group (the mus
        # are reused below with the group dtype). Spelling it out as a sub +
        # addcmul only costs one extra kernel launch, which is cleaner and
        # faster than casting in a loop.
        intermediate = torch._foreach_sub(device_params, device_axs)
        torch._foreach_addcmul_(device_axs, intermediate, device_mus)
        del intermediate

        new_etas: Union[Tuple[Tensor, ...], List[Tensor]]
        new_mus: Union[Tuple[Tensor, ...], List[Tensor]]
        if capturable:
            # mu <- 1 / max(step - t0, 1), computed entirely with tensor ops
            new_mus = torch._foreach_sub(device_state_steps, t0)
            torch._foreach_maximum_(new_mus, 1.0)
            torch._foreach_reciprocal_(new_mus)
            torch._foreach_copy_(device_mus, new_mus)
            del new_mus

            # eta <- lr / ((1 + lambd * lr * step) ** alpha)
            new_etas = torch._foreach_mul(device_state_steps, lambd)
            torch._foreach_mul_(new_etas, lr)
            torch._foreach_add_(new_etas, 1)
            torch._foreach_pow_(new_etas, alpha)
            torch._foreach_reciprocal_(new_etas)
            torch._foreach_mul_(new_etas, lr)
            torch._foreach_copy_(device_etas, new_etas)
        else:
            new_etas = [
                torch.as_tensor(lr / ((1 + lambd * lr * step) ** alpha), device=device)
                for step in device_state_steps
            ]
            new_mus = [
                torch.as_tensor(1 / max(1, _get_value(step) - t0), device=device)
                for step in device_state_steps
            ]
            torch._foreach_copy_(device_etas, new_etas)
            torch._foreach_copy_(device_mus, new_mus)
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_asgd)
def asgd(
    params: List[Tensor],
    grads: List[Tensor],
    axs: List[Tensor],
    mus: List[Tensor],
    etas: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    lambd: float,
    lr: float,
    t0: float,
    alpha: float,
    weight_decay: float,
):
    r"""Functional API that performs asgd algorithm computation.

    See :class:`~torch.optim.ASGD` for details.
    """
    # Pick an implementation when the caller expressed no preference.
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_asgd
    else:
        func = _single_tensor_asgd

    func(
        params,
        grads,
        axs,
        mus,
        etas,
        state_steps,
        lambd=lambd,
        lr=lr,
        t0=t0,
        alpha=alpha,
        weight_decay=weight_decay,
        maximize=maximize,
        differentiable=differentiable,
        capturable=capturable,
        has_complex=has_complex,
    )
|
vllm/lib/python3.10/site-packages/torch/optim/lbfgs.py
ADDED
|
@@ -0,0 +1,495 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import Optional, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch import Tensor
|
| 6 |
+
|
| 7 |
+
from .optimizer import Optimizer, ParamsT
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
__all__ = ["LBFGS"]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
|
| 14 |
+
# ported from https://github.com/torch/optim/blob/master/polyinterp.lua
|
| 15 |
+
# Compute bounds of interpolation area
|
| 16 |
+
if bounds is not None:
|
| 17 |
+
xmin_bound, xmax_bound = bounds
|
| 18 |
+
else:
|
| 19 |
+
xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1)
|
| 20 |
+
|
| 21 |
+
# Code for most common case: cubic interpolation of 2 points
|
| 22 |
+
# w/ function and derivative values for both
|
| 23 |
+
# Solution in this case (where x2 is the farthest point):
|
| 24 |
+
# d1 = g1 + g2 - 3*(f1-f2)/(x1-x2);
|
| 25 |
+
# d2 = sqrt(d1^2 - g1*g2);
|
| 26 |
+
# min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2));
|
| 27 |
+
# t_new = min(max(min_pos,xmin_bound),xmax_bound);
|
| 28 |
+
d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
|
| 29 |
+
d2_square = d1**2 - g1 * g2
|
| 30 |
+
if d2_square >= 0:
|
| 31 |
+
d2 = d2_square.sqrt()
|
| 32 |
+
if x1 <= x2:
|
| 33 |
+
min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2))
|
| 34 |
+
else:
|
| 35 |
+
min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
|
| 36 |
+
return min(max(min_pos, xmin_bound), xmax_bound)
|
| 37 |
+
else:
|
| 38 |
+
return (xmin_bound + xmax_bound) / 2.0
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _strong_wolfe(
    obj_func, x, t, d, f, g, gtd, c1=1e-4, c2=0.9, tolerance_change=1e-9, max_ls=25
):
    """Line search along direction ``d`` satisfying the strong Wolfe conditions.

    Two phases: first *bracket* an interval that must contain an acceptable
    step, then *zoom* (bisection/cubic refinement) inside that bracket.

    Args:
        obj_func: callable ``(x, t, d) -> (loss, flat_grad)`` evaluating the
            objective at ``x + t*d`` (parameters are restored by the caller's
            implementation).
        x: cloned starting parameters (restored after each evaluation).
        t: initial step size.
        d: search direction (1-D tensor).
        f: objective value at the starting point.
        g: flat gradient at the starting point.
        gtd: directional derivative ``g.dot(d)`` at the starting point.
        c1, c2: Armijo / curvature constants of the Wolfe conditions.
        tolerance_change: stop when the bracket becomes this small.
        max_ls: maximum number of line-search iterations.

    Returns:
        Tuple ``(f_new, g_new, t, ls_func_evals)``: accepted value, gradient,
        step size and number of objective evaluations performed.
    """
    # ported from https://github.com/torch/optim/blob/master/lswolfe.lua
    d_norm = d.abs().max()
    # Clone so later in-place parameter updates cannot alias the caller's gradient.
    g = g.clone(memory_format=torch.contiguous_format)
    # evaluate objective and gradient using initial step
    f_new, g_new = obj_func(x, t, d)
    ls_func_evals = 1
    gtd_new = g_new.dot(d)

    # bracket an interval containing a point satisfying the Wolfe criteria
    t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd
    done = False
    ls_iter = 0
    while ls_iter < max_ls:
        # check conditions
        # Armijo violated, or value stopped decreasing: minimum is bracketed.
        if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev):
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # Strong Wolfe curvature condition already satisfied: accept t directly.
        if abs(gtd_new) <= -c2 * gtd:
            bracket = [t]
            bracket_f = [f_new]
            bracket_g = [g_new]
            done = True
            break

        # Directional derivative turned non-negative: minimum lies behind us.
        if gtd_new >= 0:
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # interpolate
        min_step = t + 0.01 * (t - t_prev)
        max_step = t * 10
        tmp = t
        t = _cubic_interpolate(
            t_prev, f_prev, gtd_prev, t, f_new, gtd_new, bounds=(min_step, max_step)
        )

        # next step
        t_prev = tmp
        f_prev = f_new
        g_prev = g_new.clone(memory_format=torch.contiguous_format)
        gtd_prev = gtd_new
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

    # reached max number of iterations?
    if ls_iter == max_ls:
        bracket = [0, t]
        bracket_f = [f, f_new]
        bracket_g = [g, g_new]

    # zoom phase: we now have a point satisfying the criteria, or
    # a bracket around it. We refine the bracket until we find the
    # exact point satisfying the criteria
    insuf_progress = False
    # find high and low points in bracket
    low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0)  # type: ignore[possibly-undefined]
    while not done and ls_iter < max_ls:
        # line-search bracket is so small
        if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change:  # type: ignore[possibly-undefined]
            break

        # compute new trial value
        t = _cubic_interpolate(
            bracket[0],
            bracket_f[0],
            bracket_gtd[0],  # type: ignore[possibly-undefined]
            bracket[1],
            bracket_f[1],
            bracket_gtd[1],
        )

        # test that we are making sufficient progress:
        # in case `t` is so close to boundary, we mark that we are making
        # insufficient progress, and if
        # + we have made insufficient progress in the last step, or
        # + `t` is at one of the boundary,
        # we will move `t` to a position which is `0.1 * len(bracket)`
        # away from the nearest boundary point.
        eps = 0.1 * (max(bracket) - min(bracket))
        if min(max(bracket) - t, t - min(bracket)) < eps:
            # interpolation close to boundary
            if insuf_progress or t >= max(bracket) or t <= min(bracket):
                # evaluate at 0.1 away from boundary
                if abs(t - max(bracket)) < abs(t - min(bracket)):
                    t = max(bracket) - eps
                else:
                    t = min(bracket) + eps
                insuf_progress = False
            else:
                insuf_progress = True
        else:
            insuf_progress = False

        # Evaluate new point
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

        if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]:
            # Armijo condition not satisfied or not lower than lowest point
            bracket[high_pos] = t
            bracket_f[high_pos] = f_new
            bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format)  # type: ignore[possibly-undefined]
            bracket_gtd[high_pos] = gtd_new
            low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0)
        else:
            if abs(gtd_new) <= -c2 * gtd:
                # Wolfe conditions satisfied
                done = True
            elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0:
                # old high becomes new low
                bracket[high_pos] = bracket[low_pos]
                bracket_f[high_pos] = bracket_f[low_pos]
                bracket_g[high_pos] = bracket_g[low_pos]  # type: ignore[possibly-undefined]
                bracket_gtd[high_pos] = bracket_gtd[low_pos]

            # new point becomes new low
            bracket[low_pos] = t
            bracket_f[low_pos] = f_new
            bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format)  # type: ignore[possibly-undefined]
            bracket_gtd[low_pos] = gtd_new

    # return stuff
    t = bracket[low_pos]  # type: ignore[possibly-undefined]
    f_new = bracket_f[low_pos]
    g_new = bracket_g[low_pos]  # type: ignore[possibly-undefined]
    return f_new, g_new, t, ls_func_evals
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
class LBFGS(Optimizer):
    """Implements L-BFGS algorithm.

    Heavily inspired by `minFunc
    <https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`_.

    .. warning::
        This optimizer doesn't support per-parameter options and parameter
        groups (there can be only one).

    .. warning::
        Right now all parameters have to be on a single device. This will be
        improved in the future.

    .. note::
        This is a very memory intensive optimizer (it requires additional
        ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
        try reducing the history size, or use a different algorithm.

    Args:
        params (iterable): iterable of parameters to optimize. Parameters must be real.
        lr (float): learning rate (default: 1)
        max_iter (int): maximal number of iterations per optimization step
            (default: 20)
        max_eval (int): maximal number of function evaluations per optimization
            step (default: max_iter * 1.25).
        tolerance_grad (float): termination tolerance on first order optimality
            (default: 1e-7).
        tolerance_change (float): termination tolerance on function
            value/parameter changes (default: 1e-9).
        history_size (int): update history size (default: 100).
        line_search_fn (str): either 'strong_wolfe' or None (default: None).
    """

    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1,
        max_iter: int = 20,
        max_eval: Optional[int] = None,
        tolerance_grad: float = 1e-7,
        tolerance_change: float = 1e-9,
        history_size: int = 100,
        line_search_fn: Optional[str] = None,
    ):
        # A tensor lr must be a scalar; per-element learning rates are unsupported.
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if max_eval is None:
            # Documented default max_iter * 1.25, computed in integer arithmetic.
            max_eval = max_iter * 5 // 4
        defaults = dict(
            lr=lr,
            max_iter=max_iter,
            max_eval=max_eval,
            tolerance_grad=tolerance_grad,
            tolerance_change=tolerance_change,
            history_size=history_size,
            line_search_fn=line_search_fn,
        )
        super().__init__(params, defaults)

        if len(self.param_groups) != 1:
            raise ValueError(
                "LBFGS doesn't support per-parameter options " "(parameter groups)"
            )

        self._params = self.param_groups[0]["params"]
        # Cached total (real) element count; filled lazily by _numel().
        self._numel_cache = None

    def _numel(self):
        """Return the total number of real scalar elements across all params.

        Complex parameters count twice, since they are flattened to two reals
        (see ``torch.view_as_real`` usage in ``_gather_flat_grad``).
        """
        if self._numel_cache is None:
            self._numel_cache = sum(
                2 * p.numel() if torch.is_complex(p) else p.numel()
                for p in self._params
            )

        return self._numel_cache

    def _gather_flat_grad(self):
        """Concatenate all parameter gradients into one flat 1-D real tensor."""
        views = []
        for p in self._params:
            if p.grad is None:
                # Missing gradient contributes zeros of the right size.
                view = p.new(p.numel()).zero_()
            elif p.grad.is_sparse:
                view = p.grad.to_dense().view(-1)
            else:
                view = p.grad.view(-1)
            if torch.is_complex(view):
                # Flatten complex gradients into interleaved real components.
                view = torch.view_as_real(view).view(-1)
            views.append(view)
        return torch.cat(views, 0)

    def _add_grad(self, step_size, update):
        """In-place ``p += step_size * update_slice`` for every parameter."""
        offset = 0
        for p in self._params:
            if torch.is_complex(p):
                p = torch.view_as_real(p)
            numel = p.numel()
            # view as to avoid deprecated pointwise semantics
            p.add_(update[offset : offset + numel].view_as(p), alpha=step_size)
            offset += numel
        assert offset == self._numel()

    def _clone_param(self):
        """Deep-copy all parameters (used to restore them after a line search)."""
        return [p.clone(memory_format=torch.contiguous_format) for p in self._params]

    def _set_param(self, params_data):
        """Copy the given tensors back into the parameters in-place."""
        for p, pdata in zip(self._params, params_data):
            p.copy_(pdata)

    def _directional_evaluate(self, closure, x, t, d):
        """Evaluate loss and flat gradient at ``x + t*d``, then restore ``x``."""
        self._add_grad(t, d)
        loss = float(closure())
        flat_grad = self._gather_flat_grad()
        self._set_param(x)
        return loss, flat_grad

    @torch.no_grad()
    def step(self, closure):
        """Perform a single optimization step.

        Args:
            closure (Callable): A closure that reevaluates the model
                and returns the loss.
        """
        assert len(self.param_groups) == 1

        # Make sure the closure is always called with grad enabled
        closure = torch.enable_grad()(closure)

        group = self.param_groups[0]
        lr = group["lr"]
        max_iter = group["max_iter"]
        max_eval = group["max_eval"]
        tolerance_grad = group["tolerance_grad"]
        tolerance_change = group["tolerance_change"]
        line_search_fn = group["line_search_fn"]
        history_size = group["history_size"]

        # NOTE: LBFGS has only global state, but we register it as state for
        # the first param, because this helps with casting in load_state_dict
        state = self.state[self._params[0]]
        state.setdefault("func_evals", 0)
        state.setdefault("n_iter", 0)

        # evaluate initial f(x) and df/dx
        orig_loss = closure()
        loss = float(orig_loss)
        current_evals = 1
        state["func_evals"] += 1

        flat_grad = self._gather_flat_grad()
        opt_cond = flat_grad.abs().max() <= tolerance_grad

        # optimal condition
        if opt_cond:
            return orig_loss

        # tensors cached in state (for tracing)
        d = state.get("d")
        t = state.get("t")
        old_dirs = state.get("old_dirs")
        old_stps = state.get("old_stps")
        ro = state.get("ro")
        H_diag = state.get("H_diag")
        prev_flat_grad = state.get("prev_flat_grad")
        prev_loss = state.get("prev_loss")

        n_iter = 0
        # optimize for a max of max_iter iterations
        while n_iter < max_iter:
            # keep track of nb of iterations
            n_iter += 1
            state["n_iter"] += 1

            ############################################################
            # compute gradient descent direction
            ############################################################
            if state["n_iter"] == 1:
                # First iteration ever: plain steepest descent, empty history.
                d = flat_grad.neg()
                old_dirs = []
                old_stps = []
                ro = []
                H_diag = 1
            else:
                # do lbfgs update (update memory)
                y = flat_grad.sub(prev_flat_grad)
                s = d.mul(t)
                ys = y.dot(s)  # y*s
                # Only keep curvature pairs with positive ys (curvature condition).
                if ys > 1e-10:
                    # updating memory
                    if len(old_dirs) == history_size:
                        # shift history by one (limited-memory)
                        old_dirs.pop(0)
                        old_stps.pop(0)
                        ro.pop(0)

                    # store new direction/step
                    old_dirs.append(y)
                    old_stps.append(s)
                    ro.append(1.0 / ys)

                    # update scale of initial Hessian approximation
                    H_diag = ys / y.dot(y)  # (y*y)

                # compute the approximate (L-BFGS) inverse Hessian
                # multiplied by the gradient
                num_old = len(old_dirs)

                if "al" not in state:
                    state["al"] = [None] * history_size
                al = state["al"]

                # iteration in L-BFGS loop collapsed to use just one buffer
                q = flat_grad.neg()
                for i in range(num_old - 1, -1, -1):
                    al[i] = old_stps[i].dot(q) * ro[i]
                    q.add_(old_dirs[i], alpha=-al[i])

                # multiply by initial Hessian
                # r/d is the final direction
                d = r = torch.mul(q, H_diag)
                for i in range(num_old):
                    be_i = old_dirs[i].dot(r) * ro[i]
                    r.add_(old_stps[i], alpha=al[i] - be_i)

            if prev_flat_grad is None:
                prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format)
            else:
                prev_flat_grad.copy_(flat_grad)
            prev_loss = loss

            ############################################################
            # compute step length
            ############################################################
            # reset initial guess for step size
            if state["n_iter"] == 1:
                t = min(1.0, 1.0 / flat_grad.abs().sum()) * lr
            else:
                t = lr

            # directional derivative
            gtd = flat_grad.dot(d)  # g * d

            # directional derivative is below tolerance
            if gtd > -tolerance_change:
                break

            # optional line search: user function
            ls_func_evals = 0
            if line_search_fn is not None:
                # perform line search, using user function
                if line_search_fn != "strong_wolfe":
                    raise RuntimeError("only 'strong_wolfe' is supported")
                else:
                    x_init = self._clone_param()

                    def obj_func(x, t, d):
                        return self._directional_evaluate(closure, x, t, d)

                    loss, flat_grad, t, ls_func_evals = _strong_wolfe(
                        obj_func, x_init, t, d, loss, flat_grad, gtd
                    )
                self._add_grad(t, d)
                opt_cond = flat_grad.abs().max() <= tolerance_grad
            else:
                # no line search, simply move with fixed-step
                self._add_grad(t, d)
                if n_iter != max_iter:
                    # re-evaluate function only if not in last iteration
                    # the reason we do this: in a stochastic setting,
                    # no use to re-evaluate that function here
                    with torch.enable_grad():
                        loss = float(closure())
                    flat_grad = self._gather_flat_grad()
                    opt_cond = flat_grad.abs().max() <= tolerance_grad
                    ls_func_evals = 1

            # update func eval
            current_evals += ls_func_evals
            state["func_evals"] += ls_func_evals

            ############################################################
            # check conditions
            ############################################################
            if n_iter == max_iter:
                break

            if current_evals >= max_eval:
                break

            # optimal condition
            if opt_cond:
                break

            # lack of progress
            if d.mul(t).abs().max() <= tolerance_change:
                break

            if abs(loss - prev_loss) < tolerance_change:
                break

        # Persist per-step tensors so subsequent steps (and tracing) can resume.
        state["d"] = d
        state["t"] = t
        state["old_dirs"] = old_dirs
        state["old_stps"] = old_stps
        state["ro"] = ro
        state["H_diag"] = H_diag
        state["prev_flat_grad"] = prev_flat_grad
        state["prev_loss"] = prev_loss

        return orig_loss
|
vllm/lib/python3.10/site-packages/torch/optim/lr_scheduler.py
ADDED
|
@@ -0,0 +1,2151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
r"""Learning Rate Scheduler."""
|
| 3 |
+
import math
|
| 4 |
+
import types
|
| 5 |
+
import warnings
|
| 6 |
+
from bisect import bisect_right
|
| 7 |
+
from collections import Counter
|
| 8 |
+
from functools import partial, wraps
|
| 9 |
+
from typing import (
|
| 10 |
+
Any,
|
| 11 |
+
Callable,
|
| 12 |
+
cast,
|
| 13 |
+
Dict,
|
| 14 |
+
Iterable,
|
| 15 |
+
List,
|
| 16 |
+
Literal,
|
| 17 |
+
Optional,
|
| 18 |
+
Sequence,
|
| 19 |
+
SupportsFloat,
|
| 20 |
+
TypedDict,
|
| 21 |
+
Union,
|
| 22 |
+
)
|
| 23 |
+
from weakref import ref
|
| 24 |
+
|
| 25 |
+
from torch import inf, Tensor
|
| 26 |
+
|
| 27 |
+
from .optimizer import Optimizer
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
__all__ = [
|
| 31 |
+
"LambdaLR",
|
| 32 |
+
"MultiplicativeLR",
|
| 33 |
+
"StepLR",
|
| 34 |
+
"MultiStepLR",
|
| 35 |
+
"ConstantLR",
|
| 36 |
+
"LinearLR",
|
| 37 |
+
"ExponentialLR",
|
| 38 |
+
"SequentialLR",
|
| 39 |
+
"CosineAnnealingLR",
|
| 40 |
+
"ChainedScheduler",
|
| 41 |
+
"ReduceLROnPlateau",
|
| 42 |
+
"CyclicLR",
|
| 43 |
+
"CosineAnnealingWarmRestarts",
|
| 44 |
+
"OneCycleLR",
|
| 45 |
+
"PolynomialLR",
|
| 46 |
+
"LRScheduler",
|
| 47 |
+
]
|
| 48 |
+
|
| 49 |
+
EPOCH_DEPRECATION_WARNING = (
|
| 50 |
+
"The epoch parameter in `scheduler.step()` was not necessary and is being "
|
| 51 |
+
"deprecated where possible. Please use `scheduler.step()` to step the "
|
| 52 |
+
"scheduler. During the deprecation, if epoch is different from None, the "
|
| 53 |
+
"closed form is used instead of the new chainable form, where available. "
|
| 54 |
+
"Please open an issue if you are unable to replicate your use case: "
|
| 55 |
+
"https://github.com/pytorch/pytorch/issues/new/choose."
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _check_verbose_deprecated_warning(verbose):
    """Warn when a non-default ``verbose`` value is supplied and return it.

    Returns ``False`` when the caller left ``verbose`` at its sentinel default
    ``"deprecated"``; otherwise warns and passes the caller's value through.
    """
    if verbose == "deprecated":
        return False
    warnings.warn(
        "The verbose parameter is deprecated. Please use get_last_lr() "
        "to access the learning rate.",
        UserWarning,
    )
    return verbose
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _format_param(name: str, optimizer: Optimizer, param):
    """Return correctly formatted lr/momentum for each param group.

    A scalar is replicated once per param group; a list/tuple must already
    match the number of groups.  Tensor entries are cloned so the scheduler
    never aliases caller-owned storage.
    """
    n_groups = len(optimizer.param_groups)

    if isinstance(param, (list, tuple)):
        if len(param) != n_groups:
            raise ValueError(
                f"{name} must have the same length as optimizer.param_groups. "
                f"{name} has {len(param)} values, param_groups has {n_groups}."
            )
        values = param
    else:
        values = [param] * n_groups

    return [v.clone() if isinstance(v, Tensor) else v for v in values]
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class LRScheduler:
    r"""Adjusts the learning rate during optimization.

    Base class for the schedulers below: subclasses implement ``get_lr()``
    and are advanced once per epoch (or iteration) via ``step()``.
    """

    # Toggled by `_enable_get_lr_call` while `step()` runs so that
    # `get_lr()` implementations can warn when invoked directly by user code.
    _get_lr_called_within_step: bool = False

    def __init__(
        self, optimizer: Optimizer, last_epoch=-1, verbose="deprecated"
    ):  # noqa: D107
        # Attach optimizer
        if not isinstance(optimizer, Optimizer):
            raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
        self.optimizer = optimizer

        # Initialize epoch and base learning rates
        if last_epoch == -1:
            # Fresh run: remember each group's starting lr as `initial_lr`.
            for group in optimizer.param_groups:
                initial_lr = group["lr"]
                if isinstance(initial_lr, Tensor):
                    # Clone so the stored baseline is not mutated by later
                    # in-place updates of the live `lr` tensor.
                    initial_lr = initial_lr.clone()
                group.setdefault("initial_lr", initial_lr)
        else:
            # Resuming: every group must already carry its `initial_lr`.
            for i, group in enumerate(optimizer.param_groups):
                if "initial_lr" not in group:
                    raise KeyError(
                        "param 'initial_lr' is not specified "
                        f"in param_groups[{i}] when resuming an optimizer"
                    )
        self.base_lrs: List[float] = [
            group["initial_lr"] for group in optimizer.param_groups
        ]
        self.last_epoch = last_epoch

        # Following https://github.com/pytorch/pytorch/issues/20124
        # We would like to ensure that `lr_scheduler.step()` is called after
        # `optimizer.step()`
        def patch_track_step_called(opt: Optimizer):
            # Wrap `opt.step` (once) so every call records `_opt_called`,
            # letting `LRScheduler.step()` detect a wrong call order.
            if hasattr(opt.step, "_wrapped_by_lr_sched"):
                # we've already patched
                return opt.step

            def wrap_step(step_fn):
                # Weakref avoids a reference cycle scheduler -> wrapper -> optimizer.
                opt_ref = ref(self.optimizer)
                func = step_fn.__func__

                @wraps(func)
                def wrapper(*args, **kwargs):
                    opt = opt_ref()
                    opt._opt_called = True  # type: ignore[union-attr]
                    # Re-bind the unwrapped function to the live optimizer.
                    return func.__get__(opt, opt.__class__)(*args, **kwargs)

                wrapper._wrapped_by_lr_sched = True  # type: ignore[attr-defined]
                return wrapper

            opt.step = wrap_step(opt.step)  # type: ignore[method-assign]

        patch_track_step_called(self.optimizer)
        self.verbose = _check_verbose_deprecated_warning(verbose)
        self._initial_step()

    def _initial_step(self):
        """Initialize step counts and perform a step."""
        self._step_count = 0
        self.step()

    def state_dict(self):
        """Return the state of the scheduler as a :class:`dict`.

        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        """
        return {
            key: value for key, value in self.__dict__.items() if key != "optimizer"
        }

    def load_state_dict(self, state_dict: Dict[str, Any]):
        """Load the scheduler's state.

        Args:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        self.__dict__.update(state_dict)

    def get_last_lr(self) -> List[float]:
        """Return last computed learning rate by current scheduler."""
        return self._last_lr

    def get_lr(self) -> List[float]:
        """Compute learning rate using chainable form of the scheduler."""
        raise NotImplementedError

    def print_lr(
        self,
        is_verbose: bool,
        group: Dict[str, Any],
        lr: float,
        epoch: Optional[int] = None,
    ):
        """Display the current learning rate.

        .. deprecated:: 2.4
            ``print_lr()`` is deprecated. Please use ``get_last_lr()`` to access the
            learning rate.
        """
        warnings.warn(
            "`LRScheduler.print_lr()` is being deprecated. To fetch the learning rate, "
            "please use `get_last_lr()` instead. For more details, "
            "see https://github.com/pytorch/pytorch/issues/99270.",
            UserWarning,
        )
        if is_verbose:
            if epoch is None:
                print(f"Adjusting learning rate of group {group} to {lr:.4e}.")
            else:
                epoch_str = ("%.2f" if isinstance(epoch, float) else "%.5d") % epoch
                print(
                    f"Epoch {epoch_str}: adjusting learning rate of group {group} to {lr:.4e}."
                )

    def step(self, epoch: Optional[int] = None):
        """Perform a step."""
        # Raise a warning if old pattern is detected
        # https://github.com/pytorch/pytorch/issues/20124
        if self._step_count == 1:
            if not hasattr(self.optimizer.step, "_wrapped_by_lr_sched"):
                warnings.warn(
                    "Seems like `optimizer.step()` has been overridden after learning rate scheduler "
                    "initialization. Please, make sure to call `optimizer.step()` before "
                    "`lr_scheduler.step()`. See more details at "
                    "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate",
                    UserWarning,
                )

            # Just check if there were two first lr_scheduler.step() calls before optimizer.step()
            elif not getattr(self.optimizer, "_opt_called", False):
                warnings.warn(
                    "Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
                    "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
                    "`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
                    "will result in PyTorch skipping the first value of the learning rate schedule. "
                    "See more details at "
                    "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate",
                    UserWarning,
                )
        self._step_count += 1

        # While inside the context, get_lr() calls are expected and do not warn.
        with _enable_get_lr_call(self):
            if epoch is None:
                self.last_epoch += 1
                values = self.get_lr()
            else:
                # Explicit-epoch path is deprecated; prefer the closed form
                # when a subclass provides one.
                warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
                self.last_epoch = epoch
                if hasattr(self, "_get_closed_form_lr"):
                    values = cast(List[float], self._get_closed_form_lr())
                else:
                    values = self.get_lr()

        for i, data in enumerate(zip(self.optimizer.param_groups, values)):
            param_group, lr = data
            if isinstance(param_group["lr"], Tensor):
                # Update tensor lrs in place so external references stay valid.
                param_group["lr"].fill_(lr)
            else:
                param_group["lr"] = lr

        self._last_lr: List[float] = [
            group["lr"] for group in self.optimizer.param_groups
        ]
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def _warn_get_lr_called_within_step(lr_scheduler: LRScheduler):
    """Warn when ``get_lr()`` is invoked outside of ``LRScheduler.step()``."""
    if lr_scheduler._get_lr_called_within_step:
        return
    warnings.warn(
        "To get the last learning rate computed by the scheduler, "
        "please use `get_last_lr()`.",
        UserWarning,
        stacklevel=2,
    )
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
# Including _LRScheduler for backwards compatibility
# Subclass instead of assign because we want __name__ of _LRScheduler to be _LRScheduler (assigning would make it LRScheduler).
class _LRScheduler(LRScheduler):
    """Alias subclass of :class:`LRScheduler` kept for backward compatibility."""

    pass
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class _enable_get_lr_call:
    """Context manager marking a scheduler as being inside ``step()``.

    While active, ``_get_lr_called_within_step`` is True so ``get_lr()``
    implementations skip their "use get_last_lr() instead" warning.
    """

    def __init__(self, o: LRScheduler):
        self._sched = o

    def __enter__(self):
        self._sched._get_lr_called_within_step = True
        return self

    def __exit__(self, type, value, traceback):
        self._sched._get_lr_called_within_step = False
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class LambdaLR(LRScheduler):
    """Sets the initial learning rate.

    The learning rate of each parameter group is set to the initial lr
    times a given function. When last_epoch=-1, sets initial lr as lr.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        lr_lambda (function or list): A function which computes a multiplicative
            factor given an integer parameter epoch, or a list of such
            functions, one for each group in optimizer.param_groups.
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool | str): If ``True``, prints a message to stdout for
            each update. Default: ``False``.

    .. deprecated:: 2.2
        ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
        learning rate.

    Example:
        >>> # xdoctest: +SKIP
        >>> # Assuming optimizer has two groups.
        >>> lambda1 = lambda epoch: epoch // 30
        >>> lambda2 = lambda epoch: 0.95 ** epoch
        >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    """

    def __init__(
        self,
        optimizer: Optimizer,
        lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]],
        last_epoch=-1,
        verbose="deprecated",
    ):  # noqa: D107
        self.optimizer = optimizer

        # Broadcast a single callable to every param group; a list must match
        # the number of groups exactly.
        self.lr_lambdas: List[Callable[[int], float]]
        if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
            self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
        else:
            if len(lr_lambda) != len(optimizer.param_groups):
                raise ValueError(
                    f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}"
                )
            self.lr_lambdas = list(lr_lambda)
        super().__init__(optimizer, last_epoch, verbose)

    def state_dict(self):
        """Return the state of the scheduler as a :class:`dict`.

        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        The learning rate lambda functions will only be saved if they are callable objects
        and not if they are functions or lambdas.

        When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
        """
        state_dict = {
            key: value
            for key, value in self.__dict__.items()
            if key not in ("optimizer", "lr_lambdas")
        }
        # Plain functions/lambdas are not serialized; callable objects have
        # their instance __dict__ captured instead.
        state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas)

        for idx, fn in enumerate(self.lr_lambdas):
            if not isinstance(fn, types.FunctionType):
                state_dict["lr_lambdas"][idx] = fn.__dict__.copy()

        return state_dict

    def load_state_dict(self, state_dict):
        """Load the scheduler's state.

        When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.

        Args:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        # `lr_lambdas` is handled separately: the current callables are kept
        # and only their saved attribute dicts are restored onto them.
        lr_lambdas = state_dict.pop("lr_lambdas")
        self.__dict__.update(state_dict)
        # Restore state_dict keys in order to prevent side effects
        # https://github.com/pytorch/pytorch/issues/32756
        state_dict["lr_lambdas"] = lr_lambdas

        for idx, fn in enumerate(lr_lambdas):
            if fn is not None:
                self.lr_lambdas[idx].__dict__.update(fn)

    def get_lr(self):
        """Compute learning rate."""
        _warn_get_lr_called_within_step(self)

        # Non-chained form: base_lr (not the current lr) times the factor.
        return [
            base_lr * lmbda(self.last_epoch)
            for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)
        ]
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
class MultiplicativeLR(LRScheduler):
    """Multiply the learning rate of each parameter group by the factor given in the specified function.

    When last_epoch=-1, set initial lr as lr.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        lr_lambda (function or list): A function which computes a multiplicative
            factor given an integer parameter epoch, or a list of such
            functions, one for each group in optimizer.param_groups.
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool | str): If ``True``, prints a message to stdout for
            each update. Default: ``False``.

    .. deprecated:: 2.2
        ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
        learning rate.

    Example:
        >>> # xdoctest: +SKIP
        >>> lmbda = lambda epoch: 0.95
        >>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda)
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    """

    def __init__(
        self,
        optimizer: Optimizer,
        lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]],
        last_epoch=-1,
        verbose="deprecated",
    ):  # noqa: D107
        self.optimizer = optimizer

        # Broadcast a single callable to every param group; a list must match
        # the number of groups exactly.
        self.lr_lambdas: List[Callable[[int], float]]
        if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
            self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
        else:
            if len(lr_lambda) != len(optimizer.param_groups):
                raise ValueError(
                    f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}"
                )
            self.lr_lambdas = list(lr_lambda)
        super().__init__(optimizer, last_epoch, verbose)

    def state_dict(self):
        """Return the state of the scheduler as a :class:`dict`.

        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        The learning rate lambda functions will only be saved if they are callable objects
        and not if they are functions or lambdas.
        """
        state_dict = {
            key: value
            for key, value in self.__dict__.items()
            if key not in ("optimizer", "lr_lambdas")
        }
        # Plain functions/lambdas are not serialized; callable objects have
        # their instance __dict__ captured instead.
        state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas)

        for idx, fn in enumerate(self.lr_lambdas):
            if not isinstance(fn, types.FunctionType):
                state_dict["lr_lambdas"][idx] = fn.__dict__.copy()

        return state_dict

    def load_state_dict(self, state_dict):
        """Load the scheduler's state.

        Args:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        # `lr_lambdas` is handled separately: the current callables are kept
        # and only their saved attribute dicts are restored onto them.
        lr_lambdas = state_dict.pop("lr_lambdas")
        self.__dict__.update(state_dict)
        # Restore state_dict keys in order to prevent side effects
        # https://github.com/pytorch/pytorch/issues/32756
        state_dict["lr_lambdas"] = lr_lambdas

        for idx, fn in enumerate(lr_lambdas):
            if fn is not None:
                self.lr_lambdas[idx].__dict__.update(fn)

    def get_lr(self):
        """Compute the learning rate of each parameter group."""
        _warn_get_lr_called_within_step(self)

        # Chained form: scale the *current* lr, except at epoch 0 where it is
        # left untouched.
        if self.last_epoch > 0:
            return [
                group["lr"] * lmbda(self.last_epoch)
                for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)
            ]
        else:
            return [group["lr"] for group in self.optimizer.param_groups]
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
class StepLR(LRScheduler):
|
| 490 |
+
"""Decays the learning rate of each parameter group by gamma every step_size epochs.
|
| 491 |
+
|
| 492 |
+
Notice that such decay can happen simultaneously with other changes to the learning rate
|
| 493 |
+
from outside this scheduler. When last_epoch=-1, sets initial lr as lr.
|
| 494 |
+
|
| 495 |
+
Args:
|
| 496 |
+
optimizer (Optimizer): Wrapped optimizer.
|
| 497 |
+
step_size (int): Period of learning rate decay.
|
| 498 |
+
gamma (float): Multiplicative factor of learning rate decay.
|
| 499 |
+
Default: 0.1.
|
| 500 |
+
last_epoch (int): The index of last epoch. Default: -1.
|
| 501 |
+
verbose (bool | str): If ``True``, prints a message to stdout for
|
| 502 |
+
each update. Default: ``False``.
|
| 503 |
+
|
| 504 |
+
.. deprecated:: 2.2
|
| 505 |
+
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
|
| 506 |
+
learning rate.
|
| 507 |
+
|
| 508 |
+
Example:
|
| 509 |
+
>>> # xdoctest: +SKIP
|
| 510 |
+
>>> # Assuming optimizer uses lr = 0.05 for all groups
|
| 511 |
+
>>> # lr = 0.05 if epoch < 30
|
| 512 |
+
>>> # lr = 0.005 if 30 <= epoch < 60
|
| 513 |
+
>>> # lr = 0.0005 if 60 <= epoch < 90
|
| 514 |
+
>>> # ...
|
| 515 |
+
>>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
|
| 516 |
+
>>> for epoch in range(100):
|
| 517 |
+
>>> train(...)
|
| 518 |
+
>>> validate(...)
|
| 519 |
+
>>> scheduler.step()
|
| 520 |
+
"""
|
| 521 |
+
|
| 522 |
+
def __init__(
|
| 523 |
+
self,
|
| 524 |
+
optimizer: Optimizer,
|
| 525 |
+
step_size: int,
|
| 526 |
+
gamma=0.1,
|
| 527 |
+
last_epoch=-1,
|
| 528 |
+
verbose="deprecated",
|
| 529 |
+
): # noqa: D107
|
| 530 |
+
self.step_size = step_size
|
| 531 |
+
self.gamma = gamma
|
| 532 |
+
super().__init__(optimizer, last_epoch, verbose)
|
| 533 |
+
|
| 534 |
+
def get_lr(self):
|
| 535 |
+
"""Compute the learning rate of each parameter group."""
|
| 536 |
+
_warn_get_lr_called_within_step(self)
|
| 537 |
+
|
| 538 |
+
if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0):
|
| 539 |
+
return [group["lr"] for group in self.optimizer.param_groups]
|
| 540 |
+
return [group["lr"] * self.gamma for group in self.optimizer.param_groups]
|
| 541 |
+
|
| 542 |
+
def _get_closed_form_lr(self):
|
| 543 |
+
return [
|
| 544 |
+
base_lr * self.gamma ** (self.last_epoch // self.step_size)
|
| 545 |
+
for base_lr in self.base_lrs
|
| 546 |
+
]
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
class MultiStepLR(LRScheduler):
|
| 550 |
+
"""Decays the learning rate of each parameter group by gamma once the number of epoch reaches one of the milestones.
|
| 551 |
+
|
| 552 |
+
Notice that such decay can happen simultaneously with other changes to the learning rate
|
| 553 |
+
from outside this scheduler. When last_epoch=-1, sets initial lr as lr.
|
| 554 |
+
|
| 555 |
+
Args:
|
| 556 |
+
optimizer (Optimizer): Wrapped optimizer.
|
| 557 |
+
milestones (list): List of epoch indices. Must be increasing.
|
| 558 |
+
gamma (float): Multiplicative factor of learning rate decay.
|
| 559 |
+
Default: 0.1.
|
| 560 |
+
last_epoch (int): The index of last epoch. Default: -1.
|
| 561 |
+
verbose (bool | str): If ``True``, prints a message to stdout for
|
| 562 |
+
each update. Default: ``False``.
|
| 563 |
+
|
| 564 |
+
.. deprecated:: 2.2
|
| 565 |
+
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
|
| 566 |
+
learning rate.
|
| 567 |
+
|
| 568 |
+
Example:
|
| 569 |
+
>>> # xdoctest: +SKIP
|
| 570 |
+
>>> # Assuming optimizer uses lr = 0.05 for all groups
|
| 571 |
+
>>> # lr = 0.05 if epoch < 30
|
| 572 |
+
>>> # lr = 0.005 if 30 <= epoch < 80
|
| 573 |
+
>>> # lr = 0.0005 if epoch >= 80
|
| 574 |
+
>>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)
|
| 575 |
+
>>> for epoch in range(100):
|
| 576 |
+
>>> train(...)
|
| 577 |
+
>>> validate(...)
|
| 578 |
+
>>> scheduler.step()
|
| 579 |
+
"""
|
| 580 |
+
|
| 581 |
+
def __init__(
|
| 582 |
+
self,
|
| 583 |
+
optimizer: Optimizer,
|
| 584 |
+
milestones: Iterable[int],
|
| 585 |
+
gamma=0.1,
|
| 586 |
+
last_epoch=-1,
|
| 587 |
+
verbose="deprecated",
|
| 588 |
+
): # noqa: D107
|
| 589 |
+
self.milestones = Counter(milestones)
|
| 590 |
+
self.gamma = gamma
|
| 591 |
+
super().__init__(optimizer, last_epoch, verbose)
|
| 592 |
+
|
| 593 |
+
def get_lr(self):
|
| 594 |
+
"""Compute the learning rate of each parameter group."""
|
| 595 |
+
_warn_get_lr_called_within_step(self)
|
| 596 |
+
|
| 597 |
+
if self.last_epoch not in self.milestones:
|
| 598 |
+
return [group["lr"] for group in self.optimizer.param_groups]
|
| 599 |
+
return [
|
| 600 |
+
group["lr"] * self.gamma ** self.milestones[self.last_epoch]
|
| 601 |
+
for group in self.optimizer.param_groups
|
| 602 |
+
]
|
| 603 |
+
|
| 604 |
+
def _get_closed_form_lr(self):
|
| 605 |
+
milestones = sorted(self.milestones.elements())
|
| 606 |
+
return [
|
| 607 |
+
base_lr * self.gamma ** bisect_right(milestones, self.last_epoch)
|
| 608 |
+
for base_lr in self.base_lrs
|
| 609 |
+
]
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
class ConstantLR(LRScheduler):
|
| 613 |
+
"""Multiply the learning rate of each parameter group by a small constant factor.
|
| 614 |
+
|
| 615 |
+
The multiplication is done until the number of epoch reaches a pre-defined milestone: total_iters.
|
| 616 |
+
Notice that such multiplication of the small constant factor can
|
| 617 |
+
happen simultaneously with other changes to the learning rate from outside this scheduler.
|
| 618 |
+
When last_epoch=-1, sets initial lr as lr.
|
| 619 |
+
|
| 620 |
+
Args:
|
| 621 |
+
optimizer (Optimizer): Wrapped optimizer.
|
| 622 |
+
factor (float): The number we multiply learning rate until the milestone. Default: 1./3.
|
| 623 |
+
total_iters (int): The number of steps that the scheduler multiplies the learning rate by the factor.
|
| 624 |
+
Default: 5.
|
| 625 |
+
last_epoch (int): The index of the last epoch. Default: -1.
|
| 626 |
+
verbose (bool | str): If ``True``, prints a message to stdout for
|
| 627 |
+
each update. Default: ``False``.
|
| 628 |
+
|
| 629 |
+
.. deprecated:: 2.2
|
| 630 |
+
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
|
| 631 |
+
learning rate.
|
| 632 |
+
|
| 633 |
+
Example:
|
| 634 |
+
>>> # xdoctest: +SKIP
|
| 635 |
+
>>> # Assuming optimizer uses lr = 0.05 for all groups
|
| 636 |
+
>>> # lr = 0.025 if epoch == 0
|
| 637 |
+
>>> # lr = 0.025 if epoch == 1
|
| 638 |
+
>>> # lr = 0.025 if epoch == 2
|
| 639 |
+
>>> # lr = 0.025 if epoch == 3
|
| 640 |
+
>>> # lr = 0.05 if epoch >= 4
|
| 641 |
+
>>> scheduler = ConstantLR(optimizer, factor=0.5, total_iters=4)
|
| 642 |
+
>>> for epoch in range(100):
|
| 643 |
+
>>> train(...)
|
| 644 |
+
>>> validate(...)
|
| 645 |
+
>>> scheduler.step()
|
| 646 |
+
"""
|
| 647 |
+
|
| 648 |
+
def __init__(
|
| 649 |
+
self,
|
| 650 |
+
optimizer: Optimizer,
|
| 651 |
+
factor=1.0 / 3,
|
| 652 |
+
total_iters=5,
|
| 653 |
+
last_epoch=-1,
|
| 654 |
+
verbose="deprecated",
|
| 655 |
+
): # noqa: D107
|
| 656 |
+
if factor > 1.0 or factor < 0:
|
| 657 |
+
raise ValueError(
|
| 658 |
+
"Constant multiplicative factor expected to be between 0 and 1."
|
| 659 |
+
)
|
| 660 |
+
|
| 661 |
+
self.factor = factor
|
| 662 |
+
self.total_iters = total_iters
|
| 663 |
+
super().__init__(optimizer, last_epoch, verbose)
|
| 664 |
+
|
| 665 |
+
def get_lr(self):
|
| 666 |
+
"""Compute the learning rate of each parameter group."""
|
| 667 |
+
_warn_get_lr_called_within_step(self)
|
| 668 |
+
|
| 669 |
+
if self.last_epoch == 0:
|
| 670 |
+
return [group["lr"] * self.factor for group in self.optimizer.param_groups]
|
| 671 |
+
|
| 672 |
+
if self.last_epoch != self.total_iters:
|
| 673 |
+
return [group["lr"] for group in self.optimizer.param_groups]
|
| 674 |
+
|
| 675 |
+
return [
|
| 676 |
+
group["lr"] * (1.0 / self.factor) for group in self.optimizer.param_groups
|
| 677 |
+
]
|
| 678 |
+
|
| 679 |
+
def _get_closed_form_lr(self):
|
| 680 |
+
return [
|
| 681 |
+
base_lr
|
| 682 |
+
* (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor))
|
| 683 |
+
for base_lr in self.base_lrs
|
| 684 |
+
]
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
class LinearLR(LRScheduler):
|
| 688 |
+
"""Decays the learning rate of each parameter group by linearly changing small multiplicative factor.
|
| 689 |
+
|
| 690 |
+
The multiplication is done until the number of epoch reaches a pre-defined milestone: total_iters.
|
| 691 |
+
Notice that such decay can happen simultaneously with other changes to the learning rate
|
| 692 |
+
from outside this scheduler. When last_epoch=-1, sets initial lr as lr.
|
| 693 |
+
|
| 694 |
+
Args:
|
| 695 |
+
optimizer (Optimizer): Wrapped optimizer.
|
| 696 |
+
start_factor (float): The number we multiply learning rate in the first epoch.
|
| 697 |
+
The multiplication factor changes towards end_factor in the following epochs.
|
| 698 |
+
Default: 1./3.
|
| 699 |
+
end_factor (float): The number we multiply learning rate at the end of linear changing
|
| 700 |
+
process. Default: 1.0.
|
| 701 |
+
total_iters (int): The number of iterations that multiplicative factor reaches to 1.
|
| 702 |
+
Default: 5.
|
| 703 |
+
last_epoch (int): The index of the last epoch. Default: -1.
|
| 704 |
+
verbose (bool | str): If ``True``, prints a message to stdout for
|
| 705 |
+
each update. Default: ``False``.
|
| 706 |
+
|
| 707 |
+
.. deprecated:: 2.2
|
| 708 |
+
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
|
| 709 |
+
learning rate.
|
| 710 |
+
|
| 711 |
+
Example:
|
| 712 |
+
>>> # xdoctest: +SKIP
|
| 713 |
+
>>> # Assuming optimizer uses lr = 0.05 for all groups
|
| 714 |
+
>>> # lr = 0.025 if epoch == 0
|
| 715 |
+
>>> # lr = 0.03125 if epoch == 1
|
| 716 |
+
>>> # lr = 0.0375 if epoch == 2
|
| 717 |
+
>>> # lr = 0.04375 if epoch == 3
|
| 718 |
+
>>> # lr = 0.05 if epoch >= 4
|
| 719 |
+
>>> scheduler = LinearLR(optimizer, start_factor=0.5, total_iters=4)
|
| 720 |
+
>>> for epoch in range(100):
|
| 721 |
+
>>> train(...)
|
| 722 |
+
>>> validate(...)
|
| 723 |
+
>>> scheduler.step()
|
| 724 |
+
"""
|
| 725 |
+
|
| 726 |
+
def __init__(
|
| 727 |
+
self,
|
| 728 |
+
optimizer: Optimizer,
|
| 729 |
+
start_factor=1.0 / 3,
|
| 730 |
+
end_factor=1.0,
|
| 731 |
+
total_iters=5,
|
| 732 |
+
last_epoch=-1,
|
| 733 |
+
verbose="deprecated",
|
| 734 |
+
): # noqa: D107
|
| 735 |
+
if start_factor > 1.0 or start_factor <= 0:
|
| 736 |
+
raise ValueError(
|
| 737 |
+
"Starting multiplicative factor expected to be greater than 0 and less or equal to 1."
|
| 738 |
+
)
|
| 739 |
+
|
| 740 |
+
if end_factor > 1.0 or end_factor < 0:
|
| 741 |
+
raise ValueError(
|
| 742 |
+
"Ending multiplicative factor expected to be between 0 and 1."
|
| 743 |
+
)
|
| 744 |
+
|
| 745 |
+
self.start_factor = start_factor
|
| 746 |
+
self.end_factor = end_factor
|
| 747 |
+
self.total_iters = total_iters
|
| 748 |
+
super().__init__(optimizer, last_epoch, verbose)
|
| 749 |
+
|
| 750 |
+
def get_lr(self):
|
| 751 |
+
"""Compute the learning rate."""
|
| 752 |
+
_warn_get_lr_called_within_step(self)
|
| 753 |
+
|
| 754 |
+
if self.last_epoch == 0:
|
| 755 |
+
return [
|
| 756 |
+
group["lr"] * self.start_factor for group in self.optimizer.param_groups
|
| 757 |
+
]
|
| 758 |
+
|
| 759 |
+
if self.last_epoch > self.total_iters:
|
| 760 |
+
return [group["lr"] for group in self.optimizer.param_groups]
|
| 761 |
+
|
| 762 |
+
return [
|
| 763 |
+
group["lr"]
|
| 764 |
+
* (
|
| 765 |
+
1.0
|
| 766 |
+
+ (self.end_factor - self.start_factor)
|
| 767 |
+
/ (
|
| 768 |
+
self.total_iters * self.start_factor
|
| 769 |
+
+ (self.last_epoch - 1) * (self.end_factor - self.start_factor)
|
| 770 |
+
)
|
| 771 |
+
)
|
| 772 |
+
for group in self.optimizer.param_groups
|
| 773 |
+
]
|
| 774 |
+
|
| 775 |
+
def _get_closed_form_lr(self):
|
| 776 |
+
return [
|
| 777 |
+
base_lr
|
| 778 |
+
* (
|
| 779 |
+
self.start_factor
|
| 780 |
+
+ (self.end_factor - self.start_factor)
|
| 781 |
+
* min(self.total_iters, self.last_epoch)
|
| 782 |
+
/ self.total_iters
|
| 783 |
+
)
|
| 784 |
+
for base_lr in self.base_lrs
|
| 785 |
+
]
|
| 786 |
+
|
| 787 |
+
|
| 788 |
+
class ExponentialLR(LRScheduler):
|
| 789 |
+
"""Decays the learning rate of each parameter group by gamma every epoch.
|
| 790 |
+
|
| 791 |
+
When last_epoch=-1, sets initial lr as lr.
|
| 792 |
+
|
| 793 |
+
Args:
|
| 794 |
+
optimizer (Optimizer): Wrapped optimizer.
|
| 795 |
+
gamma (float): Multiplicative factor of learning rate decay.
|
| 796 |
+
last_epoch (int): The index of last epoch. Default: -1.
|
| 797 |
+
verbose (bool | str): If ``True``, prints a message to stdout for
|
| 798 |
+
each update. Default: ``False``.
|
| 799 |
+
|
| 800 |
+
.. deprecated:: 2.2
|
| 801 |
+
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
|
| 802 |
+
learning rate.
|
| 803 |
+
"""
|
| 804 |
+
|
| 805 |
+
def __init__(
|
| 806 |
+
self, optimizer: Optimizer, gamma: float, last_epoch=-1, verbose="deprecated"
|
| 807 |
+
): # noqa: D107
|
| 808 |
+
self.gamma = gamma
|
| 809 |
+
super().__init__(optimizer, last_epoch, verbose)
|
| 810 |
+
|
| 811 |
+
def get_lr(self):
|
| 812 |
+
"""Compute the learning rate of each parameter group."""
|
| 813 |
+
_warn_get_lr_called_within_step(self)
|
| 814 |
+
|
| 815 |
+
if self.last_epoch == 0:
|
| 816 |
+
return [group["lr"] for group in self.optimizer.param_groups]
|
| 817 |
+
return [group["lr"] * self.gamma for group in self.optimizer.param_groups]
|
| 818 |
+
|
| 819 |
+
def _get_closed_form_lr(self):
|
| 820 |
+
return [base_lr * self.gamma**self.last_epoch for base_lr in self.base_lrs]
|
| 821 |
+
|
| 822 |
+
|
| 823 |
+
class SequentialLR(LRScheduler):
    """Contains a list of schedulers expected to be called sequentially during the optimization process.

    Specifically, the schedulers will be called according to the milestone points, which should provide exact
    intervals by which each scheduler should be called at a given epoch.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        schedulers (list): List of chained schedulers.
        milestones (list): List of integers that reflects milestone points.
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool | str): Does nothing.

        .. deprecated:: 2.2
            ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
            learning rate.

    Example:
        >>> # xdoctest: +SKIP
        >>> # Assuming optimizer uses lr = 1. for all groups
        >>> # lr = 0.1 if epoch == 0
        >>> # lr = 0.1 if epoch == 1
        >>> # lr = 0.9 if epoch == 2
        >>> # lr = 0.81 if epoch == 3
        >>> # lr = 0.729 if epoch == 4
        >>> scheduler1 = ConstantLR(optimizer, factor=0.1, total_iters=2)
        >>> scheduler2 = ExponentialLR(optimizer, gamma=0.9)
        >>> scheduler = SequentialLR(optimizer, schedulers=[scheduler1, scheduler2], milestones=[2])
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    """

    def __init__(
        self,
        optimizer: Optimizer,
        schedulers: List[LRScheduler],
        milestones: List[int],
        last_epoch=-1,
        verbose="deprecated",
    ):  # noqa: D107
        if len(schedulers) < 1:
            raise ValueError(
                f"{self.__class__.__name__} expects at least one scheduler, but got no scheduler."
            )

        # Every wrapped scheduler must be a chainable scheduler attached to
        # the same optimizer as this wrapper.
        for scheduler_idx, scheduler in enumerate(schedulers):
            if not hasattr(scheduler, "optimizer"):
                raise TypeError(
                    f"{self.__class__.__name__} at index {scheduler_idx} should have `optimizer` as its attribute."
                )
            if isinstance(scheduler, ReduceLROnPlateau):
                raise ValueError(
                    f"{self.__class__.__name__} does not support `ReduceLROnPlateau` scheduler as it "
                    "requires additional kwargs to be specified when calling `step`, "
                    f"but got one at index {scheduler_idx} in the given schedulers sequence."
                )
            if optimizer != scheduler.optimizer:
                raise ValueError(
                    f"{self.__class__.__name__} expects all schedulers to belong to the same optimizer, but "
                    f"got scheduler {scheduler.__class__.__name__} at index {scheduler_idx} has {scheduler.optimizer}, "
                    f"which is different from {optimizer.__class__.__name__}."
                )

        if len(milestones) != len(schedulers) - 1:
            raise ValueError(
                "Sequential Schedulers expects number of schedulers provided to be one more "
                f"than the number of milestone points, but got number of schedulers {len(schedulers)} and the "
                f"number of milestones to be equal to {len(milestones)}"
            )
        _check_verbose_deprecated_warning(verbose)
        self._schedulers = schedulers
        self._milestones = milestones
        self.last_epoch = last_epoch + 1
        self.optimizer = optimizer

        # Reset learning rates back to initial values
        for group in self.optimizer.param_groups:
            group["lr"] = group["initial_lr"]

        # "Undo" the step performed by other schedulers
        for scheduler in self._schedulers:
            scheduler.last_epoch -= 1

        # Perform the initial step for only the first scheduler
        self._schedulers[0]._initial_step()

        self._last_lr = schedulers[0].get_last_lr()

    def step(self):
        """Perform a step."""
        self.last_epoch += 1
        # bisect_right selects which scheduler is active for this epoch.
        idx = bisect_right(self._milestones, self.last_epoch)
        scheduler = self._schedulers[idx]
        # Exactly at a milestone, restart the newly active scheduler at epoch 0.
        if idx > 0 and self._milestones[idx - 1] == self.last_epoch:
            scheduler.step(0)
        else:
            scheduler.step()

        self._last_lr = scheduler.get_last_lr()

    def state_dict(self):
        """Return the state of the scheduler as a :class:`dict`.

        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        The wrapped scheduler states will also be saved.
        """
        state_dict = {
            key: value
            for key, value in self.__dict__.items()
            if key not in ("optimizer", "_schedulers")
        }
        state_dict["_schedulers"] = [None] * len(self._schedulers)

        for idx, s in enumerate(self._schedulers):
            state_dict["_schedulers"][idx] = s.state_dict()

        return state_dict

    def load_state_dict(self, state_dict):
        """Load the scheduler's state.

        Args:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        _schedulers = state_dict.pop("_schedulers")
        self.__dict__.update(state_dict)
        # Restore state_dict keys in order to prevent side effects
        # https://github.com/pytorch/pytorch/issues/32756
        state_dict["_schedulers"] = _schedulers

        for idx, s in enumerate(_schedulers):
            self._schedulers[idx].load_state_dict(s)
|
| 959 |
+
|
| 960 |
+
|
| 961 |
+
class PolynomialLR(LRScheduler):
    """Decays the learning rate of each parameter group using a polynomial function in the given total_iters.

    When last_epoch=-1, sets initial lr as lr.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        total_iters (int): The number of steps that the scheduler decays the learning rate. Default: 5.
        power (float): The power of the polynomial. Default: 1.0.
        verbose (bool | str): If ``True``, prints a message to stdout for
            each update. Default: ``False``.

        .. deprecated:: 2.2
            ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
            learning rate.

    Example:
        >>> # xdoctest: +SKIP("undefined vars")
        >>> # Assuming optimizer uses lr = 0.001 for all groups
        >>> # lr = 0.001 if epoch == 0
        >>> # lr = 0.00075 if epoch == 1
        >>> # lr = 0.00050 if epoch == 2
        >>> # lr = 0.00025 if epoch == 3
        >>> # lr = 0.0 if epoch >= 4
        >>> scheduler = PolynomialLR(optimizer, total_iters=4, power=1.0)
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    """

    def __init__(
        self,
        optimizer: Optimizer,
        total_iters=5,
        power=1.0,
        last_epoch=-1,
        verbose="deprecated",
    ):  # noqa: D107
        self.total_iters = total_iters
        self.power = power
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        """Compute the learning rate."""
        _warn_get_lr_called_within_step(self)

        # No decay at epoch 0, and none once total_iters has been exceeded.
        if self.last_epoch == 0 or self.last_epoch > self.total_iters:
            return [group["lr"] for group in self.optimizer.param_groups]

        # Ratio of consecutive closed-form factors, so the recursive update
        # applied to the current lr matches the closed-form schedule.
        decay_factor = (
            (1.0 - self.last_epoch / self.total_iters)
            / (1.0 - (self.last_epoch - 1) / self.total_iters)
        ) ** self.power
        return [group["lr"] * decay_factor for group in self.optimizer.param_groups]

    def _get_closed_form_lr(self):
        # lr(t) = base_lr * (1 - min(total_iters, t) / total_iters) ** power
        return [
            (
                base_lr
                * (1.0 - min(self.total_iters, self.last_epoch) / self.total_iters)
                ** self.power
            )
            for base_lr in self.base_lrs
        ]
|
| 1026 |
+
|
| 1027 |
+
|
| 1028 |
+
class CosineAnnealingLR(LRScheduler):
    r"""Set the learning rate of each parameter group using a cosine annealing schedule.

    The :math:`\eta_{max}` is set to the initial lr and
    :math:`T_{cur}` is the number of epochs since the last restart in SGDR:

    .. math::
        \begin{aligned}
            \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1
            + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right),
            & T_{cur} \neq (2k+1)T_{max}; \\
            \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})
            \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right),
            & T_{cur} = (2k+1)T_{max}.
        \end{aligned}

    When last_epoch=-1, sets initial lr as lr. Notice that because the schedule
    is defined recursively, the learning rate can be simultaneously modified
    outside this scheduler by other operators. If the learning rate is set
    solely by this scheduler, the learning rate at each step becomes:

    .. math::
        \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
        \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right)

    It has been proposed in
    `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
    implements the cosine annealing part of SGDR, and not the restarts.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        T_max (int): Maximum number of iterations.
        eta_min (float): Minimum learning rate. Default: 0.
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool | str): If ``True``, prints a message to stdout for
            each update. Default: ``False``.

        .. deprecated:: 2.2
            ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
            learning rate.

    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """

    def __init__(
        self,
        optimizer: Optimizer,
        T_max: int,
        eta_min=0.0,
        last_epoch=-1,
        verbose="deprecated",
    ):  # noqa: D107
        self.T_max = T_max
        self.eta_min = eta_min
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        """Retrieve the learning rate of each parameter group."""
        _warn_get_lr_called_within_step(self)

        if self.last_epoch == 0:
            return [group["lr"] for group in self.optimizer.param_groups]
        elif self._step_count == 1 and self.last_epoch > 0:
            # Resuming from a checkpoint mid-schedule: use the closed form
            # directly instead of the recursive update.
            return [
                self.eta_min
                + (base_lr - self.eta_min)
                * (1 + math.cos((self.last_epoch) * math.pi / self.T_max))
                / 2
                for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
            ]
        elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
            # Turning point of the cosine (T_cur = (2k+1) * T_max): apply the
            # additive correction from the SGDR recurrence.
            return [
                group["lr"]
                + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2
                for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
            ]
        # Generic recursive step: scale the offset from eta_min by the ratio
        # of consecutive cosine factors.
        return [
            (1 + math.cos(math.pi * self.last_epoch / self.T_max))
            / (1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max))
            * (group["lr"] - self.eta_min)
            + self.eta_min
            for group in self.optimizer.param_groups
        ]

    def _get_closed_form_lr(self):
        # eta_min + (base_lr - eta_min) * (1 + cos(pi * t / T_max)) / 2
        return [
            self.eta_min
            + (base_lr - self.eta_min)
            * (1 + math.cos(math.pi * self.last_epoch / self.T_max))
            / 2
            for base_lr in self.base_lrs
        ]
|
| 1121 |
+
|
| 1122 |
+
|
| 1123 |
+
class ChainedScheduler(LRScheduler):
    """Chains a list of learning rate schedulers.

    Takes in a sequence of chainable learning rate schedulers and calls their
    step() functions consecutively in just one call to step().

    Args:
        schedulers (sequence): sequence of chained schedulers.
        optimizer (Optimizer, optional): Wrapped optimizer. Default: None.

    Example:
        >>> # xdoctest: +SKIP
        >>> # Assuming optimizer uses lr = 1. for all groups
        >>> # lr = 0.09 if epoch == 0
        >>> # lr = 0.081 if epoch == 1
        >>> # lr = 0.729 if epoch == 2
        >>> # lr = 0.6561 if epoch == 3
        >>> # lr = 0.59049 if epoch >= 4
        >>> scheduler1 = ConstantLR(optimizer, factor=0.1, total_iters=2)
        >>> scheduler2 = ExponentialLR(optimizer, gamma=0.9)
        >>> scheduler = ChainedScheduler([scheduler1, scheduler2], optimizer=optimizer)
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    """

    def __init__(
        self, schedulers: Sequence[LRScheduler], optimizer: Optional[Optimizer] = None
    ):  # noqa: D107
        if len(schedulers) < 1:
            raise ValueError(
                f"{self.__class__.__name__} expects at least one scheduler to be chained, but got no scheduler."
            )

        # When no optimizer is given, adopt the first scheduler's optimizer.
        optimizer = optimizer or schedulers[0].optimizer
        for scheduler_idx, scheduler in enumerate(schedulers):
            if not hasattr(scheduler, "optimizer"):
                raise TypeError(
                    f"{self.__class__.__name__} at index {scheduler_idx} should have `optimizer` as its attribute."
                )
            if isinstance(scheduler, ReduceLROnPlateau):
                raise ValueError(
                    f"{self.__class__.__name__} does not support `ReduceLROnPlateau` scheduler as it "
                    "requires additional kwargs to be specified when calling `step`, "
                    f"but got one at index {scheduler_idx} in the given schedulers sequence."
                )
            if optimizer != scheduler.optimizer:
                raise ValueError(
                    f"{self.__class__.__name__} expects all schedulers to belong to the same optimizer, but "
                    f"got scheduler {scheduler.__class__.__name__} at index {scheduler_idx} has {scheduler.optimizer}, "
                    f"which is different from {optimizer.__class__.__name__}."
                )
        self._schedulers = schedulers
        self.optimizer = optimizer
        self._last_lr = [
            group["lr"] for group in self._schedulers[-1].optimizer.param_groups
        ]

    def step(self):
        """Perform a step."""
        # Each chained scheduler applies its (multiplicative) update in turn.
        for scheduler in self._schedulers:
            scheduler.step()
        self._last_lr = [
            group["lr"] for group in self._schedulers[-1].optimizer.param_groups
        ]

    def state_dict(self):
        """Return the state of the scheduler as a :class:`dict`.

        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        The wrapped scheduler states will also be saved.
        """
        state_dict = {
            key: value
            for key, value in self.__dict__.items()
            if key not in ("optimizer", "_schedulers")
        }
        state_dict["_schedulers"] = [None] * len(self._schedulers)

        for idx, s in enumerate(self._schedulers):
            state_dict["_schedulers"][idx] = s.state_dict()

        return state_dict

    def load_state_dict(self, state_dict):
        """Load the scheduler's state.

        Args:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        _schedulers = state_dict.pop("_schedulers")
        self.__dict__.update(state_dict)
        # Restore state_dict keys in order to prevent side effects
        # https://github.com/pytorch/pytorch/issues/32756
        state_dict["_schedulers"] = _schedulers

        for idx, s in enumerate(_schedulers):
            self._schedulers[idx].load_state_dict(s)
|
| 1224 |
+
|
| 1225 |
+
|
| 1226 |
+
class ReduceLROnPlateau(LRScheduler):
    """Reduce learning rate when a metric has stopped improving.

    Models often benefit from reducing the learning rate by a factor
    of 2-10 once learning stagnates. This scheduler reads a metrics
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate is reduced.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        mode (str): One of `min`, `max`. In `min` mode, lr will
            be reduced when the quantity monitored has stopped
            decreasing; in `max` mode it will be reduced when the
            quantity monitored has stopped increasing. Default: 'min'.
        factor (float): Factor by which the learning rate will be
            reduced. new_lr = lr * factor. Default: 0.1.
        patience (int): The number of allowed epochs with no improvement after
            which the learning rate will be reduced.
            For example, consider the case of having no patience (`patience = 0`).
            In the first epoch, a baseline is established and is always considered good as there's no previous baseline.
            In the second epoch, if the performance is worse than the baseline,
            we have what is considered an intolerable epoch.
            Since the count of intolerable epochs (1) is greater than the patience level (0),
            the learning rate is reduced at the end of this epoch.
            From the third epoch onwards, the learning rate continues to be reduced at the end of each epoch
            if the performance is worse than the baseline. If the performance improves or remains the same,
            the learning rate is not adjusted.
            Default: 10.
        threshold (float): Threshold for measuring the new optimum,
            to only focus on significant changes. Default: 1e-4.
        threshold_mode (str): One of `rel`, `abs`. In `rel` mode,
            dynamic_threshold = best * ( 1 + threshold ) in 'max'
            mode or best * ( 1 - threshold ) in `min` mode.
            In `abs` mode, dynamic_threshold = best + threshold in
            `max` mode or best - threshold in `min` mode. Default: 'rel'.
        cooldown (int): Number of epochs to wait before resuming
            normal operation after lr has been reduced. Default: 0.
        min_lr (float or list): A scalar or a list of scalars. A
            lower bound on the learning rate of all param groups
            or each group respectively. Default: 0.
        eps (float): Minimal decay applied to lr. If the difference
            between new and old lr is smaller than eps, the update is
            ignored. Default: 1e-8.
        verbose (bool | str): If ``True``, prints a message to stdout for
            each update. Default: ``False``.

        .. deprecated:: 2.2
            ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
            learning rate.

    Example:
        >>> # xdoctest: +SKIP
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> scheduler = ReduceLROnPlateau(optimizer, 'min')
        >>> for epoch in range(10):
        >>>     train(...)
        >>>     val_loss = validate(...)
        >>>     # Note that step should be called after validate()
        >>>     scheduler.step(val_loss)
    """

    def __init__(
        self,
        optimizer: Optimizer,
        mode: Literal["min", "max"] = "min",
        factor=0.1,
        patience=10,
        threshold=1e-4,
        threshold_mode: Literal["rel", "abs"] = "rel",
        cooldown=0,
        min_lr: Union[List[float], float] = 0,
        eps=1e-8,
        verbose="deprecated",
    ):  # noqa: D107
        if factor >= 1.0:
            raise ValueError("Factor should be < 1.0.")
        self.factor = factor

        # Attach optimizer
        if not isinstance(optimizer, Optimizer):
            raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
        self.optimizer = optimizer

        # min_lr may be scalar (broadcast to all groups) or per-group list.
        if isinstance(min_lr, (list, tuple)):
            if len(min_lr) != len(optimizer.param_groups):
                raise ValueError(
                    f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}"
                )
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * len(optimizer.param_groups)

        self.patience = patience

        self.verbose = _check_verbose_deprecated_warning(verbose)
        self.cooldown = cooldown
        self.cooldown_counter = 0
        self.mode = mode
        self.threshold = threshold
        self.threshold_mode = threshold_mode
        self.best: float
        self.num_bad_epochs: int
        self.mode_worse: float  # the worse value for the chosen mode
        self.eps = eps
        self.last_epoch = 0
        self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
        self._init_is_better(
            mode=mode, threshold=threshold, threshold_mode=threshold_mode
        )
        self._reset()

    def _reset(self):
        """Reset num_bad_epochs counter and cooldown counter."""
        self.best = self.mode_worse
        self.cooldown_counter = 0
        self.num_bad_epochs = 0

    def step(self, metrics: SupportsFloat, epoch=None):  # type: ignore[override]
        """Perform a step."""
        # convert `metrics` to float, in case it's a zero-dim Tensor
        current = float(metrics)
        if epoch is None:
            epoch = self.last_epoch + 1
        else:
            warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
        self.last_epoch = epoch

        if self.is_better(current, self.best):
            self.best = current
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1

        if self.in_cooldown:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0  # ignore any bad epochs in cooldown

        if self.num_bad_epochs > self.patience:
            self._reduce_lr(epoch)
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0

        self._last_lr = [group["lr"] for group in self.optimizer.param_groups]

    def _reduce_lr(self, epoch):
        # Scale each group's lr by factor, clamped below by its min_lr; skip
        # updates smaller than eps to avoid meaningless tiny changes.
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group["lr"])
            new_lr = max(old_lr * self.factor, self.min_lrs[i])
            if old_lr - new_lr > self.eps:
                param_group["lr"] = new_lr

    @property
    def in_cooldown(self):  # noqa: D102
        return self.cooldown_counter > 0

    def is_better(self, a, best):  # noqa: D102
        if self.mode == "min" and self.threshold_mode == "rel":
            rel_epsilon = 1.0 - self.threshold
            return a < best * rel_epsilon

        elif self.mode == "min" and self.threshold_mode == "abs":
            return a < best - self.threshold

        elif self.mode == "max" and self.threshold_mode == "rel":
            rel_epsilon = self.threshold + 1.0
            return a > best * rel_epsilon

        else:  # mode == 'max' and epsilon_mode == 'abs':
            return a > best + self.threshold

    def _init_is_better(self, mode, threshold, threshold_mode):
        if mode not in {"min", "max"}:
            raise ValueError("mode " + mode + " is unknown!")
        if threshold_mode not in {"rel", "abs"}:
            raise ValueError("threshold mode " + threshold_mode + " is unknown!")

        if mode == "min":
            self.mode_worse = inf
        else:  # mode == 'max':
            self.mode_worse = -inf

        self.mode = mode
        self.threshold = threshold
        self.threshold_mode = threshold_mode

    def state_dict(self):  # noqa: D102
        return {
            key: value for key, value in self.__dict__.items() if key != "optimizer"
        }

    def load_state_dict(self, state_dict):
        """Load the scheduler's state."""
        self.__dict__.update(state_dict)
        # Re-derive mode_worse after restoring, in case mode/threshold changed.
        self._init_is_better(
            mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode
        )
|
| 1422 |
+
|
| 1423 |
+
|
| 1424 |
+
class CyclicLR(LRScheduler):
|
| 1425 |
+
r"""Sets the learning rate of each parameter group according to cyclical learning rate policy (CLR).
|
| 1426 |
+
|
| 1427 |
+
The policy cycles the learning rate between two boundaries with a constant frequency,
|
| 1428 |
+
as detailed in the paper `Cyclical Learning Rates for Training Neural Networks`_.
|
| 1429 |
+
The distance between the two boundaries can be scaled on a per-iteration
|
| 1430 |
+
or per-cycle basis.
|
| 1431 |
+
|
| 1432 |
+
Cyclical learning rate policy changes the learning rate after every batch.
|
| 1433 |
+
`step` should be called after a batch has been used for training.
|
| 1434 |
+
|
| 1435 |
+
This class has three built-in policies, as put forth in the paper:
|
| 1436 |
+
|
| 1437 |
+
* "triangular": A basic triangular cycle without amplitude scaling.
|
| 1438 |
+
* "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle.
|
| 1439 |
+
* "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}`
|
| 1440 |
+
at each cycle iteration.
|
| 1441 |
+
|
| 1442 |
+
This implementation was adapted from the github repo: `bckenstler/CLR`_
|
| 1443 |
+
|
| 1444 |
+
Args:
|
| 1445 |
+
optimizer (Optimizer): Wrapped optimizer.
|
| 1446 |
+
base_lr (float or list): Initial learning rate which is the
|
| 1447 |
+
lower boundary in the cycle for each parameter group.
|
| 1448 |
+
max_lr (float or list): Upper learning rate boundaries in the cycle
|
| 1449 |
+
for each parameter group. Functionally,
|
| 1450 |
+
it defines the cycle amplitude (max_lr - base_lr).
|
| 1451 |
+
The lr at any cycle is the sum of base_lr
|
| 1452 |
+
and some scaling of the amplitude; therefore
|
| 1453 |
+
max_lr may not actually be reached depending on
|
| 1454 |
+
scaling function.
|
| 1455 |
+
step_size_up (int): Number of training iterations in the
|
| 1456 |
+
increasing half of a cycle. Default: 2000
|
| 1457 |
+
step_size_down (int): Number of training iterations in the
|
| 1458 |
+
decreasing half of a cycle. If step_size_down is None,
|
| 1459 |
+
it is set to step_size_up. Default: None
|
| 1460 |
+
mode (str): One of {triangular, triangular2, exp_range}.
|
| 1461 |
+
Values correspond to policies detailed above.
|
| 1462 |
+
If scale_fn is not None, this argument is ignored.
|
| 1463 |
+
Default: 'triangular'
|
| 1464 |
+
gamma (float): Constant in 'exp_range' scaling function:
|
| 1465 |
+
gamma**(cycle iterations)
|
| 1466 |
+
Default: 1.0
|
| 1467 |
+
scale_fn (function): Custom scaling policy defined by a single
|
| 1468 |
+
argument lambda function, where
|
| 1469 |
+
0 <= scale_fn(x) <= 1 for all x >= 0.
|
| 1470 |
+
If specified, then 'mode' is ignored.
|
| 1471 |
+
Default: None
|
| 1472 |
+
scale_mode (str): {'cycle', 'iterations'}.
|
| 1473 |
+
Defines whether scale_fn is evaluated on
|
| 1474 |
+
cycle number or cycle iterations (training
|
| 1475 |
+
iterations since start of cycle).
|
| 1476 |
+
Default: 'cycle'
|
| 1477 |
+
cycle_momentum (bool): If ``True``, momentum is cycled inversely
|
| 1478 |
+
to learning rate between 'base_momentum' and 'max_momentum'.
|
| 1479 |
+
Default: True
|
| 1480 |
+
base_momentum (float or list): Lower momentum boundaries in the cycle
|
| 1481 |
+
for each parameter group. Note that momentum is cycled inversely
|
| 1482 |
+
to learning rate; at the peak of a cycle, momentum is
|
| 1483 |
+
'base_momentum' and learning rate is 'max_lr'.
|
| 1484 |
+
Default: 0.8
|
| 1485 |
+
max_momentum (float or list): Upper momentum boundaries in the cycle
|
| 1486 |
+
for each parameter group. Functionally,
|
| 1487 |
+
it defines the cycle amplitude (max_momentum - base_momentum).
|
| 1488 |
+
The momentum at any cycle is the difference of max_momentum
|
| 1489 |
+
and some scaling of the amplitude; therefore
|
| 1490 |
+
base_momentum may not actually be reached depending on
|
| 1491 |
+
scaling function. Note that momentum is cycled inversely
|
| 1492 |
+
to learning rate; at the start of a cycle, momentum is 'max_momentum'
|
| 1493 |
+
and learning rate is 'base_lr'
|
| 1494 |
+
Default: 0.9
|
| 1495 |
+
last_epoch (int): The index of the last batch. This parameter is used when
|
| 1496 |
+
resuming a training job. Since `step()` should be invoked after each
|
| 1497 |
+
batch instead of after each epoch, this number represents the total
|
| 1498 |
+
number of *batches* computed, not the total number of epochs computed.
|
| 1499 |
+
When last_epoch=-1, the schedule is started from the beginning.
|
| 1500 |
+
Default: -1
|
| 1501 |
+
verbose (bool | str): If ``True``, prints a message to stdout for
|
| 1502 |
+
each update. Default: ``False``.
|
| 1503 |
+
|
| 1504 |
+
.. deprecated:: 2.2
|
| 1505 |
+
``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
|
| 1506 |
+
learning rate.
|
| 1507 |
+
|
| 1508 |
+
Example:
|
| 1509 |
+
>>> # xdoctest: +SKIP
|
| 1510 |
+
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
|
| 1511 |
+
>>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)
|
| 1512 |
+
>>> data_loader = torch.utils.data.DataLoader(...)
|
| 1513 |
+
>>> for epoch in range(10):
|
| 1514 |
+
>>> for batch in data_loader:
|
| 1515 |
+
>>> train_batch(...)
|
| 1516 |
+
>>> scheduler.step()
|
| 1517 |
+
|
| 1518 |
+
|
| 1519 |
+
.. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186
|
| 1520 |
+
.. _bckenstler/CLR: https://github.com/bckenstler/CLR
|
| 1521 |
+
"""
|
| 1522 |
+
|
| 1523 |
+
def __init__(
|
| 1524 |
+
self,
|
| 1525 |
+
optimizer: Optimizer,
|
| 1526 |
+
base_lr: Union[float, List[float]],
|
| 1527 |
+
max_lr: Union[float, List[float]],
|
| 1528 |
+
step_size_up=2000,
|
| 1529 |
+
step_size_down: Optional[int] = None,
|
| 1530 |
+
mode: Literal["triangular", "triangular2", "exp_range"] = "triangular",
|
| 1531 |
+
gamma=1.0,
|
| 1532 |
+
scale_fn: Optional[Callable[[float], float]] = None,
|
| 1533 |
+
scale_mode: Literal["cycle", "iterations"] = "cycle",
|
| 1534 |
+
cycle_momentum=True,
|
| 1535 |
+
base_momentum=0.8,
|
| 1536 |
+
max_momentum=0.9,
|
| 1537 |
+
last_epoch=-1,
|
| 1538 |
+
verbose="deprecated",
|
| 1539 |
+
): # noqa: D107
|
| 1540 |
+
# Attach optimizer
|
| 1541 |
+
if not isinstance(optimizer, Optimizer):
|
| 1542 |
+
raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
|
| 1543 |
+
self.optimizer = optimizer
|
| 1544 |
+
|
| 1545 |
+
base_lrs = _format_param("base_lr", optimizer, base_lr)
|
| 1546 |
+
if last_epoch == -1:
|
| 1547 |
+
for lr, group in zip(base_lrs, optimizer.param_groups):
|
| 1548 |
+
if isinstance(group["lr"], Tensor):
|
| 1549 |
+
lr_val = lr.item() if isinstance(lr, Tensor) else lr
|
| 1550 |
+
group["lr"].fill_(lr_val)
|
| 1551 |
+
else:
|
| 1552 |
+
group["lr"] = lr
|
| 1553 |
+
|
| 1554 |
+
self.max_lrs = _format_param("max_lr", optimizer, max_lr)
|
| 1555 |
+
|
| 1556 |
+
step_size_up = float(step_size_up)
|
| 1557 |
+
step_size_down = (
|
| 1558 |
+
float(step_size_down) if step_size_down is not None else step_size_up
|
| 1559 |
+
)
|
| 1560 |
+
self.total_size = step_size_up + step_size_down
|
| 1561 |
+
self.step_ratio = step_size_up / self.total_size
|
| 1562 |
+
|
| 1563 |
+
if mode not in ["triangular", "triangular2", "exp_range"] and scale_fn is None:
|
| 1564 |
+
raise ValueError("mode is invalid and scale_fn is None")
|
| 1565 |
+
|
| 1566 |
+
self.mode = mode
|
| 1567 |
+
self.gamma = gamma
|
| 1568 |
+
|
| 1569 |
+
self._scale_fn_ref: Callable[[float], float]
|
| 1570 |
+
self._scale_fn_custom = scale_fn
|
| 1571 |
+
self.scale_mode = scale_mode
|
| 1572 |
+
self._init_scale_fn()
|
| 1573 |
+
|
| 1574 |
+
self.cycle_momentum = cycle_momentum
|
| 1575 |
+
if cycle_momentum:
|
| 1576 |
+
if (
|
| 1577 |
+
"momentum" not in optimizer.defaults
|
| 1578 |
+
and "betas" not in optimizer.defaults
|
| 1579 |
+
):
|
| 1580 |
+
raise ValueError(
|
| 1581 |
+
"optimizer must support momentum or beta1 with `cycle_momentum` option enabled"
|
| 1582 |
+
)
|
| 1583 |
+
|
| 1584 |
+
self.use_beta1 = "betas" in self.optimizer.defaults
|
| 1585 |
+
self.base_momentums = _format_param(
|
| 1586 |
+
"base_momentum", optimizer, base_momentum
|
| 1587 |
+
)
|
| 1588 |
+
self.max_momentums = _format_param("max_momentum", optimizer, max_momentum)
|
| 1589 |
+
if last_epoch == -1:
|
| 1590 |
+
for m_momentum, b_momentum, group in zip(
|
| 1591 |
+
self.max_momentums, self.base_momentums, optimizer.param_groups
|
| 1592 |
+
):
|
| 1593 |
+
if self.use_beta1:
|
| 1594 |
+
group["betas"] = (m_momentum, *group["betas"][1:])
|
| 1595 |
+
else:
|
| 1596 |
+
group["momentum"] = m_momentum
|
| 1597 |
+
group["max_momentum"] = m_momentum
|
| 1598 |
+
group["base_momentum"] = b_momentum
|
| 1599 |
+
|
| 1600 |
+
super().__init__(optimizer, last_epoch, verbose)
|
| 1601 |
+
self.base_lrs = base_lrs
|
| 1602 |
+
|
| 1603 |
+
def _init_scale_fn(self):
|
| 1604 |
+
if self._scale_fn_custom is not None:
|
| 1605 |
+
return
|
| 1606 |
+
if self.mode == "triangular":
|
| 1607 |
+
self._scale_fn_ref = self._triangular_scale_fn
|
| 1608 |
+
self.scale_mode = "cycle"
|
| 1609 |
+
elif self.mode == "triangular2":
|
| 1610 |
+
self._scale_fn_ref = self._triangular2_scale_fn
|
| 1611 |
+
self.scale_mode = "cycle"
|
| 1612 |
+
elif self.mode == "exp_range":
|
| 1613 |
+
self._scale_fn_ref = partial(self._exp_range_scale_fn, self.gamma)
|
| 1614 |
+
self.scale_mode = "iterations"
|
| 1615 |
+
|
| 1616 |
+
def scale_fn(self, x) -> float:
    """Get the scaling policy: the user-supplied callable if any, else the built-in one."""
    custom = self._scale_fn_custom
    return self._scale_fn_ref(x) if custom is None else custom(x)
|
| 1622 |
+
|
| 1623 |
+
@staticmethod
|
| 1624 |
+
def _triangular_scale_fn(x: float) -> float:
|
| 1625 |
+
return 1.0
|
| 1626 |
+
|
| 1627 |
+
@staticmethod
|
| 1628 |
+
def _triangular2_scale_fn(x: float) -> float:
|
| 1629 |
+
return 1 / (2.0 ** (x - 1))
|
| 1630 |
+
|
| 1631 |
+
@staticmethod
|
| 1632 |
+
def _exp_range_scale_fn(gamma: float, x: float) -> float:
|
| 1633 |
+
return gamma**x
|
| 1634 |
+
|
| 1635 |
+
def get_lr(self):
    """Calculate the learning rate at batch index.

    ``self.last_epoch`` is treated as the index of the last batch.

    If ``self.cycle_momentum`` is ``True``, this also writes the cycled
    momentum (or beta1) back into the optimizer's param groups as a
    side effect.
    """
    _warn_get_lr_called_within_step(self)

    # Position within the current cycle: `cycle` is 1-based, `x` in [0, 1).
    progress = self.last_epoch / self.total_size
    cycle = math.floor(1 + progress)
    x = 1.0 + progress - cycle
    if x <= self.step_ratio:
        # Rising half of the cycle.
        scale_factor = x / self.step_ratio
    else:
        # Falling half of the cycle.
        scale_factor = (x - 1) / (self.step_ratio - 1)

    lrs = []
    for base_lr, max_lr in zip(self.base_lrs, self.max_lrs):
        amplitude = (max_lr - base_lr) * scale_factor
        if self.scale_mode == "cycle":
            lrs.append(base_lr + amplitude * self.scale_fn(cycle))
        else:
            lrs.append(base_lr + amplitude * self.scale_fn(self.last_epoch))

    if self.cycle_momentum:
        # Momentum moves opposite to lr: high when lr is low and vice versa.
        momentums = []
        for base_momentum, max_momentum in zip(
            self.base_momentums, self.max_momentums
        ):
            amplitude = (max_momentum - base_momentum) * scale_factor
            if self.scale_mode == "cycle":
                momentums.append(max_momentum - amplitude * self.scale_fn(cycle))
            else:
                momentums.append(
                    max_momentum - amplitude * self.scale_fn(self.last_epoch)
                )
        for param_group, momentum in zip(self.optimizer.param_groups, momentums):
            if self.use_beta1:
                param_group["betas"] = (momentum, *param_group["betas"][1:])
            else:
                param_group["momentum"] = momentum

    return lrs
|
| 1681 |
+
|
| 1682 |
+
def state_dict(self):
    """Return the scheduler state, omitting unpicklable scale-fn references."""
    state = super().state_dict()
    # `_scale_fn_ref` may hold a bound/partial method and cannot be pickled.
    state.pop("_scale_fn_ref", None)
    custom = state.pop("_scale_fn_custom")
    state["_scale_fn_custom"] = None
    if custom is not None and not isinstance(custom, types.FunctionType):
        # Persist the state of a callable *object* only; plain functions
        # and lambdas are deliberately dropped.
        state["_scale_fn_custom"] = custom.__dict__.copy()

    return state
|
| 1695 |
+
|
| 1696 |
+
def load_state_dict(self, state_dict):
    """Load the scheduler's state."""
    # The custom scale-fn state is handled separately from the rest.
    custom_state = state_dict.pop("_scale_fn_custom")
    super().load_state_dict(state_dict)
    if custom_state is not None:
        # Restore the callable object's attributes captured by state_dict().
        self._scale_fn_custom.__dict__.update(custom_state)
    self._init_scale_fn()
|
| 1703 |
+
|
| 1704 |
+
|
| 1705 |
+
class CosineAnnealingWarmRestarts(LRScheduler):
    r"""Set the learning rate of each parameter group using a cosine annealing schedule.

    :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}` is the number
    of epochs since the last restart, and :math:`T_{i}` is the number of
    epochs between two warm restarts in SGDR:

    .. math::
        \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
        \cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right)

    When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
    When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`.

    It has been proposed in
    `SGDR: Stochastic Gradient Descent with Warm Restarts`_.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        T_0 (int): Number of iterations until the first restart.
        T_mult (int, optional): A factor by which :math:`T_{i}` increases after a restart. Default: 1.
        eta_min (float, optional): Minimum learning rate. Default: 0.
        last_epoch (int, optional): The index of the last epoch. Default: -1.
        verbose (bool | str): If ``True``, prints a message to stdout for
            each update. Default: ``False``.

            .. deprecated:: 2.2
                ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
                learning rate.

    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """

    def __init__(
        self,
        optimizer: Optimizer,
        T_0: int,
        T_mult=1,
        eta_min=0.0,
        last_epoch=-1,
        verbose="deprecated",
    ):  # noqa: D107
        # Validate the restart schedule parameters before storing anything.
        if T_0 <= 0 or not isinstance(T_0, int):
            raise ValueError(f"Expected positive integer T_0, but got {T_0}")
        if T_mult < 1 or not isinstance(T_mult, int):
            raise ValueError(f"Expected integer T_mult >= 1, but got {T_mult}")
        if not isinstance(eta_min, (float, int)):
            raise ValueError(
                f"Expected float or int eta_min, but got {eta_min} of type {type(eta_min)}"
            )
        self.T_0 = T_0
        self.T_i = T_0          # current restart period length
        self.T_mult = T_mult
        self.eta_min = eta_min
        self.T_cur = last_epoch  # epochs elapsed since the last restart
        super().__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        """Compute the learning rate for the current position within the restart period."""
        _warn_get_lr_called_within_step(self)

        # Shared cosine term for every param group at this T_cur / T_i.
        cos_term = 1 + math.cos(math.pi * self.T_cur / self.T_i)
        return [
            self.eta_min + (base_lr - self.eta_min) * cos_term / 2
            for base_lr in self.base_lrs
        ]

    def step(self, epoch=None):
        """Step could be called after every batch update.

        Example:
            >>> # xdoctest: +SKIP("Undefined vars")
            >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
            >>> iters = len(dataloader)
            >>> for epoch in range(20):
            >>>     for i, sample in enumerate(dataloader):
            >>>         inputs, labels = sample['inputs'], sample['labels']
            >>>         optimizer.zero_grad()
            >>>         outputs = net(inputs)
            >>>         loss = criterion(outputs, labels)
            >>>         loss.backward()
            >>>         optimizer.step()
            >>>         scheduler.step(epoch + i / iters)

        This function can be called in an interleaved way.

        Example:
            >>> # xdoctest: +SKIP("Undefined vars")
            >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
            >>> for epoch in range(20):
            >>>     scheduler.step()
            >>> scheduler.step(26)
            >>> scheduler.step()  # scheduler.step(27), instead of scheduler(20)
        """
        # First-ever step with no explicit epoch starts the schedule at 0.
        if epoch is None and self.last_epoch < 0:
            epoch = 0

        if epoch is None:
            # Sequential stepping: advance one epoch and restart when the
            # current period is exhausted.
            epoch = self.last_epoch + 1
            self.T_cur = self.T_cur + 1
            if self.T_cur >= self.T_i:
                self.T_cur = self.T_cur - self.T_i
                self.T_i = self.T_i * self.T_mult
        else:
            # Random-access stepping: derive which restart period `epoch`
            # falls into, in closed form.
            if epoch < 0:
                raise ValueError(f"Expected non-negative epoch, but got {epoch}")
            if epoch >= self.T_0:
                if self.T_mult == 1:
                    # Equal-length periods: simple modulo.
                    self.T_cur = epoch % self.T_0
                else:
                    # Geometric periods: invert the geometric series sum to
                    # find the period index n containing `epoch`.
                    n = int(
                        math.log(
                            (epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult
                        )
                    )
                    self.T_cur = epoch - self.T_0 * (self.T_mult**n - 1) / (
                        self.T_mult - 1
                    )
                    self.T_i = self.T_0 * self.T_mult ** (n)
            else:
                self.T_i = self.T_0
                self.T_cur = epoch
        self.last_epoch = math.floor(epoch)

        # Push the freshly computed lrs into the optimizer.
        with _enable_get_lr_call(self):
            for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
                param_group["lr"] = lr

        self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
|
| 1838 |
+
|
| 1839 |
+
|
| 1840 |
+
class _SchedulePhase(TypedDict):
|
| 1841 |
+
end_step: float
|
| 1842 |
+
start_lr: str
|
| 1843 |
+
end_lr: str
|
| 1844 |
+
start_momentum: str
|
| 1845 |
+
end_momentum: str
|
| 1846 |
+
|
| 1847 |
+
|
| 1848 |
+
class OneCycleLR(LRScheduler):
    r"""Sets the learning rate of each parameter group according to the 1cycle learning rate policy.

    The 1cycle policy anneals the learning rate from an initial learning rate to some maximum
    learning rate and then from that maximum learning rate to some minimum learning rate much
    lower than the initial learning rate.
    This policy was initially described in the paper `Super-Convergence:
    Very Fast Training of Neural Networks Using Large Learning Rates`_.

    The 1cycle learning rate policy changes the learning rate after every batch.
    `step` should be called after a batch has been used for training.

    This scheduler is not chainable.

    Note also that the total number of steps in the cycle can be determined in one
    of two ways (listed in order of precedence):

    #. A value for total_steps is explicitly provided.
    #. A number of epochs (epochs) and a number of steps per epoch
       (steps_per_epoch) are provided.
       In this case, the number of total steps is inferred by
       total_steps = epochs * steps_per_epoch

    You must either provide a value for total_steps or provide a value for both
    epochs and steps_per_epoch.

    The default behaviour of this scheduler follows the fastai implementation of 1cycle, which
    claims that "unpublished work has shown even better results by using only two phases". To
    mimic the behaviour of the original paper instead, set ``three_phase=True``.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        max_lr (float or list): Upper learning rate boundaries in the cycle
            for each parameter group.
        total_steps (int): The total number of steps in the cycle. Note that
            if a value is not provided here, then it must be inferred by providing
            a value for epochs and steps_per_epoch.
            Default: None
        epochs (int): The number of epochs to train for. This is used along
            with steps_per_epoch in order to infer the total number of steps in the cycle
            if a value for total_steps is not provided.
            Default: None
        steps_per_epoch (int): The number of steps per epoch to train for. This is
            used along with epochs in order to infer the total number of steps in the
            cycle if a value for total_steps is not provided.
            Default: None
        pct_start (float): The percentage of the cycle (in number of steps) spent
            increasing the learning rate.
            Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: "cos" for cosine annealing, "linear" for
            linear annealing.
            Default: 'cos'
        cycle_momentum (bool): If ``True``, momentum is cycled inversely
            to learning rate between 'base_momentum' and 'max_momentum'.
            Default: True
        base_momentum (float or list): Lower momentum boundaries in the cycle
            for each parameter group. Note that momentum is cycled inversely
            to learning rate; at the peak of a cycle, momentum is
            'base_momentum' and learning rate is 'max_lr'.
            Default: 0.85
        max_momentum (float or list): Upper momentum boundaries in the cycle
            for each parameter group. Functionally,
            it defines the cycle amplitude (max_momentum - base_momentum).
            Note that momentum is cycled inversely
            to learning rate; at the start of a cycle, momentum is 'max_momentum'
            and learning rate is 'base_lr'
            Default: 0.95
        div_factor (float): Determines the initial learning rate via
            initial_lr = max_lr/div_factor
            Default: 25
        final_div_factor (float): Determines the minimum learning rate via
            min_lr = initial_lr/final_div_factor
            Default: 1e4
        three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the
            learning rate according to 'final_div_factor' instead of modifying the second
            phase (the first two phases will be symmetrical about the step indicated by
            'pct_start').
        last_epoch (int): The index of the last batch. This parameter is used when
            resuming a training job. Since `step()` should be invoked after each
            batch instead of after each epoch, this number represents the total
            number of *batches* computed, not the total number of epochs computed.
            When last_epoch=-1, the schedule is started from the beginning.
            Default: -1
        verbose (bool | str): If ``True``, prints a message to stdout for
            each update. Default: ``False``.

            .. deprecated:: 2.2
                ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the
                learning rate.

    Example:
        >>> # xdoctest: +SKIP
        >>> data_loader = torch.utils.data.DataLoader(...)
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
        >>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10)
        >>> for epoch in range(10):
        >>>     for batch in data_loader:
        >>>         train_batch(...)
        >>>         optimizer.step()
        >>>         scheduler.step()


    .. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
        https://arxiv.org/abs/1708.07120
    """

    def __init__(
        self,
        optimizer: Optimizer,
        max_lr: Union[float, List[float]],
        total_steps: Optional[int] = None,
        epochs: Optional[int] = None,
        steps_per_epoch: Optional[int] = None,
        pct_start=0.3,
        anneal_strategy: Literal["cos", "linear"] = "cos",
        cycle_momentum=True,
        base_momentum: Union[float, List[float]] = 0.85,
        max_momentum: Union[float, List[float]] = 0.95,
        div_factor=25.0,
        final_div_factor=1e4,
        three_phase=False,
        last_epoch=-1,
        verbose="deprecated",
    ):  # noqa: D107
        # Validate optimizer.
        if not isinstance(optimizer, Optimizer):
            raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
        self.optimizer = optimizer

        # Resolve the cycle length: an explicit total_steps takes precedence
        # over epochs * steps_per_epoch.
        if total_steps is not None:
            if total_steps <= 0 or not isinstance(total_steps, int):
                raise ValueError(
                    f"Expected positive integer total_steps, but got {total_steps}"
                )
            self.total_steps = total_steps
        elif epochs is not None and steps_per_epoch is not None:
            if not isinstance(epochs, int) or epochs <= 0:
                raise ValueError(f"Expected positive integer epochs, but got {epochs}")
            if not isinstance(steps_per_epoch, int) or steps_per_epoch <= 0:
                raise ValueError(
                    f"Expected positive integer steps_per_epoch, but got {steps_per_epoch}"
                )
            self.total_steps = epochs * steps_per_epoch
        else:
            raise ValueError(
                "You must define either total_steps OR (epochs AND steps_per_epoch)"
            )

        # Build the piecewise schedule. Each phase records where it ends and
        # which param-group keys hold its start/end lr and momentum.
        warmup_end = float(pct_start * self.total_steps) - 1
        self._schedule_phases: List[_SchedulePhase]
        if three_phase:
            self._schedule_phases = [
                {
                    "end_step": warmup_end,
                    "start_lr": "initial_lr",
                    "end_lr": "max_lr",
                    "start_momentum": "max_momentum",
                    "end_momentum": "base_momentum",
                },
                {
                    "end_step": float(2 * pct_start * self.total_steps) - 2,
                    "start_lr": "max_lr",
                    "end_lr": "initial_lr",
                    "start_momentum": "base_momentum",
                    "end_momentum": "max_momentum",
                },
                {
                    # Annihilation phase down to min_lr.
                    "end_step": self.total_steps - 1,
                    "start_lr": "initial_lr",
                    "end_lr": "min_lr",
                    "start_momentum": "max_momentum",
                    "end_momentum": "max_momentum",
                },
            ]
        else:
            self._schedule_phases = [
                {
                    "end_step": warmup_end,
                    "start_lr": "initial_lr",
                    "end_lr": "max_lr",
                    "start_momentum": "max_momentum",
                    "end_momentum": "base_momentum",
                },
                {
                    "end_step": self.total_steps - 1,
                    "start_lr": "max_lr",
                    "end_lr": "min_lr",
                    "start_momentum": "base_momentum",
                    "end_momentum": "max_momentum",
                },
            ]

        # Validate pct_start.
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError(
                f"Expected float between 0 and 1 pct_start, but got {pct_start}"
            )

        # Validate anneal_strategy.
        if anneal_strategy not in ["cos", "linear"]:
            raise ValueError(
                f"anneal_strategy must be one of 'cos' or 'linear', instead got {anneal_strategy}"
            )
        else:
            self._anneal_func_type = anneal_strategy

        # Derive initial/max/min lr per param group from max_lr and the two
        # division factors.
        max_lrs = _format_param("max_lr", self.optimizer, max_lr)
        if last_epoch == -1:
            for idx, group in enumerate(self.optimizer.param_groups):
                group["initial_lr"] = max_lrs[idx] / div_factor
                group["max_lr"] = max_lrs[idx]
                group["min_lr"] = group["initial_lr"] / final_div_factor

        # Initialize momentum cycling (requires momentum or Adam-style betas).
        self.cycle_momentum = cycle_momentum
        if self.cycle_momentum:
            if (
                "momentum" not in self.optimizer.defaults
                and "betas" not in self.optimizer.defaults
            ):
                raise ValueError(
                    "optimizer must support momentum or beta1 with `cycle_momentum` option enabled"
                )
            self.use_beta1 = "betas" in self.optimizer.defaults
            max_momentums = _format_param("max_momentum", optimizer, max_momentum)
            base_momentums = _format_param("base_momentum", optimizer, base_momentum)
            if last_epoch == -1:
                # Momentum starts at its maximum (inverse of the lr cycle).
                for m_max, m_base, group in zip(
                    max_momentums, base_momentums, optimizer.param_groups
                ):
                    if self.use_beta1:
                        group["betas"] = (m_max, *group["betas"][1:])
                    else:
                        group["momentum"] = m_max
                    group["max_momentum"] = m_max
                    group["base_momentum"] = m_base

        super().__init__(optimizer, last_epoch, verbose)

    def _anneal_func(self, *args, **kwargs):
        # Dispatch on the stored strategy name; fall back to the legacy
        # `anneal_func` attribute for schedulers restored from old state.
        if hasattr(self, "_anneal_func_type"):
            anneal_type = self._anneal_func_type
            if anneal_type == "cos":
                return self._annealing_cos(*args, **kwargs)
            if anneal_type == "linear":
                return self._annealing_linear(*args, **kwargs)
            raise ValueError(f"Unknown _anneal_func_type: {anneal_type}")
        else:
            # For BC
            return self.anneal_func(*args, **kwargs)  # type: ignore[attr-defined]

    @staticmethod
    def _annealing_cos(start, end, pct):
        """Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."""
        weight = math.cos(math.pi * pct) + 1
        return end + (start - end) / 2.0 * weight

    @staticmethod
    def _annealing_linear(start, end, pct):
        """Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."""
        return (end - start) * pct + start

    def get_lr(self):
        """Compute the learning rate of each parameter group."""
        _warn_get_lr_called_within_step(self)

        step_num = self.last_epoch
        if step_num > self.total_steps:
            raise ValueError(
                f"Tried to step {step_num} times. The specified number of total steps is {self.total_steps}"
            )

        lrs = []
        last_phase_idx = len(self._schedule_phases) - 1
        for group in self.optimizer.param_groups:
            start_step = 0.0
            # Find the phase containing step_num (the final phase absorbs
            # any step at or past its end).
            for i, phase in enumerate(self._schedule_phases):
                end_step = phase["end_step"]
                if step_num <= end_step or i == last_phase_idx:
                    pct = (step_num - start_step) / (end_step - start_step)
                    computed_lr = self._anneal_func(
                        group[phase["start_lr"]], group[phase["end_lr"]], pct
                    )
                    if self.cycle_momentum:
                        computed_momentum = self._anneal_func(
                            group[phase["start_momentum"]],
                            group[phase["end_momentum"]],
                            pct,
                        )
                    break
                start_step = phase["end_step"]

            lrs.append(computed_lr)  # type: ignore[possibly-undefined]
            if self.cycle_momentum:
                # Side effect: write the cycled momentum back into the group.
                if self.use_beta1:
                    group["betas"] = (computed_momentum, *group["betas"][1:])  # type: ignore[possibly-undefined]
                else:
                    group["momentum"] = computed_momentum  # type: ignore[possibly-undefined]

        return lrs
|
vllm/lib/python3.10/site-packages/torch/optim/optimizer.py
ADDED
|
@@ -0,0 +1,1052 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
"""Base optimizer."""
|
| 4 |
+
import functools
|
| 5 |
+
import warnings
|
| 6 |
+
from collections import defaultdict, OrderedDict
|
| 7 |
+
from copy import deepcopy
|
| 8 |
+
from itertools import chain
|
| 9 |
+
from typing import (
|
| 10 |
+
Any,
|
| 11 |
+
Callable,
|
| 12 |
+
cast,
|
| 13 |
+
DefaultDict,
|
| 14 |
+
Dict,
|
| 15 |
+
Hashable,
|
| 16 |
+
Iterable,
|
| 17 |
+
List,
|
| 18 |
+
Optional,
|
| 19 |
+
overload,
|
| 20 |
+
Set,
|
| 21 |
+
Tuple,
|
| 22 |
+
TypeVar,
|
| 23 |
+
Union,
|
| 24 |
+
)
|
| 25 |
+
from typing_extensions import ParamSpec, Self, TypeAlias
|
| 26 |
+
|
| 27 |
+
import torch
|
| 28 |
+
import torch.utils.hooks as hooks
|
| 29 |
+
from torch._utils import is_compiling
|
| 30 |
+
from torch.utils._foreach_utils import (
|
| 31 |
+
_get_foreach_kernels_supported_devices,
|
| 32 |
+
_get_fused_kernels_supported_devices,
|
| 33 |
+
_group_tensors_by_device_and_dtype,
|
| 34 |
+
Indices,
|
| 35 |
+
TensorListList,
|
| 36 |
+
)
|
| 37 |
+
from torch.utils.hooks import RemovableHandle
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# Type aliases shared across the optimizer module.
Args: TypeAlias = Tuple[Any, ...]
Kwargs: TypeAlias = Dict[str, Any]
StateDict: TypeAlias = Dict[str, Any]
# Maps a device (or None when grouping is skipped) to a tensor on that device.
DeviceDict = Dict[Optional[torch.device], torch.Tensor]


# Signatures of hooks registered globally for every optimizer: a pre-hook may
# return replacement (args, kwargs) for step(); a post-hook returns nothing.
GlobalOptimizerPreHook: TypeAlias = Callable[
    ["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]
]
GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None]

__all__ = [
    "Optimizer",
    "register_optimizer_step_pre_hook",
    "register_optimizer_step_post_hook",
]
# Registries for the global (all-optimizer) step hooks; OrderedDict keyed by
# RemovableHandle id so insertion order is the call order.
_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict()
_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict()
# Tensor types eligible for the foreach (multi-tensor) fast path.
_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter]
+
|
| 60 |
+
|
| 61 |
+
class _RequiredParameter:
|
| 62 |
+
"""Singleton class representing a required parameter for an Optimizer."""
|
| 63 |
+
|
| 64 |
+
def __repr__(self) -> str:
|
| 65 |
+
return "<required parameter>"
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
required = _RequiredParameter()
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _use_grad_for_differentiable(func):
|
| 72 |
+
def _use_grad(self, *args, **kwargs):
|
| 73 |
+
import torch._dynamo
|
| 74 |
+
|
| 75 |
+
prev_grad = torch.is_grad_enabled()
|
| 76 |
+
try:
|
| 77 |
+
# Note on graph break below:
|
| 78 |
+
# we need to graph break to ensure that aot respects the no_grad annotation.
|
| 79 |
+
# This is important for perf because without this, functionalization will generate an epilogue
|
| 80 |
+
# which updates the mutated parameters of the optimizer which is *not* visible to inductor, as a result,
|
| 81 |
+
# inductor will allocate for every parameter in the model, which is horrible.
|
| 82 |
+
# With this, aot correctly sees that this is an inference graph, and functionalization will generate
|
| 83 |
+
# an epilogue which is appended to the graph, which *is* visible to inductor, as a result, inductor sees that
|
| 84 |
+
# step is in place and is able to avoid the extra allocation.
|
| 85 |
+
# In the future, we will either 1) continue to graph break on backward, so this graph break does not matter
|
| 86 |
+
# or 2) have a fully fused forward and backward graph, which will have no_grad by default, and we can remove this
|
| 87 |
+
# graph break to allow the fully fused fwd-bwd-optimizer graph to be compiled.
|
| 88 |
+
# see https://github.com/pytorch/pytorch/issues/104053
|
| 89 |
+
torch.set_grad_enabled(self.defaults["differentiable"])
|
| 90 |
+
torch._dynamo.graph_break()
|
| 91 |
+
ret = func(self, *args, **kwargs)
|
| 92 |
+
finally:
|
| 93 |
+
torch._dynamo.graph_break()
|
| 94 |
+
torch.set_grad_enabled(prev_grad)
|
| 95 |
+
return ret
|
| 96 |
+
|
| 97 |
+
functools.update_wrapper(_use_grad, func)
|
| 98 |
+
return _use_grad
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _get_value(x):
|
| 102 |
+
# item is significantly faster than a cpu tensor in eager mode
|
| 103 |
+
if not torch.jit.is_scripting() and is_compiling():
|
| 104 |
+
return x
|
| 105 |
+
else:
|
| 106 |
+
return x.item() if isinstance(x, torch.Tensor) else x
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _stack_if_compiling(x):
|
| 110 |
+
if not torch.jit.is_scripting() and is_compiling():
|
| 111 |
+
return torch.stack(x)
|
| 112 |
+
else:
|
| 113 |
+
return x
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _disable_dynamo_if_unsupported(single_tensor_fn=None):
    """Decorator factory: fall back to a dynamo-disabled call when compiling a
    foreach optimizer step whose ``state_steps`` live on CUDA (unsupported in eager).

    ``single_tensor_fn``, when given, is injected into this module's globals.
    """
    # workaround for torchscript BC
    # it requires all called functions to be in the
    # global environment at the site at which the
    # maybe_fallback closure is created
    if single_tensor_fn:
        globals()[single_tensor_fn.__name__] = single_tensor_fn

    def wrapper(func):
        import inspect

        disabled_func = torch._disable_dynamo(func)
        ps = inspect.signature(func).parameters
        has_state_steps = True
        try:
            state_steps_ind = list(ps.keys()).index("state_steps")
        except ValueError:
            has_state_steps = False

        # Today, there are cases where we stack state steps
        # and pass them as the value arg of foreach ops.
        # Having state steps on cuda as the value arg is not supported in eager,
        # but this only occurs in the rare case that the user explicitly deletes
        # the capturable flag. If capturable=True, this is not a problem.
        @functools.wraps(func)
        def maybe_fallback(*args, **kwargs):
            # NOTE: `and` binds tighter than `or` here — the fallback triggers
            # when (not capturable AND positional state_steps on cuda) OR
            # (keyword state_steps on cuda). `state_steps_ind` is only read
            # when `has_state_steps` is True, guarded by short-circuiting.
            if is_compiling() and (
                not kwargs.get("capturable", False)
                and has_state_steps
                and (args[state_steps_ind] and args[state_steps_ind][0].is_cuda)
                or (
                    "state_steps" in kwargs
                    and kwargs["state_steps"]
                    and kwargs["state_steps"][0].is_cuda
                )
            ):
                return disabled_func(*args, **kwargs)
            else:
                return func(*args, **kwargs)

        return maybe_fallback

    return wrapper
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# For any optimizer with a faster implementation, we attempt to default to the
|
| 162 |
+
# fastest + stablest whenever possible. For foreach, the requirements are to have
|
| 163 |
+
# native params all on CUDA. For fused, there's currently the additional requirement
|
| 164 |
+
# that the tensors' dtypes must be floating point. Neither alternative supports
|
| 165 |
+
# torch.jit.script nor differentiable, so we fall back to the single tensor
|
| 166 |
+
# implementation in those cases.
|
| 167 |
+
def _default_to_fused_or_foreach(
    params: List[torch.Tensor], differentiable: bool, use_fused: bool = False
) -> Tuple[bool, bool]:
    """Decide whether the fused or the foreach implementation may be used by default.

    Returns a ``(fused, foreach)`` pair; both are False under torchscript or
    when differentiating through the step.
    """
    if torch.jit.is_scripting() or differentiable:
        return False, False

    fused_devices = _get_fused_kernels_supported_devices()
    foreach_devices = _get_foreach_kernels_supported_devices()

    def _fused_ok(p):
        # fused additionally requires floating-point params
        return p is None or (
            type(p) in _foreach_supported_types
            and p.device.type in fused_devices
            and torch.is_floating_point(p)
        )

    def _foreach_ok(p):
        return p is None or (
            type(p) in _foreach_supported_types
            and p.device.type in foreach_devices
        )

    fused = use_fused and all(_fused_ok(p) for p in params)
    foreach = not fused and all(_foreach_ok(p) for p in params)
    return fused, foreach
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def _device_dtype_check_for_fused(
|
| 196 |
+
p: torch.Tensor, cuda_unsupported: bool = False
|
| 197 |
+
) -> None:
|
| 198 |
+
fused_supported_devices = _get_fused_kernels_supported_devices()
|
| 199 |
+
if cuda_unsupported:
|
| 200 |
+
fused_supported_devices.remove("cuda")
|
| 201 |
+
if not (p.device.type in fused_supported_devices and torch.is_floating_point(p)):
|
| 202 |
+
raise RuntimeError(
|
| 203 |
+
"`fused=True` requires all the params to be floating point Tensors of "
|
| 204 |
+
f"supported devices: {fused_supported_devices} but {p.dtype} and {p.device.type}"
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def _view_as_real(params, *state_and_grads):
|
| 209 |
+
for i, p in enumerate(params):
|
| 210 |
+
if torch.is_complex(p):
|
| 211 |
+
params[i] = torch.view_as_real(params[i])
|
| 212 |
+
for s in state_and_grads:
|
| 213 |
+
s[i] = torch.view_as_real(s[i])
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def _get_scalar_dtype(is_fused=None):
|
| 217 |
+
if is_fused:
|
| 218 |
+
return torch.float32
|
| 219 |
+
return (
|
| 220 |
+
torch.float64 if torch.get_default_dtype() == torch.float64 else torch.float32
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def _get_capturable_supported_devices(supports_xla: bool = True) -> List[str]:
|
| 225 |
+
r"""Return the device type list that supports capturable optimizer."""
|
| 226 |
+
capturable_supported_devices = ["cuda", "xpu", "hpu"]
|
| 227 |
+
if not torch.jit.is_scripting():
|
| 228 |
+
capturable_supported_devices.append(torch._C._get_privateuse1_backend_name())
|
| 229 |
+
if supports_xla:
|
| 230 |
+
capturable_supported_devices.append("xla")
|
| 231 |
+
return capturable_supported_devices
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
# Common doc strings among optimizers
# (interpolated into per-optimizer class docstrings; keep indentation and
# wording consistent across all optimizers that embed them).
_foreach_doc = r"""foreach (bool, optional): whether foreach implementation of optimizer
        is used. If unspecified by the user (so foreach is None), we will try to use
        foreach over the for-loop implementation on CUDA, since it is usually
        significantly more performant. Note that the foreach implementation uses
        ~ sizeof(params) more peak memory than the for-loop version due to the intermediates
        being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer
        parameters through the optimizer at a time or switch this flag to False (default: None)"""

_fused_doc = r"""fused (bool, optional): whether the fused implementation is used.
        Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16`
        are supported. (default: None)

    .. note:: The foreach and fused implementations are typically faster than the for-loop,
        single-tensor implementation, with fused being theoretically fastest with both
        vertical and horizontal fusion. As such, if the user has not specified either
        flag (i.e., when foreach = fused = None), we will attempt defaulting to the foreach
        implementation when the tensors are all on CUDA. Why not fused? Since the fused
        implementation is relatively new, we want to give it sufficient bake-in time.
        To specify fused, pass True for fused. To force running the for-loop
        implementation, pass False for either foreach or fused. """

_capturable_doc = r"""capturable (bool, optional): whether this instance is safe to
        capture in a CUDA graph. Passing True can impair ungraphed performance,
        so if you don't intend to graph capture this instance, leave it False
        (default: False)"""

_differentiable_doc = r"""differentiable (bool, optional): whether autograd should
        occur through the optimizer step in training. Otherwise, the step()
        function runs in a torch.no_grad() context. Setting to True can impair
        performance, so leave it False if you don't intend to run autograd
        through this instance (default: False)"""

_maximize_doc = r"""maximize (bool, optional): maximize the objective with respect to the
        params, instead of minimizing (default: False)"""
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def register_optimizer_step_pre_hook(hook: GlobalOptimizerPreHook) -> RemovableHandle:
    r"""Register a pre hook that fires before every optimizer's ``step``.

    The hook should have the following signature::

        hook(optimizer, args, kwargs) -> None or modified args and kwargs

    Args:
        hook (Callable): A user defined hook which is registered on all optimizers.

    Returns:
        :class:`torch.utils.hooks.RemovableHandle`:
            a handle whose ``handle.remove()`` unregisters the hook.
    """
    removable = hooks.RemovableHandle(_global_optimizer_pre_hooks)
    _global_optimizer_pre_hooks[removable.id] = hook
    return removable
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> RemovableHandle:
    r"""Register a post hook that fires after every optimizer's ``step``.

    The hook should have the following signature::

        hook(optimizer, args, kwargs) -> None

    Args:
        hook (Callable): A user defined hook which is registered on all optimizers.

    Returns:
        :class:`torch.utils.hooks.RemovableHandle`:
            a handle whose ``handle.remove()`` unregisters the hook.
    """
    removable = hooks.RemovableHandle(_global_optimizer_post_hooks)
    _global_optimizer_post_hooks[removable.id] = hook
    return removable
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
# Accepted shape of the "params" constructor argument: either a flat iterable
# of tensors or an iterable of param-group dicts.
ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]

# Generic typing helpers for the decorated step() wrappers below.
_P = ParamSpec("_P")
R = TypeVar("R")
T = TypeVar("T")
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
class Optimizer:
|
| 319 |
+
r"""Base class for all optimizers.
|
| 320 |
+
|
| 321 |
+
.. warning::
|
| 322 |
+
Parameters need to be specified as collections that have a deterministic
|
| 323 |
+
ordering that is consistent between runs. Examples of objects that don't
|
| 324 |
+
satisfy those properties are sets and iterators over values of dictionaries.
|
| 325 |
+
|
| 326 |
+
Args:
|
| 327 |
+
params (iterable): an iterable of :class:`torch.Tensor` s or
|
| 328 |
+
:class:`dict` s. Specifies what Tensors should be optimized.
|
| 329 |
+
defaults: (dict): a dict containing default values of optimization
|
| 330 |
+
options (used when a parameter group doesn't specify them).
|
| 331 |
+
"""
|
| 332 |
+
|
| 333 |
+
OptimizerPreHook: TypeAlias = Callable[[Self, Args, Kwargs], Optional[Tuple[Args, Kwargs]]] # type: ignore[misc]
|
| 334 |
+
OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc]
|
| 335 |
+
|
| 336 |
+
_optimizer_step_pre_hooks: Dict[int, OptimizerPreHook]
|
| 337 |
+
_optimizer_step_post_hooks: Dict[int, OptimizerPostHook]
|
| 338 |
+
_optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'
|
| 339 |
+
_optimizer_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
|
| 340 |
+
_optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
|
| 341 |
+
_optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'
|
| 342 |
+
|
| 343 |
+
    def __init__(self, params: ParamsT, defaults: Dict[str, Any]) -> None:  # noqa: D107
        """Store ``defaults``, set up all hook registries, and build param groups."""
        torch._C._log_api_usage_once("python.optimizer")
        self.defaults = defaults
        # Per-instance hook registries (step / state_dict / load_state_dict,
        # pre and post variants), each keyed by RemovableHandle id.
        self._optimizer_step_pre_hooks = OrderedDict()
        self._optimizer_step_post_hooks = OrderedDict()
        self._optimizer_state_dict_pre_hooks = OrderedDict()
        self._optimizer_state_dict_post_hooks = OrderedDict()
        self._optimizer_load_state_dict_pre_hooks = OrderedDict()
        self._optimizer_load_state_dict_post_hooks = OrderedDict()

        # Install the profiling/hook wrapper around step() on the class.
        self._patch_step_function()

        if isinstance(params, torch.Tensor):
            raise TypeError(
                "params argument given to the optimizer should be "
                "an iterable of Tensors or dicts, but got " + torch.typename(params)
            )

        self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict)
        self.param_groups: List[Dict[str, Any]] = []

        # Materialize the iterable once; a bare list of tensors becomes a
        # single implicit param group.
        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{"params": param_groups}]

        for param_group in param_groups:
            self.add_param_group(cast(dict, param_group))

        # Allows _cuda_graph_capture_health_check to rig a poor man's TORCH_WARN_ONCE in python,
        # which I don't think exists
        # https://github.com/pytorch/pytorch/issues/72948
        self._warned_capturable_if_run_uncaptured = True
|
| 377 |
+
|
| 378 |
+
def __getstate__(self) -> Dict[str, Any]: # noqa: D105
|
| 379 |
+
return {
|
| 380 |
+
"defaults": self.defaults,
|
| 381 |
+
"state": self.state,
|
| 382 |
+
"param_groups": self.param_groups,
|
| 383 |
+
}
|
| 384 |
+
|
| 385 |
+
def __setstate__(self, state: Dict[str, Any]) -> None: # noqa: D105
|
| 386 |
+
self.__dict__.update(state)
|
| 387 |
+
if "_optimizer_step_pre_hooks" not in self.__dict__:
|
| 388 |
+
self._optimizer_step_pre_hooks = OrderedDict()
|
| 389 |
+
if "_optimizer_step_post_hooks" not in self.__dict__:
|
| 390 |
+
self._optimizer_step_post_hooks = OrderedDict()
|
| 391 |
+
if "_optimizer_state_dict_pre_hooks" not in self.__dict__:
|
| 392 |
+
self._optimizer_state_dict_pre_hooks = OrderedDict()
|
| 393 |
+
if "_optimizer_state_dict_post_hooks" not in self.__dict__:
|
| 394 |
+
self._optimizer_state_dict_post_hooks = OrderedDict()
|
| 395 |
+
if "_optimizer_load_state_dict_pre_hooks" not in self.__dict__:
|
| 396 |
+
self._optimizer_load_state_dict_pre_hooks = OrderedDict()
|
| 397 |
+
if "_optimizer_load_state_dict_post_hooks" not in self.__dict__:
|
| 398 |
+
self._optimizer_load_state_dict_post_hooks = OrderedDict()
|
| 399 |
+
self._patch_step_function() # To support multiprocessing pickle/unpickle
|
| 400 |
+
self.defaults.setdefault("differentiable", False)
|
| 401 |
+
|
| 402 |
+
    def __repr__(self) -> str:  # noqa: D105
        """Render the class name plus each param group's non-param options."""
        format_string = self.__class__.__name__ + " ("
        for i, group in enumerate(self.param_groups):
            format_string += "\n"
            format_string += f"Parameter Group {i}\n"
            # Sorted keys give a stable rendering; the (potentially huge)
            # "params" list itself is deliberately omitted.
            for key in sorted(group.keys()):
                if key != "params":
                    format_string += f"    {key}: {group[key]}\n"
        format_string += ")"
        return format_string
|
| 412 |
+
|
| 413 |
+
    # Currently needed by Adam and AdamW
    def _cuda_graph_capture_health_check(self) -> None:
        """Raise or warn when ``capturable`` settings disagree with an active CUDA graph capture."""
        # Note [torch.compile x capturable]
        # If we are compiling, we try to take the capturable path automatically by
        # setting the flag to True during tracing. Due to this, we skip all the checks
        # normally required for determining whether we can use CUDA graphs and
        # shunt the responsibility to torch.inductor. This saves time during tracing
        # since the checks are slow without sacrificing UX since inductor will warn
        # later if CUDA graphs cannot be enabled, e.g.,
        # https://github.com/pytorch/pytorch/blob/d3ba8901d8640eb16f88b2bfef9df7fa383d4b47/torch/_inductor/compile_fx.py#L390.
        # Thus, when compiling, inductor will determine if cudagraphs
        # can be enabled based on whether there is input mutation or CPU tensors.
        if (
            not is_compiling()
            and torch.backends.cuda.is_built()
            and torch.cuda.is_available()
        ):
            capturing = torch.cuda.is_current_stream_capturing()

            # Capturing while any group opted out of capturable is a hard error.
            if capturing and not all(
                group["capturable"] for group in self.param_groups
            ):
                raise RuntimeError(
                    "Attempting CUDA graph capture of step() for an instance of "
                    + self.__class__.__name__
                    + " but param_groups' capturable is False."
                )

            # Conversely, warn (once per instance) if everything is capturable
            # but no capture is in progress: capturable=True costs performance.
            if (
                (not getattr(self, "_warned_capturable_if_run_uncaptured", False))
                and all(group["capturable"] for group in self.param_groups)
                and (not capturing)
            ):
                warnings.warn(
                    "This instance was constructed with capturable=True or some of all the param_groups came with capturable=True, "
                    "but step() is running without CUDA graph capture. If you never intend to graph-capture this "
                    "instance, capturable=True can impair performance, and you should set capturable=False."
                )
                self._warned_capturable_if_run_uncaptured = True
|
| 452 |
+
|
| 453 |
+
    def _optimizer_step_code(self) -> None:
        """Entry point for `torch.profile.profiler`.

        When python tracing is enabled the profiler will hook into this
        function at the CPython level to inspect the optimizer's parameters and
        param groups. It is called after `step()` since many optimizers
        lazily initialize state.

        This is a workaround due to lack of a proper step hook on the optimizer,
        and will be removed if it exists.
        """
|
| 464 |
+
|
| 465 |
+
    @staticmethod
    def profile_hook_step(func: Callable[_P, R]) -> Callable[_P, R]:  # noqa: D102
        """Wrap an unbound ``step`` so it runs under a profiler record_function
        scope with global and per-instance pre/post hooks applied."""

        @functools.wraps(func)
        def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> R:
            # ``func`` is unbound: the first positional arg is the optimizer.
            self, *_ = args
            self = cast(Optimizer, self)
            profile_name = f"Optimizer.step#{self.__class__.__name__}.step"
            with torch.autograd.profiler.record_function(profile_name):
                # call optimizer step pre hooks
                for pre_hook in chain(
                    _global_optimizer_pre_hooks.values(),
                    self._optimizer_step_pre_hooks.values(),
                ):
                    # A pre-hook may replace the (args, kwargs) passed onward.
                    result = pre_hook(self, args, kwargs)
                    if result is not None:
                        if isinstance(result, tuple) and len(result) == 2:
                            args, kwargs = result  # type: ignore[assignment]
                        else:
                            raise RuntimeError(
                                f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}."
                            )

                out = func(*args, **kwargs)
                self._optimizer_step_code()

                # call optimizer step post hooks
                for post_hook in chain(
                    self._optimizer_step_post_hooks.values(),
                    _global_optimizer_post_hooks.values(),
                ):
                    post_hook(self, args, kwargs)

            return out

        return wrapper
|
| 500 |
+
|
| 501 |
+
@staticmethod
|
| 502 |
+
def _group_tensors_by_device_and_dtype(
|
| 503 |
+
tensorlistlist: TensorListList,
|
| 504 |
+
with_indices: bool = False,
|
| 505 |
+
) -> Union[
|
| 506 |
+
Dict[Tuple[None, None], Tuple[TensorListList, Indices]],
|
| 507 |
+
Dict[Tuple[torch.device, torch.dtype], Tuple[TensorListList, Indices]],
|
| 508 |
+
]:
|
| 509 |
+
"""Group a list of lists of tensors by device and dtype.
|
| 510 |
+
|
| 511 |
+
Skips this step if we are compiling since this will occur during inductor lowering.
|
| 512 |
+
"""
|
| 513 |
+
if is_compiling():
|
| 514 |
+
return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))}
|
| 515 |
+
else:
|
| 516 |
+
return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices) # type: ignore[return-value, arg-type]
|
| 517 |
+
|
| 518 |
+
    def _patch_step_function(self) -> None:
        """Wrap this class's ``step`` with ``profile_hook_step`` exactly once.

        The wrapper is installed on the *class* (not the instance) and tagged
        with a ``hooked`` attribute so repeated construction does not re-wrap.
        """
        self._zero_grad_profile_name = (
            f"Optimizer.zero_grad#{self.__class__.__name__}.zero_grad"
        )
        hooked = getattr(self.__class__.step, "hooked", None)
        if not hooked:
            self.__class__.step = self.profile_hook_step(self.__class__.step)  # type: ignore[assignment]
            self.__class__.step.hooked = True  # type: ignore[attr-defined]
|
| 526 |
+
|
| 527 |
+
def register_step_pre_hook(self, hook: OptimizerPreHook) -> RemovableHandle:
|
| 528 |
+
r"""Register an optimizer step pre hook which will be called before optimizer step.
|
| 529 |
+
|
| 530 |
+
It should have the following signature::
|
| 531 |
+
|
| 532 |
+
hook(optimizer, args, kwargs) -> None or modified args and kwargs
|
| 533 |
+
|
| 534 |
+
The ``optimizer`` argument is the optimizer instance being used. If
|
| 535 |
+
args and kwargs are modified by the pre-hook, then the transformed
|
| 536 |
+
values are returned as a tuple containing the new_args and new_kwargs.
|
| 537 |
+
|
| 538 |
+
Args:
|
| 539 |
+
hook (Callable): The user defined hook to be registered.
|
| 540 |
+
|
| 541 |
+
Returns:
|
| 542 |
+
:class:`torch.utils.hooks.RemovableHandle`:
|
| 543 |
+
a handle that can be used to remove the added hook by calling
|
| 544 |
+
``handle.remove()``
|
| 545 |
+
"""
|
| 546 |
+
handle = hooks.RemovableHandle(self._optimizer_step_pre_hooks)
|
| 547 |
+
self._optimizer_step_pre_hooks[handle.id] = hook
|
| 548 |
+
return handle
|
| 549 |
+
|
| 550 |
+
def register_step_post_hook(self, hook: OptimizerPostHook) -> RemovableHandle:
|
| 551 |
+
r"""Register an optimizer step post hook which will be called after optimizer step.
|
| 552 |
+
|
| 553 |
+
It should have the following signature::
|
| 554 |
+
|
| 555 |
+
hook(optimizer, args, kwargs) -> None
|
| 556 |
+
|
| 557 |
+
The ``optimizer`` argument is the optimizer instance being used.
|
| 558 |
+
|
| 559 |
+
Args:
|
| 560 |
+
hook (Callable): The user defined hook to be registered.
|
| 561 |
+
|
| 562 |
+
Returns:
|
| 563 |
+
:class:`torch.utils.hooks.RemovableHandle`:
|
| 564 |
+
a handle that can be used to remove the added hook by calling
|
| 565 |
+
``handle.remove()``
|
| 566 |
+
"""
|
| 567 |
+
handle = hooks.RemovableHandle(self._optimizer_step_post_hooks)
|
| 568 |
+
self._optimizer_step_post_hooks[handle.id] = hook
|
| 569 |
+
return handle
|
| 570 |
+
|
| 571 |
+
def register_state_dict_pre_hook(
|
| 572 |
+
self, hook: Callable[["Optimizer"], None], prepend: bool = False
|
| 573 |
+
) -> RemovableHandle: # noqa: D101
|
| 574 |
+
r"""Register a state dict pre-hook which will be called before :meth:`~torch.optim.Optimizer.state_dict` is called.
|
| 575 |
+
|
| 576 |
+
It should have the following signature::
|
| 577 |
+
|
| 578 |
+
hook(optimizer) -> None
|
| 579 |
+
|
| 580 |
+
The ``optimizer`` argument is the optimizer instance being used.
|
| 581 |
+
The hook will be called with argument ``self`` before calling ``state_dict`` on ``self``.
|
| 582 |
+
The registered hook can be used to perform pre-processing before the ``state_dict``
|
| 583 |
+
call is made.
|
| 584 |
+
|
| 585 |
+
Args:
|
| 586 |
+
hook (Callable): The user defined hook to be registered.
|
| 587 |
+
prepend (bool): If True, the provided pre ``hook`` will be fired before
|
| 588 |
+
all the already registered pre-hooks on ``state_dict``. Otherwise,
|
| 589 |
+
the provided ``hook`` will be fired after all the already registered
|
| 590 |
+
pre-hooks. (default: False)
|
| 591 |
+
|
| 592 |
+
Returns:
|
| 593 |
+
:class:`torch.utils.hooks.RemoveableHandle`:
|
| 594 |
+
a handle that can be used to remove the added hook by calling
|
| 595 |
+
``handle.remove()``
|
| 596 |
+
"""
|
| 597 |
+
handle = hooks.RemovableHandle(self._optimizer_state_dict_pre_hooks)
|
| 598 |
+
self._optimizer_state_dict_pre_hooks[handle.id] = hook
|
| 599 |
+
if prepend:
|
| 600 |
+
self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last=False)
|
| 601 |
+
return handle
|
| 602 |
+
|
| 603 |
+
def register_state_dict_post_hook(
|
| 604 |
+
self,
|
| 605 |
+
hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
|
| 606 |
+
prepend: bool = False,
|
| 607 |
+
) -> RemovableHandle:
|
| 608 |
+
r"""Register a state dict post-hook which will be called after :meth:`~torch.optim.Optimizer.state_dict` is called.
|
| 609 |
+
|
| 610 |
+
It should have the following signature::
|
| 611 |
+
|
| 612 |
+
hook(optimizer, state_dict) -> state_dict or None
|
| 613 |
+
|
| 614 |
+
The hook will be called with arguments ``self`` and ``state_dict`` after generating
|
| 615 |
+
a ``state_dict`` on ``self``. The hook may modify the state_dict inplace or optionally
|
| 616 |
+
return a new one. The registered hook can be used to perform post-processing
|
| 617 |
+
on the ``state_dict`` before it is returned.
|
| 618 |
+
|
| 619 |
+
Args:
|
| 620 |
+
hook (Callable): The user defined hook to be registered.
|
| 621 |
+
prepend (bool): If True, the provided post ``hook`` will be fired before
|
| 622 |
+
all the already registered post-hooks on ``state_dict``. Otherwise,
|
| 623 |
+
the provided ``hook`` will be fired after all the already registered
|
| 624 |
+
post-hooks. (default: False)
|
| 625 |
+
|
| 626 |
+
Returns:
|
| 627 |
+
:class:`torch.utils.hooks.RemoveableHandle`:
|
| 628 |
+
a handle that can be used to remove the added hook by calling
|
| 629 |
+
``handle.remove()``
|
| 630 |
+
"""
|
| 631 |
+
handle = hooks.RemovableHandle(self._optimizer_state_dict_post_hooks)
|
| 632 |
+
self._optimizer_state_dict_post_hooks[handle.id] = hook
|
| 633 |
+
if prepend:
|
| 634 |
+
self._optimizer_state_dict_post_hooks.move_to_end(handle.id, last=False)
|
| 635 |
+
return handle
|
| 636 |
+
|
| 637 |
+
    @torch._disable_dynamo
    def state_dict(self) -> StateDict:
        r"""Return the state of the optimizer as a :class:`dict`.

        It contains two entries:

        * ``state``: a Dict holding current optimization state. Its content
            differs between optimizer classes, but some common characteristics
            hold. For example, state is saved per parameter, and the parameter
            itself is NOT saved. ``state`` is a Dictionary mapping parameter ids
            to a Dict with state corresponding to each parameter.
        * ``param_groups``: a List containing all parameter groups where each
            parameter group is a Dict. Each parameter group contains metadata
            specific to the optimizer, such as learning rate and weight decay,
            as well as a List of parameter IDs of the parameters in the group.

        NOTE: The parameter IDs may look like indices but they are just IDs
        associating state with param_group. When loading from a state_dict,
        the optimizer will zip the param_group ``params`` (int IDs) and the
        optimizer ``param_groups`` (actual ``nn.Parameter`` s) in order to
        match state WITHOUT additional verification.

        A returned state dict might look something like:

        .. code-block:: text

            {
                'state': {
                    0: {'momentum_buffer': tensor(...), ...},
                    1: {'momentum_buffer': tensor(...), ...},
                    2: {'momentum_buffer': tensor(...), ...},
                    3: {'momentum_buffer': tensor(...), ...}
                },
                'param_groups': [
                    {
                        'lr': 0.01,
                        'weight_decay': 0,
                        ...
                        'params': [0]
                    },
                    {
                        'lr': 0.001,
                        'weight_decay': 0.5,
                        ...
                        'params': [1, 2, 3]
                    }
                ]
            }

        """
        # Pre-hooks may mutate optimizer state before it is serialized.
        for pre_hook in self._optimizer_state_dict_pre_hooks.values():
            pre_hook(self)

        # Save order indices instead of Tensors
        param_mappings: Dict[int, int] = {}
        start_index = 0

        def pack_group(group: Dict[str, Any]) -> Dict[str, Any]:
            # Replace each param Tensor in the group with a stable integer id,
            # assigning ids in enumeration order across groups.
            nonlocal start_index
            packed = {k: v for k, v in group.items() if k != "params"}
            param_mappings.update(
                {
                    id(p): i
                    for i, p in enumerate(group["params"], start_index)
                    # A param may appear in multiple groups; keep its first id.
                    if id(p) not in param_mappings
                }
            )
            packed["params"] = [param_mappings[id(p)] for p in group["params"]]
            start_index += len(packed["params"])
            return packed

        param_groups = [pack_group(g) for g in self.param_groups]
        # Remap state to use order indices as keys
        packed_state = {
            (param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v
            for k, v in self.state.items()
        }

        state_dict = {
            "state": packed_state,
            "param_groups": param_groups,
        }

        # Post-hooks may modify the dict in place or return a replacement.
        for post_hook in self._optimizer_state_dict_post_hooks.values():
            hook_result = post_hook(self, state_dict)
            if hook_result is not None:
                state_dict = hook_result
        return state_dict
|
| 725 |
+
|
| 726 |
+
@staticmethod
|
| 727 |
+
def _process_value_according_to_param_policy(
|
| 728 |
+
param: torch.Tensor,
|
| 729 |
+
value: torch.Tensor,
|
| 730 |
+
param_id: int,
|
| 731 |
+
param_groups: List[Dict[Any, Any]],
|
| 732 |
+
key: Hashable = None,
|
| 733 |
+
) -> torch.Tensor:
|
| 734 |
+
# Floating-point types are a bit special here. They are the only ones
|
| 735 |
+
# that are assumed to always match the type of params.
|
| 736 |
+
# Make sure state['step'] is not casted https://github.com/pytorch/pytorch/issues/74424
|
| 737 |
+
# UNLESS fused or capturable, see note [special device hosting for step]
|
| 738 |
+
fused = False
|
| 739 |
+
capturable = False
|
| 740 |
+
assert param_groups is not None
|
| 741 |
+
for pg in param_groups:
|
| 742 |
+
if param_id in pg["params"]:
|
| 743 |
+
fused = pg["fused"] if "fused" in pg else False
|
| 744 |
+
capturable = pg["capturable"] if "capturable" in pg else False
|
| 745 |
+
break
|
| 746 |
+
if key == "step":
|
| 747 |
+
if capturable or fused:
|
| 748 |
+
return value.to(dtype=torch.float32, device=param.device)
|
| 749 |
+
else:
|
| 750 |
+
return value
|
| 751 |
+
else:
|
| 752 |
+
if param.is_floating_point():
|
| 753 |
+
return value.to(dtype=param.dtype, device=param.device)
|
| 754 |
+
else:
|
| 755 |
+
return value.to(device=param.device)
|
| 756 |
+
|
| 757 |
+
def register_load_state_dict_pre_hook(
|
| 758 |
+
self,
|
| 759 |
+
hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
|
| 760 |
+
prepend: bool = False,
|
| 761 |
+
) -> RemovableHandle: # noqa: D205 D400
|
| 762 |
+
r"""Register a load_state_dict pre-hook which will be called before
|
| 763 |
+
:meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
|
| 764 |
+
following signature::
|
| 765 |
+
|
| 766 |
+
hook(optimizer, state_dict) -> state_dict or None
|
| 767 |
+
|
| 768 |
+
The ``optimizer`` argument is the optimizer instance being used and the
|
| 769 |
+
``state_dict`` argument is a shallow copy of the ``state_dict`` the user
|
| 770 |
+
passed in to ``load_state_dict``. The hook may modify the state_dict inplace
|
| 771 |
+
or optionally return a new one. If a state_dict is returned, it will be used
|
| 772 |
+
to be loaded into the optimizer.
|
| 773 |
+
|
| 774 |
+
The hook will be called with argument ``self`` and ``state_dict`` before
|
| 775 |
+
calling ``load_state_dict`` on ``self``. The registered hook can be used to
|
| 776 |
+
perform pre-processing before the ``load_state_dict`` call is made.
|
| 777 |
+
|
| 778 |
+
Args:
|
| 779 |
+
hook (Callable): The user defined hook to be registered.
|
| 780 |
+
prepend (bool): If True, the provided pre ``hook`` will be fired before
|
| 781 |
+
all the already registered pre-hooks on ``load_state_dict``. Otherwise,
|
| 782 |
+
the provided ``hook`` will be fired after all the already registered
|
| 783 |
+
pre-hooks. (default: False)
|
| 784 |
+
|
| 785 |
+
Returns:
|
| 786 |
+
:class:`torch.utils.hooks.RemoveableHandle`:
|
| 787 |
+
a handle that can be used to remove the added hook by calling
|
| 788 |
+
``handle.remove()``
|
| 789 |
+
"""
|
| 790 |
+
handle = hooks.RemovableHandle(self._optimizer_load_state_dict_pre_hooks)
|
| 791 |
+
self._optimizer_load_state_dict_pre_hooks[handle.id] = hook
|
| 792 |
+
if prepend:
|
| 793 |
+
self._optimizer_load_state_dict_pre_hooks.move_to_end(handle.id, last=False)
|
| 794 |
+
return handle
|
| 795 |
+
|
| 796 |
+
def register_load_state_dict_post_hook(
|
| 797 |
+
self, hook: Callable[["Optimizer"], None], prepend: bool = False
|
| 798 |
+
) -> RemovableHandle: # noqa: D205 D400
|
| 799 |
+
r"""Register a load_state_dict post-hook which will be called after
|
| 800 |
+
:meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
|
| 801 |
+
following signature::
|
| 802 |
+
|
| 803 |
+
hook(optimizer) -> None
|
| 804 |
+
|
| 805 |
+
The ``optimizer`` argument is the optimizer instance being used.
|
| 806 |
+
|
| 807 |
+
The hook will be called with argument ``self`` after calling
|
| 808 |
+
``load_state_dict`` on ``self``. The registered hook can be used to
|
| 809 |
+
perform post-processing after ``load_state_dict`` has loaded the
|
| 810 |
+
``state_dict``.
|
| 811 |
+
|
| 812 |
+
Args:
|
| 813 |
+
hook (Callable): The user defined hook to be registered.
|
| 814 |
+
prepend (bool): If True, the provided post ``hook`` will be fired before
|
| 815 |
+
all the already registered post-hooks on ``load_state_dict``. Otherwise,
|
| 816 |
+
the provided ``hook`` will be fired after all the already registered
|
| 817 |
+
post-hooks. (default: False)
|
| 818 |
+
|
| 819 |
+
Returns:
|
| 820 |
+
:class:`torch.utils.hooks.RemoveableHandle`:
|
| 821 |
+
a handle that can be used to remove the added hook by calling
|
| 822 |
+
``handle.remove()``
|
| 823 |
+
"""
|
| 824 |
+
handle = hooks.RemovableHandle(self._optimizer_load_state_dict_post_hooks)
|
| 825 |
+
self._optimizer_load_state_dict_post_hooks[handle.id] = hook
|
| 826 |
+
if prepend:
|
| 827 |
+
self._optimizer_load_state_dict_post_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined]
|
| 828 |
+
return handle
|
| 829 |
+
|
| 830 |
+
    @torch._disable_dynamo
    def load_state_dict(self, state_dict: StateDict) -> None:
        r"""Load the optimizer state.

        Args:
            state_dict (dict): optimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        # shallow copy, to be consistent with module API
        state_dict = state_dict.copy()

        # Pre-hooks may rewrite the incoming dict (e.g. format migration).
        for pre_hook in self._optimizer_load_state_dict_pre_hooks.values():
            hook_result = pre_hook(self, state_dict)
            if hook_result is not None:
                state_dict = hook_result

        # Validate the state_dict
        groups = self.param_groups

        # Deepcopy as we write into saved_groups later to update state
        saved_groups = deepcopy(state_dict["param_groups"])

        if len(groups) != len(saved_groups):
            raise ValueError(
                "loaded state dict has a different number of " "parameter groups"
            )
        param_lens = (len(g["params"]) for g in groups)
        saved_lens = (len(g["params"]) for g in saved_groups)
        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
            raise ValueError(
                "loaded state dict contains a parameter group "
                "that doesn't match the size of optimizer's group"
            )

        # Update the state
        # Map saved integer param ids -> live param Tensors, by zipping the
        # flattened group orders (positional matching, no verification).
        id_map = dict(
            zip(
                chain.from_iterable(g["params"] for g in saved_groups),
                chain.from_iterable(g["params"] for g in groups),
            )
        )

        def _cast(param, value, param_id=None, param_groups=None, key=None):
            r"""Make a deep copy of value, casting all tensors to device of param."""
            if isinstance(value, torch.Tensor):
                return Optimizer._process_value_according_to_param_policy(
                    param, value, param_id, param_groups, key
                )
            elif isinstance(value, dict):
                # Pass each dict key down so 'step' entries can be special-cased.
                return {
                    k: _cast(
                        param, v, param_id=param_id, param_groups=param_groups, key=k
                    )
                    for k, v in value.items()
                }
            elif isinstance(value, Iterable):
                # Rebuild the same container type (list, tuple, ...) recursively.
                return type(value)(_cast(param, v, param_id=param_id, param_groups=param_groups) for v in value)  # type: ignore[call-arg]
            else:
                return value

        # Copy state assigned to params (and cast tensors to appropriate types).
        # State that is not assigned to params is copied as is (needed for
        # backward compatibility).
        state: DefaultDict[torch.Tensor, Dict[Any, Any]] = defaultdict(dict)
        for k, v in state_dict["state"].items():
            if k in id_map:
                param = id_map[k]
                state[param] = _cast(
                    param, v, param_id=k, param_groups=state_dict["param_groups"]
                )
            else:
                state[k] = v

        # Update parameter groups, setting their 'params' value
        def update_group(
            group: Dict[str, Any], new_group: Dict[str, Any]
        ) -> Dict[str, Any]:
            # Keep the live param Tensors; adopt all other saved hyperparams.
            new_group["params"] = group["params"]
            return new_group

        param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
        self.__setstate__({"state": state, "param_groups": param_groups})

        for post_hook in self._optimizer_load_state_dict_post_hooks.values():
            post_hook(self)
|
| 915 |
+
|
| 916 |
+
    @torch._disable_dynamo
    def zero_grad(self, set_to_none: bool = True) -> None:
        r"""Reset the gradients of all optimized :class:`torch.Tensor` s.

        Args:
            set_to_none (bool): instead of setting to zero, set the grads to None.
                This will in general have lower memory footprint, and can modestly improve performance.
                However, it changes certain behaviors. For example:
                1. When the user tries to access a gradient and perform manual ops on it,
                a None attribute or a Tensor full of 0s will behave differently.
                2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
                are guaranteed to be None for params that did not receive a gradient.
                3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
                (in one case it does the step with a gradient of 0 and in the other it skips
                the step altogether).
        """
        # foreach/fused optimizers batch the zeroing into few kernel launches.
        foreach = self.defaults.get("foreach", False) or self.defaults.get(
            "fused", False
        )

        if not hasattr(self, "_zero_grad_profile_name"):
            self._patch_step_function()

        per_device_and_dtype_grads: Optional[
            DefaultDict[torch.device, DefaultDict[torch.dtype, List[torch.Tensor]]]
        ]
        if foreach:
            # Bucket grads by (device, dtype): _foreach_zero_ needs homogeneous lists.
            per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))
        else:
            per_device_and_dtype_grads = None

        with torch.autograd.profiler.record_function(self._zero_grad_profile_name):
            for group in self.param_groups:
                for p in group["params"]:
                    if p.grad is not None:
                        if set_to_none:
                            p.grad = None
                        else:
                            if p.grad.grad_fn is not None:
                                # Detach in place so zeroing is not tracked by autograd.
                                p.grad.detach_()
                            else:
                                p.grad.requires_grad_(False)
                            if not foreach or p.grad.is_sparse:
                                # Sparse grads cannot use the batched foreach path.
                                p.grad.zero_()
                            else:
                                assert per_device_and_dtype_grads is not None
                                per_device_and_dtype_grads[p.grad.device][
                                    p.grad.dtype
                                ].append(p.grad)
            if foreach:
                assert per_device_and_dtype_grads is not None
                # One batched zeroing kernel per (device, dtype) bucket.
                for per_dtype_grads in per_device_and_dtype_grads.values():
                    for grads in per_dtype_grads.values():
                        torch._foreach_zero_(grads)
|
| 970 |
+
|
| 971 |
+
    # Typing-only overloads: no closure -> returns None; with closure -> returns
    # the float loss computed by the closure.
    @overload
    def step(self, closure: None = ...) -> None:
        ...

    @overload
    def step(self, closure: Callable[[], float]) -> float:
        ...

    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        r"""Perform a single optimization step to update parameter.

        Args:
            closure (Callable): A closure that reevaluates the model and
                returns the loss. Optional for most optimizers.

        .. note::
            Unless otherwise specified, this function should not modify the
            ``.grad`` field of the parameters.
        """
        # Abstract on the base class; concrete optimizers must override.
        raise NotImplementedError
|
| 991 |
+
|
| 992 |
+
    @torch._disable_dynamo
    def add_param_group(self, param_group: Dict[str, Any]) -> None:
        r"""Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Args:
            param_group (dict): Specifies what Tensors should be optimized along with group
                specific optimization options.

        Raises:
            TypeError: if ``param_group`` is not a dict, its params are a set, or
                contain non-Tensor entries.
            ValueError: if a param is a non-leaf Tensor, a required default is
                missing, or a param already belongs to another group.
        """
        if not isinstance(param_group, dict):
            raise TypeError(f"param_group must be a dict, but got {type(param_group)}")

        # Normalize 'params' to a list of Tensors.
        params = param_group["params"]
        if isinstance(params, torch.Tensor):
            param_group["params"] = [params]
        elif isinstance(params, set):
            raise TypeError(
                "optimizer parameters need to be organized in ordered collections, but "
                "the ordering of tensors in sets will change between runs. Please use a list instead."
            )
        else:
            param_group["params"] = list(params)

        for param in param_group["params"]:
            if not isinstance(param, torch.Tensor):
                raise TypeError(
                    "optimizer can only optimize Tensors, "
                    "but one of the params is " + torch.typename(param)
                )
            # Non-leaf tensors don't accumulate .grad unless retain_grad() was
            # called; differentiable optimizers are exempt from this check.
            if not self.defaults.get("differentiable", None) and not (
                param.is_leaf or param.retains_grad
            ):
                raise ValueError("can't optimize a non-leaf Tensor")

        # Fill in defaults; `required` is a sentinel meaning "caller must supply".
        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError(
                    f"parameter group didn't specify a value of required optimization parameter {name}"
                )
            else:
                param_group.setdefault(name, default)

        params = param_group["params"]
        if len(params) != len(set(params)):
            warnings.warn(
                "optimizer contains a parameter group with duplicate parameters; "
                "in future, this will cause an error; "
                "see github.com/pytorch/pytorch/issues/40967 for more information",
                stacklevel=3,
            )

        # A param may belong to at most one group across the whole optimizer.
        param_set: Set[torch.Tensor] = set()
        for group in self.param_groups:
            param_set.update(set(group["params"]))

        if not param_set.isdisjoint(set(param_group["params"])):
            raise ValueError("some parameters appear in more than one parameter group")

        self.param_groups.append(param_group)
|
vllm/lib/python3.10/site-packages/torch/optim/radam.py
ADDED
|
@@ -0,0 +1,608 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
r"""Implementation for the RAdam algorithm."""
|
| 4 |
+
from typing import cast, List, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch import Tensor
|
| 8 |
+
|
| 9 |
+
from .optimizer import (
|
| 10 |
+
_capturable_doc,
|
| 11 |
+
_default_to_fused_or_foreach,
|
| 12 |
+
_differentiable_doc,
|
| 13 |
+
_disable_dynamo_if_unsupported,
|
| 14 |
+
_foreach_doc,
|
| 15 |
+
_get_capturable_supported_devices,
|
| 16 |
+
_get_scalar_dtype,
|
| 17 |
+
_get_value,
|
| 18 |
+
_maximize_doc,
|
| 19 |
+
_use_grad_for_differentiable,
|
| 20 |
+
_view_as_real,
|
| 21 |
+
Optimizer,
|
| 22 |
+
ParamsT,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
__all__ = ["RAdam", "radam"]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class RAdam(Optimizer):  # noqa: D101
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        decoupled_weight_decay: bool = False,
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
    ):  # noqa: D107
        # Validate hyperparameters before handing them to the base class.
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            maximize=maximize,
            foreach=foreach,
            capturable=capturable,
            decoupled_weight_decay=decoupled_weight_decay,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):  # noqa: D105
        # Backfill options absent from checkpoints saved by older versions.
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("decoupled_weight_decay", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                # Old checkpoints stored 'step' as a plain number; convert to a
                # Tensor (on the param's device only when capturable).
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps
    ):
        # Collect per-param tensors for this group into the output lists,
        # lazily creating state on first use. Returns whether any param is complex.
        has_complex = False
        for p in group["params"]:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    # 'step' lives on the param's device when capturable so CUDA
                    # graph capture works; otherwise it stays a CPU scalar tensor.
                    state["step"] = (
                        torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                        if group["capturable"]
                        else torch.tensor(0.0, dtype=_get_scalar_dtype())
                    )
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])
                state_steps.append(state["step"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            # Re-enable grad: step() may run under no_grad via the decorator.
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            state_steps: List[Tensor] = []
            beta1, beta2 = cast(Tuple[float, float], group["betas"])

            has_complex = self._init_group(
                group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps
            )

            # Delegate the math to the functional API (single-tensor or foreach).
            radam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                state_steps,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
                foreach=group["foreach"],
                capturable=group["capturable"],
                differentiable=group["differentiable"],
                decoupled_weight_decay=group["decoupled_weight_decay"],
                has_complex=has_complex,
            )

        return loss
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
# Full docstring is attached post-hoc so the shared option descriptions
# (_foreach_doc etc.) can be interpolated into the Args section.
RAdam.__doc__ = (
    r"""Implements RAdam algorithm.

    .. math::
        \begin{aligned}
            &\rule{110mm}{0.4pt} \\
            &\textbf{input} : \gamma \text{ (lr)}, \: \beta_1, \beta_2
                \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \:
                \lambda \text{ (weightdecay)}, \:\textit{maximize} \\
            &\hspace{13mm} \epsilon \text{ (epsilon)}, \textit{decoupled\_weight\_decay} \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)}, \\
            &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1 \\[-1.ex]
            &\rule{110mm}{0.4pt} \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
            &\hspace{6mm}\textbf{if} \: \textit{maximize}: \\
            &\hspace{12mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{6mm}\textbf{else} \\
            &\hspace{12mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{6mm} \theta_t \leftarrow \theta_{t-1} \\
            &\hspace{6mm} \textbf{if} \: \lambda \neq 0 \\
            &\hspace{12mm}\textbf{if} \: \textit{decoupled\_weight\_decay} \\
            &\hspace{18mm} \theta_t \leftarrow \theta_{t} - \gamma \lambda \theta_{t} \\
            &\hspace{12mm}\textbf{else} \\
            &\hspace{18mm} g_t \leftarrow g_t + \lambda \theta_{t} \\
            &\hspace{6mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
            &\hspace{6mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
            &\hspace{6mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
            &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} -
                2 t \beta^t_2 /\big(1-\beta_2^t \big) \\[0.1.ex]
            &\hspace{6mm}\textbf{if} \: \rho_t > 5 \\
            &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon } \\
            &\hspace{12mm} r_t \leftarrow
                \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\
            &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} r_t l_t \\
            &\hspace{6mm}\textbf{else} \\
            &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} \\
            &\rule{110mm}{0.4pt} \\[-1.ex]
            &\bf{return} \: \theta_t \\[-1.ex]
            &\rule{110mm}{0.4pt} \\[-1.ex]
        \end{aligned}

    For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_.

    This implementation provides an option to use either the original weight_decay implementation as in Adam
    (where the weight_decay is applied to the gradient) or the one from AdamW (where weight_decay is applied
    to the weight) through the decoupled_weight_decay option. When decoupled_weight_decay is set to False
    (default), it uses the original Adam style weight decay, otherwise, it uses the AdamW style which
    corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information
    about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_.

    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        decoupled_weight_decay (bool, optional): whether to use decoupled weight
            decay as in AdamW to obtain RAdamW (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _On the variance of the adaptive learning rate and beyond:
        https://arxiv.org/abs/1908.03265
    .. _author's implementation:
        https://github.com/LiyuanLucasLiu/RAdam
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101

    """
)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def _single_tensor_radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    decoupled_weight_decay: bool,
    differentiable: bool,
    maximize: bool,
    capturable: bool,
    has_complex: bool,
):
    """Sequential (one tensor at a time) RAdam update.

    Mutates ``params``, ``exp_avgs``, ``exp_avg_sqs`` and ``state_steps``
    in place.  When ``capturable`` is True all step-dependent quantities stay
    on-device as tensors so the update is CUDA-graph safe; otherwise the step
    count is pulled to a Python scalar via ``_get_value``.
    """
    for idx, param in enumerate(params):
        # Maximization is implemented as descent on the negated gradient.
        grad = -grads[idx] if maximize else grads[idx]
        exp_avg = exp_avgs[idx]
        exp_avg_sq = exp_avg_sqs[idx]
        step_t = state_steps[idx]

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        # Complex params are updated through their real view; the math below
        # then operates on the interleaved (real, imag) components.
        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)

        # Advance the step counter (in place, so the optimizer state sees it).
        step_t += 1
        step = step_t if capturable else _get_value(step_t)

        if weight_decay != 0:
            if decoupled_weight_decay:
                # AdamW-style: decay the weights directly.
                param.mul_(1 - lr * weight_decay)
            else:
                # Classic Adam-style: fold the decay into the gradient.
                grad = grad.add(param, alpha=weight_decay)

        # Decay the first and second moment running average coefficient.
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        bias_correction1 = 1 - beta1**step
        bias_correction2 = 1 - beta2**step

        # Bias-corrected first moment.
        bias_corrected_exp_avg = exp_avg / bias_correction1

        # Maximum length of the approximated SMA.
        rho_inf = 2 / (1 - beta2) - 1
        # Length of the approximated SMA at this step.
        rho_t = rho_inf - 2 * step * (beta2**step) / bias_correction2

        def _rect_term():
            # Variance rectification factor r_t from the RAdam paper.
            return (
                (rho_t - 4)
                * (rho_t - 2)
                * rho_inf
                / ((rho_inf - 4) * (rho_inf - 2) * rho_t)
            ) ** 0.5

        def _adaptive_lr():
            # sqrt(1 - beta2^t) / (sqrt(v_t) + eps); out-of-place add when
            # differentiating so autograd can track eps.
            root = exp_avg_sq.sqrt()
            root = root.add(eps) if differentiable else root.add_(eps)
            return (bias_correction2**0.5) / root

        # Apply the rectified update when the SMA is long enough (rho_t > 5),
        # otherwise fall back to un-adapted SGD-with-momentum style step.
        if capturable:
            # Branch-free select so the op sequence is graph-capturable.
            update = torch.where(rho_t > 5.0, _rect_term() * _adaptive_lr(), 1.0)
            param.add_(bias_corrected_exp_avg * lr * update, alpha=-1.0)
        elif rho_t > 5.0:
            param.add_(
                bias_corrected_exp_avg * lr * _adaptive_lr() * _rect_term(),
                alpha=-1.0,
            )
        else:
            param.add_(bias_corrected_exp_avg * lr, alpha=-1.0)
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _multi_tensor_radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    decoupled_weight_decay: bool,
    differentiable: bool,
    maximize: bool,
    capturable: bool,
    has_complex: bool,
):
    """Horizontally-fused (foreach) RAdam update.

    Semantically equivalent to :func:`_single_tensor_radam` but batches the
    per-tensor ops with ``torch._foreach_*`` kernels per (device, dtype)
    group.  Not differentiable: the foreach ops do not support autograd.
    """
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, state_steps]  # type: ignore[list-item]
    )
    for (
        grouped_params_,
        grouped_grads_,
        grouped_exp_avgs_,
        grouped_exp_avg_sqs_,
        grouped_state_steps_,
    ), _ in grouped_tensors.values():
        grouped_params = cast(List[Tensor], grouped_params_)
        grouped_grads = cast(List[Tensor], grouped_grads_)
        grouped_exp_avgs = cast(List[Tensor], grouped_exp_avgs_)
        grouped_exp_avg_sqs = cast(List[Tensor], grouped_exp_avg_sqs_)
        grouped_state_steps = cast(List[Tensor], grouped_state_steps_)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if not torch._utils.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if has_complex:
            # Swap every complex tensor for its real view, in place in the lists.
            _view_as_real(
                grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs
            )

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)  # type: ignore[assignment]

        # maximum length of the approximated SMA
        rho_inf = 2 / (1 - beta2) - 1
        # compute the length of the approximated SMA
        bias_correction1: Union[Tuple[Tensor, ...], List[Tensor]]
        bias_correction2: Union[Tuple[Tensor, ...], List[Tensor]]
        rho_t_list: Union[Tuple[Tensor, ...], List[Tensor]]
        if capturable:
            # Tensor math throughout so it stays capturable.
            # bias_correction1 here temporarily holds (1 - beta2^t).
            bias_correction1 = torch._foreach_pow(beta2, grouped_state_steps)
            torch._foreach_neg_(bias_correction1)
            torch._foreach_add_(bias_correction1, 1)
            # rho_t = rho_inf - 2 * t * beta2^t / (1 - beta2^t), built in place.
            bias_correction2 = torch._foreach_pow(beta2, grouped_state_steps)
            torch._foreach_mul_(bias_correction2, grouped_state_steps)
            torch._foreach_mul_(bias_correction2, 2)
            torch._foreach_div_(bias_correction2, bias_correction1)
            torch._foreach_neg_(bias_correction2)
            torch._foreach_add_(bias_correction2, rho_inf)
            rho_t_list = bias_correction2
        else:
            rho_t_list = [
                rho_inf
                - 2
                * _get_value(step)
                * (beta2 ** _get_value(step))
                / (1 - beta2 ** _get_value(step))
                for step in grouped_state_steps
            ]

        if weight_decay != 0:
            if decoupled_weight_decay:
                # AdamW-style decay directly on the weights.
                torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)
            else:
                # Re-use the intermediate memory (grouped_grads) already allocated for maximize
                if maximize:
                    torch._foreach_add_(
                        grouped_grads, grouped_params, alpha=weight_decay
                    )
                else:
                    grouped_grads = torch._foreach_add(  # type: ignore[assignment]
                        grouped_grads, grouped_params, alpha=weight_decay
                    )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2
        )

        # Delete the local intermediate since it won't be used anymore to save on peak memory
        del grouped_grads

        if capturable:
            # Rectification term r_t, assembled in place in `num`:
            # sqrt((rho_t-4)(rho_t-2)*rho_inf / ((rho_inf-4)(rho_inf-2)*rho_t))
            num = torch._foreach_sub(rho_t_list, 4)
            sub2 = torch._foreach_sub(rho_t_list, 2)
            torch._foreach_mul_(num, sub2)
            del sub2
            torch._foreach_mul_(num, rho_inf)
            rho_inf = (rho_inf - 4) * (rho_inf - 2)
            denom = torch._foreach_mul(rho_t_list, rho_inf)
            torch._foreach_div_(num, denom)
            del denom
            torch._foreach_sqrt_(num)

            # TODO(mlazos): we should try and get a foreach_where op https://github.com/pytorch/pytorch/issues/117884
            rect = [
                torch.where(rho_t > 5.0, n, 0.0) for n, rho_t in zip(num, rho_t_list)
            ]
            del num
            del rho_t_list
            # Step size used where rectification is off (rect == 0).
            unrect_step_size = [torch.where(rect > 0, 0.0, 1.0) for rect in rect]
            torch._foreach_mul_(unrect_step_size, lr)

            # bias_correction1 = 1 - beta1^t
            bias_correction1 = torch._foreach_pow(beta1, grouped_state_steps)
            torch._foreach_neg_(bias_correction1)
            torch._foreach_add_(bias_correction1, 1)

            torch._foreach_div_(unrect_step_size, bias_correction1)
            torch._foreach_neg_(unrect_step_size)

            # bias_correction2 becomes -lr * rect * sqrt(1 - beta2^t) / (1 - beta1^t)
            bias_correction2 = torch._foreach_pow(beta2, grouped_state_steps)
            torch._foreach_neg_(bias_correction2)
            torch._foreach_add_(bias_correction2, 1)
            torch._foreach_sqrt_(bias_correction2)
            torch._foreach_mul_(bias_correction2, lr)
            torch._foreach_mul_(bias_correction2, rect)
            del rect
            torch._foreach_neg_(bias_correction2)
            torch._foreach_div_(bias_correction2, bias_correction1)
            del bias_correction1
        else:
            rect = [
                (
                    (rho_t - 4)  # type: ignore[arg-type]
                    * (rho_t - 2)
                    * rho_inf
                    / ((rho_inf - 4) * (rho_inf - 2) * rho_t)
                )
                ** 0.5
                if rho_t > 5
                else 0
                for rho_t in rho_t_list
            ]
            unrectified = [0 if rect > 0 else 1.0 for rect in rect]

            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in grouped_state_steps
            ]
            unrect_step_size = [
                (lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)
            ]
            bias_correction2 = [
                ((1 - beta2 ** _get_value(step)) ** 0.5) * (lr * rect / bc) * -1
                for step, rect, bc in zip(grouped_state_steps, rect, bias_correction1)
            ]

        buffer = torch._foreach_sqrt(grouped_exp_avg_sqs)
        torch._foreach_add_(buffer, eps)
        torch._foreach_div_(buffer, bias_correction2)
        torch._foreach_reciprocal_(buffer)
        torch._foreach_add_(buffer, unrect_step_size)

        # Here, buffer = sqrt(1 - beta2^t) * rect_step_size / (sqrt(v) + eps) + unrect_step_size
        torch._foreach_addcmul_(grouped_params, grouped_exp_avgs, buffer)
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_radam)
def radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    decoupled_weight_decay: bool = False,
    foreach: Optional[bool] = None,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    maximize: bool = False,
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
):
    r"""Functional API that performs RAdam algorithm computation.

    See :class:`~torch.optim.RAdam` for details.
    """
    # State steps must already be singleton tensors (new-style state format).
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    # Pick a default implementation when the caller didn't choose one.
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_radam
    else:
        func = _single_tensor_radam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        state_steps,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        decoupled_weight_decay=decoupled_weight_decay,
        differentiable=differentiable,
        capturable=capturable,
        has_complex=has_complex,
    )
|
vllm/lib/python3.10/site-packages/torch/optim/rmsprop.py
ADDED
|
@@ -0,0 +1,528 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
r"""Implementation for the RMSprop algorithm."""
|
| 4 |
+
from typing import cast, List, Optional, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch import Tensor
|
| 8 |
+
|
| 9 |
+
from .optimizer import (
|
| 10 |
+
_capturable_doc,
|
| 11 |
+
_default_to_fused_or_foreach,
|
| 12 |
+
_differentiable_doc,
|
| 13 |
+
_disable_dynamo_if_unsupported,
|
| 14 |
+
_foreach_doc,
|
| 15 |
+
_get_capturable_supported_devices,
|
| 16 |
+
_get_scalar_dtype,
|
| 17 |
+
_maximize_doc,
|
| 18 |
+
_use_grad_for_differentiable,
|
| 19 |
+
_view_as_real,
|
| 20 |
+
Optimizer,
|
| 21 |
+
ParamsT,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
__all__ = ["RMSprop", "rmsprop"]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class RMSprop(Optimizer):  # noqa: D101
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-2,
        alpha: float = 0.99,
        eps: float = 1e-8,
        weight_decay: float = 0,
        momentum: float = 0,
        centered=False,
        capturable=False,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
    ):  # noqa: D107
        # Validate hyperparameters up front; all must be non-negative.
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= momentum:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= alpha:
            raise ValueError(f"Invalid alpha value: {alpha}")

        defaults = dict(
            lr=lr,
            momentum=momentum,
            alpha=alpha,
            eps=eps,
            centered=centered,
            weight_decay=weight_decay,
            capturable=capturable,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):  # noqa: D105
        super().__setstate__(state)
        # Backfill options that may be missing from checkpoints saved by
        # older versions of this optimizer.
        for group in self.param_groups:
            group.setdefault("momentum", 0)
            group.setdefault("centered", False)
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                # Old checkpoints stored `step` as a plain number; promote it
                # to a tensor (on the param's device when capturable).
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group,
        params_with_grad,
        grads,
        square_avgs,
        momentum_buffer_list,
        grad_avgs,
        state_steps,
    ):
        # Collect per-parameter state for this group into the flat lists that
        # the functional API consumes, lazily initializing state on first use.
        # Returns whether any parameter in the group is complex.
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)

            if p.grad.is_sparse:
                raise RuntimeError("RMSprop does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.zeros((), dtype=_get_scalar_dtype())
                )
                state["square_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                if group["momentum"] > 0:
                    state["momentum_buffer"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                if group["centered"]:
                    state["grad_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
            square_avgs.append(state["square_avg"])
            state_steps.append(state["step"])

            if group["momentum"] > 0:
                momentum_buffer_list.append(state["momentum_buffer"])
            if group["centered"]:
                grad_avgs.append(state["grad_avg"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            square_avgs: List[Tensor] = []
            grad_avgs: List[Tensor] = []
            momentum_buffer_list: List[Tensor] = []
            state_steps: List[Tensor] = []

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                square_avgs,
                momentum_buffer_list,
                grad_avgs,
                state_steps,
            )

            rmsprop(
                params_with_grad,
                grads,
                square_avgs,
                grad_avgs,
                momentum_buffer_list,
                state_steps,
                lr=group["lr"],
                alpha=group["alpha"],
                eps=group["eps"],
                weight_decay=group["weight_decay"],
                momentum=group["momentum"],
                centered=group["centered"],
                foreach=group["foreach"],
                maximize=group["maximize"],
                differentiable=group["differentiable"],
                capturable=group["capturable"],
                has_complex=has_complex,
            )

        return loss
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
RMSprop.__doc__ = (
|
| 199 |
+
r"""Implements RMSprop algorithm.
|
| 200 |
+
|
| 201 |
+
.. math::
|
| 202 |
+
\begin{aligned}
|
| 203 |
+
&\rule{110mm}{0.4pt} \\
|
| 204 |
+
&\textbf{input} : \alpha \text{ (alpha)},\: \gamma \text{ (lr)},
|
| 205 |
+
\: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\
|
| 206 |
+
&\hspace{13mm} \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},\: centered\\
|
| 207 |
+
&\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \:
|
| 208 |
+
\textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0 \\[-1.ex]
|
| 209 |
+
&\rule{110mm}{0.4pt} \\
|
| 210 |
+
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
|
| 211 |
+
&\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
|
| 212 |
+
&\hspace{5mm}if \: \lambda \neq 0 \\
|
| 213 |
+
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
|
| 214 |
+
&\hspace{5mm}v_t \leftarrow \alpha v_{t-1} + (1 - \alpha) g^2_t
|
| 215 |
+
\hspace{8mm} \\
|
| 216 |
+
&\hspace{5mm} \tilde{v_t} \leftarrow v_t \\
|
| 217 |
+
&\hspace{5mm}if \: centered \\
|
| 218 |
+
&\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t \\
|
| 219 |
+
&\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} - \big(g^{ave}_{t} \big)^2 \\
|
| 220 |
+
&\hspace{5mm}if \: \mu > 0 \\
|
| 221 |
+
&\hspace{10mm} \textbf{b}_t\leftarrow \mu \textbf{b}_{t-1} +
|
| 222 |
+
g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \\
|
| 223 |
+
&\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t \\
|
| 224 |
+
&\hspace{5mm} else \\
|
| 225 |
+
&\hspace{10mm}\theta_t \leftarrow \theta_{t-1} -
|
| 226 |
+
\gamma g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \hspace{3mm} \\
|
| 227 |
+
&\rule{110mm}{0.4pt} \\[-1.ex]
|
| 228 |
+
&\bf{return} \: \theta_t \\[-1.ex]
|
| 229 |
+
&\rule{110mm}{0.4pt} \\[-1.ex]
|
| 230 |
+
\end{aligned}
|
| 231 |
+
|
| 232 |
+
For further details regarding the algorithm we refer to
|
| 233 |
+
`lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_ by G. Hinton.
|
| 234 |
+
and centered version `Generating Sequences
|
| 235 |
+
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
|
| 236 |
+
The implementation here takes the square root of the gradient average before
|
| 237 |
+
adding epsilon (note that TensorFlow interchanges these two operations). The effective
|
| 238 |
+
learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
|
| 239 |
+
is the scheduled learning rate and :math:`v` is the weighted moving average
|
| 240 |
+
of the squared gradient.
|
| 241 |
+
"""
|
| 242 |
+
+ rf"""
|
| 243 |
+
Args:
|
| 244 |
+
params (iterable): iterable of parameters to optimize or dicts defining
|
| 245 |
+
parameter groups
|
| 246 |
+
lr (float, Tensor, optional): learning rate (default: 1e-2)
|
| 247 |
+
momentum (float, optional): momentum factor (default: 0)
|
| 248 |
+
alpha (float, optional): smoothing constant (default: 0.99)
|
| 249 |
+
eps (float, optional): term added to the denominator to improve
|
| 250 |
+
numerical stability (default: 1e-8)
|
| 251 |
+
centered (bool, optional) : if ``True``, compute the centered RMSProp,
|
| 252 |
+
the gradient is normalized by an estimation of its variance
|
| 253 |
+
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
|
| 254 |
+
{_foreach_doc}
|
| 255 |
+
{_maximize_doc}
|
| 256 |
+
{_capturable_doc}
|
| 257 |
+
{_differentiable_doc}
|
| 258 |
+
|
| 259 |
+
"""
|
| 260 |
+
)
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def _single_tensor_rmsprop(
    params: List[Tensor],
    grads: List[Tensor],
    square_avgs: List[Tensor],
    grad_avgs: List[Tensor],
    momentum_buffer_list: List[Tensor],
    state_steps: List[Tensor],
    *,
    lr: float,
    alpha: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    centered: bool,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    """Sequential (one tensor at a time) RMSprop update.

    Mutates ``params``, ``square_avgs``, ``grad_avgs``,
    ``momentum_buffer_list`` and ``state_steps`` in place.
    """
    for idx, param in enumerate(params):
        step = state_steps[idx]

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        # Maximization descends on the negated gradient.
        grad = -grads[idx] if maximize else grads[idx]
        square_avg = square_avgs[idx]

        # Advance the step counter in place.
        step += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        # Complex params are updated through their real view.
        is_complex_param = torch.is_complex(param)
        if is_complex_param:
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            square_avg = torch.view_as_real(square_avg)

        # v_t = alpha * v_{t-1} + (1 - alpha) * g_t^2
        square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)

        if centered:
            # Centered variant: normalize by an estimate of the gradient's
            # variance, v_t - (avg g)^2, instead of the raw second moment.
            grad_avg = grad_avgs[idx]
            if is_complex_param:
                grad_avg = torch.view_as_real(grad_avg)
            grad_avg.lerp_(grad, 1 - alpha)
            avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_()
        else:
            avg = square_avg.sqrt()

        # Out-of-place add keeps the graph intact when differentiating.
        avg = avg.add(eps) if differentiable else avg.add_(eps)

        if momentum > 0:
            # b_t = mu * b_{t-1} + g_t / (sqrt(v~) + eps); theta -= lr * b_t
            buf = momentum_buffer_list[idx]
            if is_complex_param:
                buf = torch.view_as_real(buf)
            buf.mul_(momentum).addcdiv_(grad, avg)
            param.add_(buf, alpha=-lr)
        else:
            # theta -= lr * g_t / (sqrt(v~) + eps)
            param.addcdiv_(grad, avg, value=-lr)
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def _multi_tensor_rmsprop(
    params: List[Tensor],
    grads: List[Tensor],
    square_avgs: List[Tensor],
    grad_avgs: List[Tensor],
    momentum_buffer_list: List[Tensor],
    state_steps: List[Tensor],
    *,
    lr: float,
    alpha: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    centered: bool,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    """Multi-tensor (foreach) implementation of the RMSprop update.

    Mirrors `_single_tensor_rmsprop` but batches the per-parameter math into
    `torch._foreach_*` calls, one batch per (device, dtype) group.  All state
    tensors (`square_avgs`, `grad_avgs`, `momentum_buffer_list`,
    `state_steps`) and `params` are updated in place.
    """
    if len(params) == 0:
        return

    # The foreach kernels bypass autograd, so a differentiable step must use
    # the single-tensor path instead.
    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices()
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    # foreach ops require homogeneous tensor lists, so group everything by
    # (device, dtype) and process each group as one batch.
    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, square_avgs, grad_avgs, momentum_buffer_list, state_steps]  # type: ignore[list-item]
    )
    for (
        (
            grouped_params_,
            grouped_grads_,
            grouped_square_avgs_,
            grouped_grad_avgs_,
            grouped_momentum_buffer_list_,
            grouped_state_steps_,
        )
    ), _ in grouped_tensors.values():
        grouped_params = cast(List[Tensor], grouped_params_)
        grouped_grads = cast(List[Tensor], grouped_grads_)
        grouped_square_avgs = cast(List[Tensor], grouped_square_avgs_)
        grouped_state_steps = cast(List[Tensor], grouped_state_steps_)

        if has_complex:
            # Complex tensors are processed through their real views; only the
            # state lists that are actually used need the conversion.
            state_and_grads = [grouped_grads, grouped_square_avgs]
            if momentum > 0:
                grouped_momentum_buffer_list = cast(
                    List[Tensor], grouped_momentum_buffer_list_
                )
                state_and_grads.append(grouped_momentum_buffer_list)
            if centered:
                grouped_grad_avgs = cast(List[Tensor], grouped_grad_avgs_)
                state_and_grads.append(grouped_grad_avgs)
            _view_as_real(grouped_params, *state_and_grads)

        if maximize:
            # Negating out of place keeps the caller's grads untouched.
            grouped_grads = torch._foreach_neg(grouped_grads)  # type: ignore[assignment]

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if not torch._utils.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (grouped_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
            else:
                grouped_grads = torch._foreach_add(  # type: ignore[assignment]
                    grouped_grads, grouped_params, alpha=weight_decay
                )

        # square_avg = alpha * square_avg + (1 - alpha) * grad^2
        torch._foreach_mul_(grouped_square_avgs, alpha)
        torch._foreach_addcmul_(
            grouped_square_avgs, grouped_grads, grouped_grads, value=1 - alpha
        )

        if centered:
            # Centered variant: denominator is sqrt(E[g^2] - E[g]^2) + eps.
            grouped_grad_avgs = cast(List[Tensor], grouped_grad_avgs_)
            torch._foreach_lerp_(grouped_grad_avgs, grouped_grads, 1 - alpha)
            avg = torch._foreach_addcmul(
                grouped_square_avgs, grouped_grad_avgs, grouped_grad_avgs, value=-1
            )
            torch._foreach_sqrt_(avg)
            torch._foreach_add_(avg, eps)
        else:
            avg = torch._foreach_sqrt(grouped_square_avgs)
            torch._foreach_add_(avg, eps)

        if momentum > 0:
            # buf = momentum * buf + grad / avg; param -= lr * buf
            grouped_momentum_buffer_list = cast(
                List[Tensor], grouped_momentum_buffer_list_
            )
            torch._foreach_mul_(grouped_momentum_buffer_list, momentum)
            torch._foreach_addcdiv_(grouped_momentum_buffer_list, grouped_grads, avg)
            # If LR is a tensor, the else branch will internally call item()
            # which will cause silent incorrectness if we are capturing
            if capturable and isinstance(lr, torch.Tensor):
                momentum_lr = torch._foreach_mul(grouped_momentum_buffer_list, -lr)
                torch._foreach_add_(grouped_params, momentum_lr)
            else:
                torch._foreach_add_(
                    grouped_params, grouped_momentum_buffer_list, alpha=-lr
                )
        else:
            # If LR is a tensor, the else branch will internally call item()
            # which will cause silent incorrectness if we are capturing
            if capturable and isinstance(lr, torch.Tensor):
                torch._foreach_div_(avg, -lr)
                torch._foreach_addcdiv_(grouped_params, grouped_grads, avg)
            else:
                torch._foreach_addcdiv_(grouped_params, grouped_grads, avg, value=-lr)
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rmsprop)
def rmsprop(
    params: List[Tensor],
    grads: List[Tensor],
    square_avgs: List[Tensor],
    grad_avgs: List[Tensor],
    momentum_buffer_list: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    lr: float,
    alpha: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    centered: bool,
):
    r"""Functional API that performs rmsprop algorithm computation.

    See :class:`~torch.optim.RMSProp` for details.
    """
    # this check is slow during compilation, so we skip it
    # if it's strictly needed we can add this check back in dynamo
    if not torch._utils.is_compiling():
        if any(not isinstance(t, torch.Tensor) for t in state_steps):
            raise RuntimeError(
                "API has changed, `state_steps` argument must contain a list of singleton tensors"
            )

    # Let the heuristic decide between foreach and single-tensor paths when
    # the caller left the choice open.
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    # Dispatch to the batched implementation only outside of scripting.
    use_foreach_impl = foreach and not torch.jit.is_scripting()
    impl = _multi_tensor_rmsprop if use_foreach_impl else _single_tensor_rmsprop

    impl(
        params,
        grads,
        square_avgs,
        grad_avgs,
        momentum_buffer_list,
        state_steps,
        lr=lr,
        alpha=alpha,
        eps=eps,
        weight_decay=weight_decay,
        momentum=momentum,
        centered=centered,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        has_complex=has_complex,
    )
|
vllm/lib/python3.10/site-packages/torch/optim/swa_utils.py
ADDED
|
@@ -0,0 +1,467 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
r"""Implementation for Stochastic Weight Averaging implementation."""
|
| 3 |
+
import itertools
|
| 4 |
+
import math
|
| 5 |
+
import warnings
|
| 6 |
+
from copy import deepcopy
|
| 7 |
+
from typing import Any, Callable, Iterable, List, Literal, Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch import Tensor
|
| 11 |
+
from torch.nn import Module
|
| 12 |
+
from torch.optim.lr_scheduler import _format_param, LRScheduler
|
| 13 |
+
from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices
|
| 14 |
+
|
| 15 |
+
from .optimizer import Optimizer
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
__all__ = [
|
| 19 |
+
"AveragedModel",
|
| 20 |
+
"update_bn",
|
| 21 |
+
"SWALR",
|
| 22 |
+
"get_ema_multi_avg_fn",
|
| 23 |
+
"get_swa_multi_avg_fn",
|
| 24 |
+
"get_ema_avg_fn",
|
| 25 |
+
"get_swa_avg_fn",
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
PARAM_LIST = Union[Tuple[Tensor, ...], List[Tensor]]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_ema_multi_avg_fn(decay=0.999):
    """Get the function applying exponential moving average (EMA) across multiple params."""

    @torch.no_grad()
    def ema_update(ema_param_list: PARAM_LIST, current_param_list: PARAM_LIST, _):
        head = ema_param_list[0]
        # foreach lerp only handles float and complex
        if torch.is_floating_point(head) or torch.is_complex(head):
            torch._foreach_lerp_(ema_param_list, current_param_list, 1 - decay)
        else:
            # Integral dtypes: fall back to an explicit per-tensor blend.
            for tracked, live in zip(ema_param_list, current_param_list):
                tracked.copy_(tracked * decay + live * (1 - decay))

    return ema_update
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def get_swa_multi_avg_fn():
    """Get the function applying stochastic weight average (SWA) across multiple params."""

    @torch.no_grad()
    def swa_update(
        averaged_param_list: PARAM_LIST,
        current_param_list: PARAM_LIST,
        num_averaged: Union[Tensor, int],
    ):
        head = averaged_param_list[0]
        # foreach lerp only handles float and complex
        if torch.is_floating_point(head) or torch.is_complex(head):
            torch._foreach_lerp_(
                averaged_param_list, current_param_list, 1 / (num_averaged + 1)
            )
        else:
            # avg += (current - avg) / (n + 1), done with foreach kernels.
            diffs = torch._foreach_sub(current_param_list, averaged_param_list)
            if isinstance(num_averaged, Tensor):
                # Tensor count: use addcdiv so the division stays on-device.
                torch._foreach_addcdiv_(
                    averaged_param_list,
                    diffs,
                    [num_averaged + 1] * len(averaged_param_list),
                )
            else:
                torch._foreach_add_(
                    averaged_param_list, diffs, alpha=1.0 / (num_averaged + 1)
                )

    return swa_update
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def get_ema_avg_fn(decay=0.999):
    """Get the function applying exponential moving average (EMA) across a single param."""

    @torch.no_grad()
    def ema_update(ema_param: Tensor, current_param: Tensor, num_averaged):
        # new = decay * old + (1 - decay) * current; num_averaged is ignored
        # because EMA weights by recency, not by count.
        return (1 - decay) * current_param + decay * ema_param

    return ema_update
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def get_swa_avg_fn():
    """Get the function applying stochastic weight average (SWA) across a single param."""

    @torch.no_grad()
    def swa_update(
        averaged_param: Tensor, current_param: Tensor, num_averaged: Union[Tensor, int]
    ):
        # Running equal-weight mean: avg += (current - avg) / (n + 1).
        delta = current_param - averaged_param
        return averaged_param + delta / (num_averaged + 1)

    return swa_update
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
class AveragedModel(Module):
    r"""Implements averaged model for Stochastic Weight Averaging (SWA) and Exponential Moving Average (EMA).

    Stochastic Weight Averaging was proposed in `Averaging Weights Leads to
    Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii
    Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson
    (UAI 2018).

    Exponential Moving Average is a variation of `Polyak averaging`_,
    but using exponential weights instead of equal weights across iterations.

    AveragedModel class creates a copy of the provided module :attr:`model`
    on the device :attr:`device` and allows to compute running averages of the
    parameters of the :attr:`model`.

    Args:
        model (torch.nn.Module): model to use with SWA/EMA
        device (torch.device, optional): if provided, the averaged model will be
            stored on the :attr:`device`
        avg_fn (function, optional): the averaging function used to update
            parameters; the function must take in the current value of the
            :class:`AveragedModel` parameter, the current value of :attr:`model`
            parameter, and the number of models already averaged; if None,
            an equally weighted average is used (default: None)
        multi_avg_fn (function, optional): the averaging function used to update
            parameters inplace; the function must take in the current values of the
            :class:`AveragedModel` parameters as a list, the current values of :attr:`model`
            parameters as a list, and the number of models already averaged; if None,
            an equally weighted average is used (default: None)
        use_buffers (bool): if ``True``, it will compute running averages for
            both the parameters and the buffers of the model. (default: ``False``)

    Example:
        >>> # xdoctest: +SKIP("undefined variables")
        >>> loader, optimizer, model, loss_fn = ...
        >>> swa_model = torch.optim.swa_utils.AveragedModel(model)
        >>> scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
        >>>                                     T_max=300)
        >>> swa_start = 160
        >>> swa_scheduler = SWALR(optimizer, swa_lr=0.05)
        >>> for i in range(300):
        >>>     for input, target in loader:
        >>>         optimizer.zero_grad()
        >>>         loss_fn(model(input), target).backward()
        >>>         optimizer.step()
        >>>     if i > swa_start:
        >>>         swa_model.update_parameters(model)
        >>>         swa_scheduler.step()
        >>>     else:
        >>>         scheduler.step()
        >>>
        >>> # Update bn statistics for the swa_model at the end
        >>> torch.optim.swa_utils.update_bn(loader, swa_model)

    You can also use custom averaging functions with the `avg_fn` or `multi_avg_fn` parameters.
    If no averaging function is provided, the default is to compute
    equally-weighted average of the weights (SWA).

    Example:
        >>> # xdoctest: +SKIP("undefined variables")
        >>> # Compute exponential moving averages of the weights and buffers
        >>> ema_model = torch.optim.swa_utils.AveragedModel(model,
        >>>             torch.optim.swa_utils.get_ema_multi_avg_fn(0.9), use_buffers=True)

    .. note::
        When using SWA/EMA with models containing Batch Normalization you may
        need to update the activation statistics for Batch Normalization.
        This can be done either by using the :meth:`torch.optim.swa_utils.update_bn`
        or by setting :attr:`use_buffers` to `True`. The first approach updates the
        statistics in a post-training step by passing data through the model. The
        second does it during the parameter update phase by averaging all buffers.
        Empirical evidence has shown that updating the statistics in normalization
        layers increases accuracy, but you may wish to empirically test which
        approach yields the best results in your problem.

    .. note::
        :attr:`avg_fn` and `multi_avg_fn` are not saved in the :meth:`state_dict` of the model.

    .. note::
        When :meth:`update_parameters` is called for the first time (i.e.
        :attr:`n_averaged` is `0`) the parameters of `model` are copied
        to the parameters of :class:`AveragedModel`. For every subsequent
        call of :meth:`update_parameters` the function `avg_fn` is used
        to update the parameters.

    .. _Averaging Weights Leads to Wider Optima and Better Generalization:
        https://arxiv.org/abs/1803.05407
    .. _There Are Many Consistent Explanations of Unlabeled Data: Why You Should
        Average:
        https://arxiv.org/abs/1806.05594
    .. _SWALP: Stochastic Weight Averaging in Low-Precision Training:
        https://arxiv.org/abs/1904.11943
    .. _Stochastic Weight Averaging in Parallel: Large-Batch Training That
        Generalizes Well:
        https://arxiv.org/abs/2001.02312
    .. _Polyak averaging:
        https://paperswithcode.com/method/polyak-averaging
    """

    # Count of models folded into the average so far; registered as a buffer
    # in __init__ so it travels with state_dict and device moves.
    n_averaged: Tensor

    def __init__(
        self,
        model: Module,
        device: Optional[Union[int, torch.device]] = None,
        avg_fn: Optional[Callable[[Tensor, Tensor, Union[Tensor, int]], Tensor]] = None,
        multi_avg_fn: Optional[
            Callable[[PARAM_LIST, PARAM_LIST, Union[Tensor, int]], None]
        ] = None,
        use_buffers=False,
    ):  # noqa: D107
        super().__init__()
        # avg_fn (per-tensor) and multi_avg_fn (batched, in-place) are
        # mutually exclusive ways of customizing the averaging rule.
        assert (
            avg_fn is None or multi_avg_fn is None
        ), "Only one of avg_fn and multi_avg_fn should be provided"
        # Deep-copy so the averaged weights evolve independently of `model`.
        self.module = deepcopy(model)
        if device is not None:
            self.module = self.module.to(device)
        self.register_buffer(
            "n_averaged", torch.tensor(0, dtype=torch.long, device=device)
        )
        self.avg_fn = avg_fn
        self.multi_avg_fn = multi_avg_fn
        self.use_buffers = use_buffers

    def forward(self, *args, **kwargs):
        """Forward pass."""
        return self.module(*args, **kwargs)

    def update_parameters(self, model: Module):
        """Update model parameters."""
        # With use_buffers=True the buffers are averaged alongside the
        # parameters; otherwise only parameters participate.
        self_param = (
            itertools.chain(self.module.parameters(), self.module.buffers())
            if self.use_buffers
            else self.parameters()
        )
        model_param = (
            itertools.chain(model.parameters(), model.buffers())
            if self.use_buffers
            else model.parameters()
        )
        self_param_detached: List[Optional[Tensor]] = []
        model_param_detached: List[Optional[Tensor]] = []
        for p_averaged, p_model in zip(self_param, model_param):
            p_model_ = p_model.detach().to(p_averaged.device)
            self_param_detached.append(p_averaged.detach())
            model_param_detached.append(p_model_)
            # First call: seed the average with a straight copy of the model.
            if self.n_averaged == 0:
                p_averaged.detach().copy_(p_model_)

        if self.n_averaged > 0:
            if self.multi_avg_fn is not None or self.avg_fn is None:
                # Batched path: group by (device, dtype) so foreach-style
                # update functions receive homogeneous tensor lists.
                grouped_tensors = _group_tensors_by_device_and_dtype(
                    [self_param_detached, model_param_detached]
                )
                for (device, _), (
                    [self_params, model_params],
                    _,
                ) in grouped_tensors.items():
                    if self.multi_avg_fn:
                        self.multi_avg_fn(
                            self_params, model_params, self.n_averaged.to(device)  # type: ignore[arg-type]
                        )
                    elif (
                        device is not None
                        and device.type in _get_foreach_kernels_supported_devices()
                    ):
                        # Default SWA rule via foreach kernels on supported devices.
                        multi_avg_fn = get_swa_multi_avg_fn()
                        multi_avg_fn(
                            self_params, model_params, self.n_averaged.to(device)
                        )
                    else:
                        # Per-tensor fallback for devices without foreach support.
                        avg_fn = get_swa_avg_fn()
                        n_averaged = self.n_averaged.to(device)
                        for p_averaged, p_model in zip(self_params, model_params):  # type: ignore[assignment]
                            p_averaged.copy_(avg_fn(p_averaged, p_model, n_averaged))
            else:
                # Custom per-tensor avg_fn: out-of-place result copied back.
                for p_averaged, p_model in zip(  # type: ignore[assignment]
                    self_param_detached, model_param_detached
                ):
                    n_averaged = self.n_averaged.to(p_averaged.device)
                    p_averaged.detach().copy_(
                        self.avg_fn(p_averaged.detach(), p_model, n_averaged)
                    )

        if not self.use_buffers:
            # If not apply running averages to the buffers,
            # keep the buffers in sync with the source model.
            for b_swa, b_model in zip(self.module.buffers(), model.buffers()):
                b_swa.detach().copy_(b_model.detach().to(b_swa.device))
        self.n_averaged += 1
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
@torch.no_grad()
def update_bn(
    loader: Iterable[Any],
    model: Module,
    device: Optional[Union[int, torch.device]] = None,
):
    r"""Update BatchNorm running_mean, running_var buffers in the model.

    It performs one pass over data in `loader` to estimate the activation
    statistics for BatchNorm layers in the model.

    Args:
        loader (torch.utils.data.DataLoader): dataset loader to compute the
            activation statistics on. Each data batch should be either a
            tensor, or a list/tuple whose first element is a tensor
            containing data.
        model (torch.nn.Module): model for which we seek to update BatchNorm
            statistics.
        device (torch.device, optional): If set, data will be transferred to
            :attr:`device` before being passed into :attr:`model`.

    Example:
        >>> # xdoctest: +SKIP("Undefined variables")
        >>> loader, model = ...
        >>> torch.optim.swa_utils.update_bn(loader, model)

    .. note::
        The `update_bn` utility assumes that each data batch in :attr:`loader`
        is either a tensor or a list or tuple of tensors; in the latter case it
        is assumed that :meth:`model.forward()` should be called on the first
        element of the list or tuple corresponding to the data batch.
    """
    # Reset every BatchNorm layer and remember its momentum so it can be
    # restored afterwards.
    momenta = {}
    for mod in model.modules():
        if isinstance(mod, torch.nn.modules.batchnorm._BatchNorm):
            mod.reset_running_stats()
            momenta[mod] = mod.momentum

    # Nothing to do if the model has no BatchNorm layers.
    if not momenta:
        return

    was_training = model.training
    model.train()
    # momentum=None makes BatchNorm accumulate a cumulative (equal-weight)
    # average over the pass instead of an exponential one.
    for mod in momenta:
        mod.momentum = None

    for batch in loader:
        # A list/tuple batch carries the input tensor in its first slot.
        batch = batch[0] if isinstance(batch, (list, tuple)) else batch
        if device is not None:
            batch = batch.to(device)
        model(batch)

    # Restore the original momenta and training mode.
    for mod, saved_momentum in momenta.items():
        mod.momentum = saved_momentum
    model.train(was_training)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class SWALR(LRScheduler):
    r"""Anneals the learning rate in each parameter group to a fixed value.

    This learning rate scheduler is meant to be used with Stochastic Weight
    Averaging (SWA) method (see `torch.optim.swa_utils.AveragedModel`).

    Args:
        optimizer (torch.optim.Optimizer): wrapped optimizer
        swa_lrs (float or list): the learning rate value for all param groups
            together or separately for each group.
        annealing_epochs (int): number of epochs in the annealing phase
            (default: 10)
        annealing_strategy (str): "cos" or "linear"; specifies the annealing
            strategy: "cos" for cosine annealing, "linear" for linear annealing
            (default: "cos")
        last_epoch (int): the index of the last epoch (default: -1)

    The :class:`SWALR` scheduler can be used together with other
    schedulers to switch to a constant learning rate late in the training
    as in the example below.

    Example:
        >>> # xdoctest: +SKIP("Undefined variables")
        >>> loader, optimizer, model = ...
        >>> lr_lambda = lambda epoch: 0.9
        >>> scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer,
        >>>     lr_lambda=lr_lambda)
        >>> swa_scheduler = torch.optim.swa_utils.SWALR(optimizer,
        >>>     anneal_strategy="linear", anneal_epochs=20, swa_lr=0.05)
        >>> swa_start = 160
        >>> for i in range(300):
        >>>     for input, target in loader:
        >>>         optimizer.zero_grad()
        >>>         loss_fn(model(input), target).backward()
        >>>         optimizer.step()
        >>>     if i > swa_start:
        >>>         swa_scheduler.step()
        >>>     else:
        >>>         scheduler.step()

    .. _Averaging Weights Leads to Wider Optima and Better Generalization:
        https://arxiv.org/abs/1803.05407
    """

    def __init__(
        self,
        optimizer: Optimizer,
        swa_lr: float,
        anneal_epochs=10,
        anneal_strategy: Literal["cos", "linear"] = "cos",
        last_epoch=-1,
    ):  # noqa: D107
        # Expand a scalar swa_lr to one target value per param group and
        # stash it on each group for get_lr() to read.
        swa_lrs = _format_param("swa_lr", optimizer, swa_lr)
        for swa_lr, group in zip(swa_lrs, optimizer.param_groups):
            group["swa_lr"] = swa_lr
        if anneal_strategy not in ["cos", "linear"]:
            raise ValueError(
                "anneal_strategy must by one of 'cos' or 'linear', "
                f"instead got {anneal_strategy}"
            )
        elif anneal_strategy == "cos":
            self.anneal_func = self._cosine_anneal
        elif anneal_strategy == "linear":
            self.anneal_func = self._linear_anneal
        if not isinstance(anneal_epochs, int) or anneal_epochs < 0:
            raise ValueError(
                f"anneal_epochs must be equal or greater than 0, got {anneal_epochs}"
            )
        self.anneal_epochs = anneal_epochs
        # LRScheduler.__init__ triggers the first step, which calls get_lr().
        super().__init__(optimizer, last_epoch)

    @staticmethod
    def _linear_anneal(t):
        # Identity annealing: alpha grows linearly from 0 to 1.
        return t

    @staticmethod
    def _cosine_anneal(t):
        # Half-cosine ramp from 0 (t=0) to 1 (t=1).
        return (1 - math.cos(math.pi * t)) / 2

    @staticmethod
    def _get_initial_lr(lr, swa_lr, alpha):
        # Invert lr = swa_lr * alpha + base * (1 - alpha) to recover the base
        # lr that produced the group's current lr at the previous step.
        if alpha == 1:
            return swa_lr
        return (lr - alpha * swa_lr) / (1 - alpha)

    def get_lr(self):
        """Get learning rate."""
        # `_get_lr_called_within_step` is only available `_enable_get_lr_call`,
        # so we ignore the type error here. See `LRScheduler.step()` for more details.
        if not self._get_lr_called_within_step:  # type: ignore[attr-defined]
            warnings.warn(
                "To get the last learning rate computed by the scheduler, "
                "please use `get_last_lr()`.",
                UserWarning,
            )
        # Set in `LRScheduler._initial_step()`
        step = self._step_count - 1  # type: ignore[attr-defined]
        if self.anneal_epochs == 0:
            # With no annealing phase, jump straight to the fully-annealed lr.
            step = max(1, step)
        # Recover each group's pre-anneal base lr from its current lr, then
        # re-interpolate toward swa_lr with the annealing factor for `step`.
        prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs)))
        prev_alpha = self.anneal_func(prev_t)
        prev_lrs = [
            self._get_initial_lr(group["lr"], group["swa_lr"], prev_alpha)
            for group in self.optimizer.param_groups
        ]
        t = max(0, min(1, step / max(1, self.anneal_epochs)))
        alpha = self.anneal_func(t)
        return [
            group["swa_lr"] * alpha + lr * (1 - alpha)
            for group, lr in zip(self.optimizer.param_groups, prev_lrs)
        ]
|
vllm/lib/python3.10/site-packages/torch/quantization/__init__.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from .fake_quantize import * # noqa: F403
|
| 3 |
+
from .fuse_modules import fuse_modules
|
| 4 |
+
from .fuser_method_mappings import * # noqa: F403
|
| 5 |
+
from .observer import * # noqa: F403
|
| 6 |
+
from .qconfig import * # noqa: F403
|
| 7 |
+
from .quant_type import * # noqa: F403
|
| 8 |
+
from .quantization_mappings import * # noqa: F403
|
| 9 |
+
from .quantize import * # noqa: F403
|
| 10 |
+
from .quantize_jit import * # noqa: F403
|
| 11 |
+
from .stubs import * # noqa: F403
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def default_eval_fn(model, calib_data):
    r"""
    Default evaluation function takes a torch.utils.data.Dataset or a list of
    input Tensors and run the model on the dataset
    """
    # Targets are not needed here: calibration only requires forward passes,
    # and the model's outputs are discarded (presumably observers attached to
    # the model record activation statistics as a side effect — not visible here).
    for data, target in calib_data:
        model(data)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# Public API of the torch.quantization shim module. Entries are grouped by
# feature area; each name is re-exported from the star-imports above.
__all__ = [
    "QuantWrapper",
    "QuantStub",
    "DeQuantStub",
    # Top level API for eager mode quantization
    "quantize",
    "quantize_dynamic",
    "quantize_qat",
    "prepare",
    "convert",
    "prepare_qat",
    # Top level API for graph mode quantization on TorchScript
    "quantize_jit",
    "quantize_dynamic_jit",
    "_prepare_ondevice_dynamic_jit",
    "_convert_ondevice_dynamic_jit",
    "_quantize_ondevice_dynamic_jit",
    # Top level API for graph mode quantization on GraphModule(torch.fx)
    # 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx
    # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
    "QuantType",  # quantization type
    # custom module APIs
    "get_default_static_quant_module_mappings",
    "get_static_quant_module_class",
    "get_default_dynamic_quant_module_mappings",
    "get_default_qat_module_mappings",
    "get_default_qconfig_propagation_list",
    "get_default_compare_output_module_list",
    "get_quantized_operator",
    "get_fuser_method",
    # Sub functions for `prepare` and `swap_module`
    "propagate_qconfig_",
    "add_quant_dequant",
    "swap_module",
    "default_eval_fn",
    # Observers
    "ObserverBase",
    "WeightObserver",
    "HistogramObserver",
    "observer",
    "default_observer",
    "default_weight_observer",
    "default_placeholder_observer",
    "default_per_channel_weight_observer",
    # FakeQuantize (for qat)
    "default_fake_quant",
    "default_weight_fake_quant",
    "default_fixed_qparams_range_neg1to1_fake_quant",
    "default_fixed_qparams_range_0to1_fake_quant",
    "default_per_channel_weight_fake_quant",
    "default_histogram_fake_quant",
    # QConfig
    "QConfig",
    "default_qconfig",
    "default_dynamic_qconfig",
    "float16_dynamic_qconfig",
    "float_qparams_weight_only_qconfig",
    # QAT utilities ("prepare_qat" / "quantize_qat" are listed with the
    # eager-mode API above; keeping each name once avoids duplicates)
    "default_qat_qconfig",
    # module transformations
    "fuse_modules",
]
|
vllm/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc
ADDED
|
Binary file (991 Bytes). View file
|
|
|