Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py +126 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/contract.py +224 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py +2 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py +152 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py +754 -0
- janus/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py +131 -0
- janus/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/mod_tracker.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/_tools/runtime_estimator.py +527 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py +558 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py +46 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py +1200 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py +2091 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py +113 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py +336 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py +262 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py +396 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py +249 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/backend_registry.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/constants.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/functions.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/internal.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -454,3 +454,4 @@ janus/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester
|
|
| 454 |
janus/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 455 |
janus/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 456 |
janus/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 454 |
janus/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 455 |
janus/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 456 |
janus/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 457 |
+
janus/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (357 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc
ADDED
|
Binary file (4.25 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc
ADDED
|
Binary file (6.13 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc
ADDED
|
Binary file (3.87 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc
ADDED
|
Binary file (7.19 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
from contextlib import contextmanager, nullcontext
|
| 4 |
+
from typing import Any, ContextManager, Dict, Optional, Tuple
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
from torch.utils.checkpoint import (
|
| 9 |
+
_checkpoint_without_reentrant_generator,
|
| 10 |
+
_DEFAULT_DETERMINISM_MODE,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
from .contract import contract
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@contextmanager
|
| 17 |
+
def _no_hook(module: nn.Module, user_ctx: Optional[ContextManager] = None):
|
| 18 |
+
r"""
|
| 19 |
+
Disable hooks installed by checkpoint to avoid unintentional recursion
|
| 20 |
+
during backward recomputation.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
with user_ctx if user_ctx else nullcontext():
|
| 24 |
+
orig_enable_hook = checkpoint.state(module).enable_hook
|
| 25 |
+
checkpoint.state(module).enable_hook = False
|
| 26 |
+
try:
|
| 27 |
+
yield
|
| 28 |
+
finally:
|
| 29 |
+
checkpoint.state(module).enable_hook = orig_enable_hook
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@contract()
|
| 33 |
+
def checkpoint(module: nn.Module, **kwargs) -> nn.Module:
|
| 34 |
+
r"""
|
| 35 |
+
This is a composable activation checkpointing API. Unlike functional
|
| 36 |
+
activation checkpointing APIs, this one does not require changing model
|
| 37 |
+
source code. Unlike ``nn.Module`` wrapper activation checkpointing APIs,
|
| 38 |
+
this one does not modify model structure or fully-qualified names either.
|
| 39 |
+
Under the hood, it registers activation checkpointing logic as pre- and
|
| 40 |
+
post-forward hooks. Hence, this API can be easily applied to any model or
|
| 41 |
+
sub-modules in the model.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
module (nn.Module): the target model or sub-module to apply activation
|
| 45 |
+
checkpointing.
|
| 46 |
+
|
| 47 |
+
Example::
|
| 48 |
+
>>> # xdoctest: +SKIP
|
| 49 |
+
>>> import torch.nn as nn
|
| 50 |
+
>>>
|
| 51 |
+
>>> class MyModel(nn.Module):
|
| 52 |
+
>>> def __init__(self) -> None:
|
| 53 |
+
>>> super().__init__()
|
| 54 |
+
>>> self.l1 = nn.Linear(10, 10)
|
| 55 |
+
>>> self.l2 = nn.Linear(10, 10)
|
| 56 |
+
>>>
|
| 57 |
+
>>> def forward(self, x):
|
| 58 |
+
>>> return self.l2(self.l1(x))
|
| 59 |
+
>>>
|
| 60 |
+
>>> model = MyModel()
|
| 61 |
+
>>> checkpoint(model.l1) # apply activation checkpointing only to l1
|
| 62 |
+
>>> model(torch.zeros(2, 10)).sum().backward()
|
| 63 |
+
|
| 64 |
+
"""
|
| 65 |
+
torch._C._log_api_usage_once("torch.distributed.checkpoint")
|
| 66 |
+
|
| 67 |
+
use_reentrant = kwargs.pop("use_reentrant", False)
|
| 68 |
+
if use_reentrant:
|
| 69 |
+
raise NotImplementedError(
|
| 70 |
+
"use_reentrant=True is not supported in composable checkpoint. "
|
| 71 |
+
"Please use torch.utils.checkpoint.checkpoint instead."
|
| 72 |
+
)
|
| 73 |
+
preserve_rng_state = kwargs.pop("preserve_rng_state", True)
|
| 74 |
+
user_context_fns = kwargs.pop("context_fn", None)
|
| 75 |
+
determinism_check = kwargs.pop("determinism_check", _DEFAULT_DETERMINISM_MODE)
|
| 76 |
+
debug = kwargs.pop("debug", False)
|
| 77 |
+
|
| 78 |
+
if kwargs:
|
| 79 |
+
raise ValueError(
|
| 80 |
+
"Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
def forward_pre_hook(
|
| 84 |
+
module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
|
| 85 |
+
) -> None:
|
| 86 |
+
if checkpoint.state(module).enable_hook:
|
| 87 |
+
|
| 88 |
+
def context_fns():
|
| 89 |
+
if user_context_fns is not None:
|
| 90 |
+
ctx1, ctx2 = user_context_fns()
|
| 91 |
+
return ctx1, _no_hook(module, ctx2)
|
| 92 |
+
else:
|
| 93 |
+
return nullcontext(), _no_hook(module)
|
| 94 |
+
|
| 95 |
+
checkpoint.state(
|
| 96 |
+
module
|
| 97 |
+
)._ac_generator = _checkpoint_without_reentrant_generator(
|
| 98 |
+
module,
|
| 99 |
+
preserve_rng_state,
|
| 100 |
+
context_fns,
|
| 101 |
+
determinism_check,
|
| 102 |
+
debug,
|
| 103 |
+
*args,
|
| 104 |
+
**kwargs,
|
| 105 |
+
)
|
| 106 |
+
next(checkpoint.state(module)._ac_generator)
|
| 107 |
+
|
| 108 |
+
def forward_hook(module: nn.Module, inputs: Tuple[Any, ...], output: Any) -> Any:
|
| 109 |
+
if checkpoint.state(module).enable_hook:
|
| 110 |
+
try:
|
| 111 |
+
next(checkpoint.state(module)._ac_generator)
|
| 112 |
+
except StopIteration:
|
| 113 |
+
pass
|
| 114 |
+
else:
|
| 115 |
+
raise RuntimeError(
|
| 116 |
+
"Expected non-reentrant activation checkpoint generator to be exhausted, but it was not!"
|
| 117 |
+
)
|
| 118 |
+
|
| 119 |
+
# Ensure that we no longer hold on to the generator. always_call=True helps ensure we
|
| 120 |
+
# clear this even in the case of exception in fwd pass.
|
| 121 |
+
checkpoint.state(module)._ac_generator = None
|
| 122 |
+
|
| 123 |
+
checkpoint.state(module).enable_hook = True
|
| 124 |
+
module.register_forward_pre_hook(forward_pre_hook, with_kwargs=True)
|
| 125 |
+
module.register_forward_hook(forward_hook, prepend=True, always_call=True)
|
| 126 |
+
return module
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/contract.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import uuid
|
| 3 |
+
from collections import OrderedDict
|
| 4 |
+
from functools import wraps
|
| 5 |
+
from typing import Callable, Dict, List, Optional, Sequence, Type, Union
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch.distributed._composable_state import _State
|
| 10 |
+
from torch.distributed.utils import _get_root_modules
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def generate_state_key(string="__composable_api_state_key"):
|
| 14 |
+
return f"{string}_{str(uuid.uuid4())}"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
STATE_KEY = generate_state_key()
|
| 18 |
+
REGISTRY_KEY = generate_state_key()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# TODO: we can add additional info to RegistryItem to share across APIs. E.g.,
|
| 22 |
+
# we can add args and kwargs here, and then we can detect whether fully_shard
|
| 23 |
+
# is combined with reentrant activation checkpointing and error out with a clear
|
| 24 |
+
# message.
|
| 25 |
+
class RegistryItem:
|
| 26 |
+
pass
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def contract(state_cls: Type[_State] = _State):
|
| 30 |
+
r"""
|
| 31 |
+
Decorate a function as a composable distributed API, where the first
|
| 32 |
+
argument of the function must be an :class:`nn.Module` instance or sequence
|
| 33 |
+
of :class:`nn.Module` instances.
|
| 34 |
+
|
| 35 |
+
The decorator verifies that the decorated function does not modify
|
| 36 |
+
fully-qualified names (FQNs) for parameters, buffers, or modules. The
|
| 37 |
+
decorated function can return different module instances than the input
|
| 38 |
+
modules; the FQN invariant will be enforced following the input order.
|
| 39 |
+
|
| 40 |
+
When a function ``func`` is decorated by ``@contract()``, a
|
| 41 |
+
``.state(module: nn.Module)`` method will be installed to the decorated
|
| 42 |
+
function. Then you can retrieve and modify the state on a module by calling
|
| 43 |
+
``func.state(module)``.
|
| 44 |
+
|
| 45 |
+
Example::
|
| 46 |
+
>>> # xdoctest: +SKIP
|
| 47 |
+
>>> import torch.nn as nn
|
| 48 |
+
>>>
|
| 49 |
+
>>> class MyModel(nn.Module):
|
| 50 |
+
>>> def __init__(self) -> None:
|
| 51 |
+
>>> super().__init__()
|
| 52 |
+
>>> self.l1 = nn.Linear(10, 10)
|
| 53 |
+
>>> self.l2 = nn.Linear(10, 10)
|
| 54 |
+
>>>
|
| 55 |
+
>>> def forward(self, x):
|
| 56 |
+
>>> return self.l2(self.l1(x))
|
| 57 |
+
>>>
|
| 58 |
+
>>> @contract()
|
| 59 |
+
>>> def my_feature(module: nn.Module) -> nn.Module:
|
| 60 |
+
>>> my_feature.state(module).some_state = "any value"
|
| 61 |
+
>>> return module
|
| 62 |
+
>>>
|
| 63 |
+
>>> model = MyModel()
|
| 64 |
+
>>> my_feature(model.l1)
|
| 65 |
+
>>> assert my_feature.state(model.l1).some_state == "any value"
|
| 66 |
+
>>> my_feature(model.l2)
|
| 67 |
+
>>> model(torch.randn(2, 10)).sum().backward()
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
# wraps will make functions decorated with contract() pickleable - needed for integration with torch.package
|
| 71 |
+
@wraps(state_cls)
|
| 72 |
+
def inner(func):
|
| 73 |
+
@wraps(func)
|
| 74 |
+
def wrapper(
|
| 75 |
+
module: Union[nn.Module, Sequence[nn.Module]], *args, **kwargs
|
| 76 |
+
) -> Optional[nn.Module]:
|
| 77 |
+
inp_module = module
|
| 78 |
+
if isinstance(module, nn.Module):
|
| 79 |
+
modules = [module]
|
| 80 |
+
else:
|
| 81 |
+
# If the user passes a sequence of modules, then we assume that
|
| 82 |
+
# we only need to insert the state object on the root modules
|
| 83 |
+
# (i.e. those without a parent) among the passed-in modules.
|
| 84 |
+
modules = _get_root_modules(list(module))
|
| 85 |
+
state = state_cls() # shared across all modules
|
| 86 |
+
registry_item = RegistryItem() # shared across all modules
|
| 87 |
+
|
| 88 |
+
# `func` is allowed to return different module instances than the
|
| 89 |
+
# input modules as long as FQNs are preserved following the input
|
| 90 |
+
# module order
|
| 91 |
+
all_orig_named_params: List[Dict[str, nn.Parameter]] = []
|
| 92 |
+
all_orig_named_buffers: List[Dict[str, torch.Tensor]] = []
|
| 93 |
+
all_orig_named_modules: List[Dict[str, nn.Module]] = []
|
| 94 |
+
|
| 95 |
+
for module in modules:
|
| 96 |
+
default_all_state: Dict[Callable, _State] = OrderedDict()
|
| 97 |
+
default_registry: Dict[str, RegistryItem] = OrderedDict()
|
| 98 |
+
all_state: Dict[Callable, _State] = module.__dict__.setdefault( # type: ignore[call-overload]
|
| 99 |
+
STATE_KEY, default_all_state
|
| 100 |
+
)
|
| 101 |
+
if not isinstance(all_state, dict):
|
| 102 |
+
raise AssertionError(
|
| 103 |
+
f"Distributed composable API states corrupted: {all_state}"
|
| 104 |
+
)
|
| 105 |
+
registry: Dict[str, RegistryItem] = module.__dict__.setdefault( # type: ignore[call-overload]
|
| 106 |
+
REGISTRY_KEY, default_registry
|
| 107 |
+
)
|
| 108 |
+
if not isinstance(registry, dict):
|
| 109 |
+
raise AssertionError(
|
| 110 |
+
f"Distributed composable API registry corrupted: {registry}"
|
| 111 |
+
)
|
| 112 |
+
if func in all_state or func.__name__ in registry:
|
| 113 |
+
raise AssertionError(
|
| 114 |
+
"Each distinct composable distributed API can only be applied to a "
|
| 115 |
+
f"module once. {func.__name__} has already been applied to the "
|
| 116 |
+
f"following module:\n{module}"
|
| 117 |
+
)
|
| 118 |
+
all_state.setdefault(func, state)
|
| 119 |
+
registry.setdefault(func.__name__, registry_item)
|
| 120 |
+
|
| 121 |
+
all_orig_named_params.append(OrderedDict(module.named_parameters()))
|
| 122 |
+
all_orig_named_buffers.append(OrderedDict(module.named_buffers()))
|
| 123 |
+
all_orig_named_modules.append(OrderedDict(module.named_modules()))
|
| 124 |
+
|
| 125 |
+
updated = func(inp_module, *args, **kwargs)
|
| 126 |
+
if updated is None:
|
| 127 |
+
updated = inp_module
|
| 128 |
+
if isinstance(updated, nn.Module):
|
| 129 |
+
updated_modules = [updated]
|
| 130 |
+
else:
|
| 131 |
+
updated_modules = _get_root_modules(list(inp_module))
|
| 132 |
+
|
| 133 |
+
all_new_named_params: List[Dict[str, nn.Parameter]] = []
|
| 134 |
+
all_new_named_buffers: List[Dict[str, torch.Tensor]] = []
|
| 135 |
+
all_new_named_modules: List[Dict[str, nn.Module]] = []
|
| 136 |
+
for module in updated_modules:
|
| 137 |
+
all_new_named_params.append(OrderedDict(module.named_parameters()))
|
| 138 |
+
all_new_named_buffers.append(OrderedDict(module.named_buffers()))
|
| 139 |
+
all_new_named_modules.append(OrderedDict(module.named_modules()))
|
| 140 |
+
|
| 141 |
+
num_orig_modules = len(all_orig_named_modules)
|
| 142 |
+
num_new_modules = len(all_new_named_modules)
|
| 143 |
+
if num_orig_modules != num_new_modules:
|
| 144 |
+
raise AssertionError(
|
| 145 |
+
f"{func.__name__} should return the same number of modules as input modules"
|
| 146 |
+
f"Inputs: {num_orig_modules} modules\n"
|
| 147 |
+
f"Outputs: {num_new_modules} modules"
|
| 148 |
+
)
|
| 149 |
+
|
| 150 |
+
def check_fqn(orig_fqns: List[str], new_fqns: List[str], check_key: str):
|
| 151 |
+
if orig_fqns == new_fqns:
|
| 152 |
+
return
|
| 153 |
+
|
| 154 |
+
orig_fqn_set, new_fqn_set = set(orig_fqns), set(new_fqns)
|
| 155 |
+
orig_only = orig_fqn_set - new_fqn_set
|
| 156 |
+
new_only = new_fqn_set - orig_fqn_set
|
| 157 |
+
if len(orig_only) or len(new_only):
|
| 158 |
+
raise RuntimeError(
|
| 159 |
+
f"{check_key}"
|
| 160 |
+
"Composable distributed API implementations cannot modify FQNs.\n"
|
| 161 |
+
f"FQNs only in original: {orig_only}\n"
|
| 162 |
+
f"FQNs only in new: {new_only}"
|
| 163 |
+
)
|
| 164 |
+
else:
|
| 165 |
+
raise RuntimeError(
|
| 166 |
+
f"{check_key}"
|
| 167 |
+
"Composable distributed API implementations cannot modify "
|
| 168 |
+
"the order of FQNs.\n"
|
| 169 |
+
f"Original FQNs: {orig_only}\n"
|
| 170 |
+
f"New FQNs: {new_only}"
|
| 171 |
+
)
|
| 172 |
+
|
| 173 |
+
for orig_named_params, new_named_params in zip(
|
| 174 |
+
all_orig_named_params, all_new_named_params
|
| 175 |
+
):
|
| 176 |
+
check_fqn(
|
| 177 |
+
list(orig_named_params.keys()),
|
| 178 |
+
list(new_named_params.keys()),
|
| 179 |
+
"Checking parameters: ",
|
| 180 |
+
)
|
| 181 |
+
for orig_named_buffers, new_named_buffers in zip(
|
| 182 |
+
all_orig_named_buffers, all_new_named_buffers
|
| 183 |
+
):
|
| 184 |
+
check_fqn(
|
| 185 |
+
list(orig_named_buffers.keys()),
|
| 186 |
+
list(new_named_buffers.keys()),
|
| 187 |
+
"Checking buffers: ",
|
| 188 |
+
)
|
| 189 |
+
for orig_named_modules, new_named_modules in zip(
|
| 190 |
+
all_orig_named_modules, all_new_named_modules
|
| 191 |
+
):
|
| 192 |
+
check_fqn(
|
| 193 |
+
list(orig_named_modules.keys()),
|
| 194 |
+
list(new_named_modules.keys()),
|
| 195 |
+
"Checking modules: ",
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
# TODO: verify that installed distributed paradigms are compatible with
|
| 199 |
+
# each other.
|
| 200 |
+
|
| 201 |
+
return updated
|
| 202 |
+
|
| 203 |
+
def get_state(module: nn.Module) -> Optional[_State]:
|
| 204 |
+
return module.__dict__.setdefault( # type: ignore[call-overload]
|
| 205 |
+
STATE_KEY,
|
| 206 |
+
{}, # TODO(@yhcharles): this is a temporary fix, need a better way
|
| 207 |
+
).get(
|
| 208 |
+
func
|
| 209 |
+
) # type: ignore[call-overload]
|
| 210 |
+
|
| 211 |
+
wrapper.state = get_state # type: ignore[attr-defined]
|
| 212 |
+
|
| 213 |
+
return wrapper
|
| 214 |
+
|
| 215 |
+
return inner
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def _get_registry(module: nn.Module) -> Optional[Dict[str, RegistryItem]]:
|
| 219 |
+
r"""
|
| 220 |
+
Get an ``OrderedDict`` of composable APIs that have been applied to the
|
| 221 |
+
``module``, indexed by the API name. If no API has been applied, then this
|
| 222 |
+
returns ``None``.
|
| 223 |
+
"""
|
| 224 |
+
return getattr(module, REGISTRY_KEY, None)
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ._fsdp_api import CPUOffloadPolicy, MixedPrecisionPolicy, OffloadPolicy
|
| 2 |
+
from .fully_shard import FSDPModule, fully_shard, register_fsdp_forward_method
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (384 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc
ADDED
|
Binary file (5.15 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc
ADDED
|
Binary file (5.18 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc
ADDED
|
Binary file (20.9 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc
ADDED
|
Binary file (18.5 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc
ADDED
|
Binary file (11.9 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import math
|
| 3 |
+
import traceback
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from enum import auto, Enum
|
| 6 |
+
from typing import Any, cast, List, Optional
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch._dynamo.compiled_autograd as ca
|
| 10 |
+
import torch.distributed as dist
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
from torch.distributed._composable.contract import _get_registry
|
| 13 |
+
from torch.distributed.tensor import DeviceMesh, DTensor
|
| 14 |
+
from torch.distributed.tensor._dtensor_spec import DTensorSpec
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
|
| 18 |
+
class DataParallelMeshInfo:
|
| 19 |
+
mesh: DeviceMesh
|
| 20 |
+
shard_mesh_dim: Optional[int] = None
|
| 21 |
+
replicate_mesh_dim: Optional[int] = None
|
| 22 |
+
|
| 23 |
+
def __post_init__(self):
|
| 24 |
+
if self.shard_mesh_dim is None and self.replicate_mesh_dim is None:
|
| 25 |
+
raise AssertionError(
|
| 26 |
+
"At least one of shard_mesh_dim and replicate_mesh_dim must not be None"
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@dataclass
|
| 31 |
+
class FSDPMeshInfo(DataParallelMeshInfo):
|
| 32 |
+
def __post_init__(self):
|
| 33 |
+
super().__post_init__()
|
| 34 |
+
if self.shard_mesh_dim is None:
|
| 35 |
+
raise AssertionError("Expects non-None shard_mesh_dim")
|
| 36 |
+
self.shard_mesh_size: int = self.mesh.size(self.shard_mesh_dim)
|
| 37 |
+
self.shard_process_group = self.mesh.get_group(self.shard_mesh_dim)
|
| 38 |
+
self.shard_mesh_rank: int = self.shard_process_group.rank()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@dataclass
|
| 42 |
+
class DDPMeshInfo(DataParallelMeshInfo):
|
| 43 |
+
def __post_init__(self):
|
| 44 |
+
super().__post_init__()
|
| 45 |
+
if self.replicate_mesh_dim is None:
|
| 46 |
+
raise AssertionError("Expects non-None replicate_mesh_dim")
|
| 47 |
+
self.replicate_mesh_size: int = self.mesh.size(self.replicate_mesh_dim)
|
| 48 |
+
self.replicate_process_group = self.mesh.get_group(self.replicate_mesh_dim)
|
| 49 |
+
self.replicate_mesh_rank: int = self.replicate_process_group.rank()
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@dataclass
|
| 53 |
+
class HSDPMeshInfo(FSDPMeshInfo, DDPMeshInfo):
|
| 54 |
+
def __post_init__(self):
|
| 55 |
+
# Calls `FSDPMeshInfo` -> `DDPMeshInfo` -> `DataParallelMeshInfo`
|
| 56 |
+
super().__post_init__()
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class TrainingState(Enum):
|
| 60 |
+
"""Describes the training state of one FSDP state / parameter group."""
|
| 61 |
+
|
| 62 |
+
# Transition to forward starting pre-forward until post-forward
|
| 63 |
+
FORWARD = auto()
|
| 64 |
+
# Transition to pre-backward when unsharding in backward
|
| 65 |
+
PRE_BACKWARD = auto()
|
| 66 |
+
# Transition to post-backward when resharding and reducing gradients
|
| 67 |
+
POST_BACKWARD = auto()
|
| 68 |
+
# Idle before/after forward or before pre-backward/after post-backward
|
| 69 |
+
IDLE = auto()
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _raise_assert_with_print(*args: Any, **kwargs: Any):
|
| 73 |
+
print(f"[Rank {dist.get_rank()}] ", end="")
|
| 74 |
+
print(*args, **kwargs)
|
| 75 |
+
traceback.print_stack()
|
| 76 |
+
raise AssertionError(*args, **kwargs)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _is_composable_with_fsdp(module: nn.Module) -> bool:
|
| 80 |
+
registry = _get_registry(module)
|
| 81 |
+
if registry is None:
|
| 82 |
+
return True
|
| 83 |
+
# Registry keys by function name
|
| 84 |
+
return "replicate" not in registry
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _get_dim0_padded_size(tensor_size: torch.Size, dim0_factor: int) -> torch.Size:
|
| 88 |
+
padded_dim0 = math.ceil(tensor_size[0] / dim0_factor) * dim0_factor
|
| 89 |
+
return cast(torch.Size, torch.Size([padded_dim0]) + tensor_size[1:])
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _chunk_with_empty(
|
| 93 |
+
tensor: torch.Tensor, num_chunks: int, dim: int
|
| 94 |
+
) -> List[torch.Tensor]:
|
| 95 |
+
chunks = list(torch.chunk(tensor, num_chunks, dim=dim))
|
| 96 |
+
while len(chunks) < num_chunks:
|
| 97 |
+
chunks.append(chunks[0].new_empty(0))
|
| 98 |
+
return chunks
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _get_dim0_chunked_size(
|
| 102 |
+
chunk: torch.Tensor, unchunked_size: torch.Size
|
| 103 |
+
) -> torch.Size:
|
| 104 |
+
if chunk.numel() > 0:
|
| 105 |
+
return chunk.size()
|
| 106 |
+
# For 0 numel, we need to preserve trailing dims for DTensor APIs
|
| 107 |
+
return cast(torch.Size, torch.Size([0]) + unchunked_size[1:])
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _from_local_no_grad(
|
| 111 |
+
local_tensor: torch.Tensor,
|
| 112 |
+
sharding_spec: DTensorSpec,
|
| 113 |
+
) -> DTensor:
|
| 114 |
+
"""
|
| 115 |
+
This method is similar to ``DTensor.from_local()`` except that in eager mode
|
| 116 |
+
it avoids some CPU overhead by avoiding default args and not being differentiable.
|
| 117 |
+
"""
|
| 118 |
+
|
| 119 |
+
if not ca.compiled_autograd_enabled:
|
| 120 |
+
return DTensor(
|
| 121 |
+
# Use the local tensor directly instead of constructing a new tensor
|
| 122 |
+
# variable, e.g. with `view_as()`, since this is not differentiable
|
| 123 |
+
local_tensor,
|
| 124 |
+
sharding_spec,
|
| 125 |
+
requires_grad=local_tensor.requires_grad,
|
| 126 |
+
)
|
| 127 |
+
else:
|
| 128 |
+
return DTensor.from_local(
|
| 129 |
+
local_tensor,
|
| 130 |
+
sharding_spec.mesh,
|
| 131 |
+
sharding_spec.placements,
|
| 132 |
+
shape=sharding_spec.shape,
|
| 133 |
+
stride=sharding_spec.stride,
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _to_dtype_if_needed(
|
| 138 |
+
tensor: torch.Tensor, dtype: Optional[torch.dtype]
|
| 139 |
+
) -> torch.Tensor:
|
| 140 |
+
if dtype is not None and tensor.dtype != dtype:
|
| 141 |
+
return tensor.to(dtype)
|
| 142 |
+
return tensor
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _cast_fp_tensor(dtype: torch.dtype, x: torch.Tensor) -> torch.Tensor:
|
| 146 |
+
if (
|
| 147 |
+
not isinstance(x, torch.Tensor)
|
| 148 |
+
or not torch.is_floating_point(x)
|
| 149 |
+
or x.dtype == dtype
|
| 150 |
+
):
|
| 151 |
+
return x
|
| 152 |
+
return x.to(dtype)
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py
ADDED
|
@@ -0,0 +1,754 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import itertools
|
| 3 |
+
from dataclasses import dataclass, field
|
| 4 |
+
from enum import auto, Enum
|
| 5 |
+
from typing import Any, cast, List, Optional, Sequence, Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch._dynamo.compiled_autograd as ca
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
from torch._prims_common import make_contiguous_strides_for
|
| 11 |
+
from torch.distributed._functional_collectives import AsyncCollectiveTensor
|
| 12 |
+
from torch.distributed.tensor import DTensor, Replicate, Shard
|
| 13 |
+
from torch.distributed.tensor._dtensor_spec import DTensorSpec, TensorMeta
|
| 14 |
+
from torch.distributed.tensor.device_mesh import _mesh_resources
|
| 15 |
+
from torch.distributed.tensor.placement_types import _StridedShard, Placement
|
| 16 |
+
|
| 17 |
+
from ._fsdp_api import CPUOffloadPolicy, MixedPrecisionPolicy, OffloadPolicy
|
| 18 |
+
from ._fsdp_common import (
|
| 19 |
+
_chunk_with_empty,
|
| 20 |
+
_from_local_no_grad,
|
| 21 |
+
_get_dim0_chunked_size,
|
| 22 |
+
_raise_assert_with_print,
|
| 23 |
+
_to_dtype_if_needed,
|
| 24 |
+
FSDPMeshInfo,
|
| 25 |
+
HSDPMeshInfo,
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
"""
|
| 30 |
+
[Note: FSDP tensors]
|
| 31 |
+
FSDP considers the following tensors:
|
| 32 |
+
- Original parameter: parameter passed to :class:`FSDPParam`, i.e. the one
|
| 33 |
+
on the module when applying FSDP
|
| 34 |
+
- Sharded parameter: sharding the original parameter on dim-0 as a DTensor
|
| 35 |
+
over the main mesh
|
| 36 |
+
- All-gather inputs: the ``torch.Tensor`` or ``Tensor`` s passed to all-gather,
|
| 37 |
+
derived from the sharded parameter
|
| 38 |
+
- All-gather output: the ``torch.Tensor`` or ``Tensor`` s resulting from
|
| 39 |
+
all-gathering the all-gather inputs
|
| 40 |
+
- Unsharded parameter: parameter used for forward/backward computation, derived
|
| 41 |
+
from the all-gather output; autograd leaf
|
| 42 |
+
|
| 43 |
+
We define these tensors to describe the general framework that can accommodate
|
| 44 |
+
extensions, where:
|
| 45 |
+
- all-gather-inputs = pre-all-gather-transform(sharded-parameter)
|
| 46 |
+
- unsharded-parameter = post-all-gather-transform(all-gather-outputs)
|
| 47 |
+
|
| 48 |
+
For the default ``torch.Tensor`` case, there is only one all-gather input, and
|
| 49 |
+
it shares the same underlying tensor data as the sharded parameter, meaning
|
| 50 |
+
that they can be thought of as the same tensors. The same applies for the
|
| 51 |
+
all-gather output and unsharded parameter. For non-``torch.Tensor`` extensions,
|
| 52 |
+
these equivalences may no longer hold due to the pre/post-all-gather
|
| 53 |
+
transforms, and some may have multiple all-gather inputs/outputs (e.g.
|
| 54 |
+
quantized data and scales).
|
| 55 |
+
|
| 56 |
+
[Note: FSDP and autograd]
|
| 57 |
+
FSDP dynamically frees and allocates the unsharded parameter. Since autograd
|
| 58 |
+
can pack a reference to it or a view to save for backward, we use storage
|
| 59 |
+
resizing to implement the freeing/allocation since that preserves the aliasing.
|
| 60 |
+
This implies that we construct the unsharded parameter object once and write to
|
| 61 |
+
it in-place thereafter. For the default ``torch.Tensor`` original parameter
|
| 62 |
+
case, the all-gather output and unsharded parameter share the same
|
| 63 |
+
data, so we use storage resizing on the all-gather output.
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
# Custom-op namespace "fsdp"; FRAGMENT allows other files to extend it
lib = torch.library.Library("fsdp", "FRAGMENT")  # noqa: TOR901

# Schema: in-place storage swap of `tensor` to alias `data` (mutates arg 0)
lib.define("set_(Tensor(a!) tensor, Tensor data) -> ()")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@torch.library.impl(lib, "set_", "Meta")
@torch.library.impl(lib, "set_", "CUDA")
@torch.library.impl(lib, "set_", "CPU")
def set_(tensor, data):
    """Swap ``tensor``'s storage to alias ``data`` in place via ``Tensor.set_``.

    Registered as the CPU/CUDA/Meta kernel for the ``fsdp.set_`` custom op so
    the storage swap appears as an op (rather than a method call) to the
    compile stack.
    """
    tensor.set_(data)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
"""
|
| 79 |
+
[Note: Avoiding functionalization for fsdp.set_ and inductor.resize_storage_bytes_(0)]
|
| 80 |
+
|
| 81 |
+
Currently we don't functionalize `fsdp.set_` op or `inductor.resize_storage_bytes_(0)` op
|
| 82 |
+
(i.e. they show up as a mutation op in the middle of the AOT joint graph).
|
| 83 |
+
|
| 84 |
+
Reason:
|
| 85 |
+
Traceable FSDP2 compiled autograd BWD graph have the following traits:
|
| 86 |
+
(1) Two inputs of the graph were aliased to each other (one from hook closed-over tensors, one from FWD saved tensors).
|
| 87 |
+
(2) One of them is mutated (set_ and resize_(0) to handle the all-gathered param).
|
| 88 |
+
(3) They are both subclasses.
|
| 89 |
+
The combination of these traits is not supported by AOTAutograd (it's difficult to reason about subclass aliasing).
|
| 90 |
+
So this doesn't work at all for Traceable FSDP2.
|
| 91 |
+
|
| 92 |
+
The compromise we use is to avoid functionalization for the FSDP2 set_ and resize_(0) ops.
|
| 93 |
+
This avoids the problem above, because from AOTAutograd point-of-view there are no mutations
|
| 94 |
+
that functionalization needs to handle. (Although we need to be careful not to DCE those mutable ops.)
|
| 95 |
+
|
| 96 |
+
We can avoid this functionalization because:
|
| 97 |
+
(1) The nn.Parameter is never used before its .set_() is called in eager code (i.e. no alias of it is created),
|
| 98 |
+
so it's safe to call .set_() in the middle of the graph to swap out its storage and start using the nn.Parameter downstream.
|
| 99 |
+
(2) We always re-allocate the buffer for nn.Parameter to store the AllGather output and to be used in downstream user ops.
|
| 100 |
+
So calling resize-to-0 in the middle of the graph to free nn.Parameter memory after use should always be okay
|
| 101 |
+
(since we always allocate anew next time we need it, we strictly don't need to keep the old tensor storage around anymore).
|
| 102 |
+
|
| 103 |
+
Q: But doesn't the torch.compile stack have the "functional graph" assumption in many places?
|
| 104 |
+
A: Yes - this is WIP but we will try to get back to functional graph as early as possible in the lowering process.
|
| 105 |
+
Specifically, we believe we can move both .set_ and .resize_(0) ops to end of graph in AOT joint graph before partitioner
|
| 106 |
+
(i.e. effectively "re-functionalizing" those ops). Put it in another way, we avoid functionalization for those two ops just to
|
| 107 |
+
make AOTAutograd alias analysis happy, and as soon as we are past that point, we "re-functionalize" the graph.
|
| 108 |
+
This requires a custom FX pass but we believe it's not hard to write and maintain.
|
| 109 |
+
|
| 110 |
+
Q: What's the importance of partitioner not saving views of nn.Parameter as FWD saved tensors?
|
| 111 |
+
A: This is critical: we do want to save FWD nn.Parameter graph input (instead of its view) for BWD use,
|
| 112 |
+
so that downstream ops in BWD graph uses the post-`.set_` nn.Parameter instead of any of its saved views as input.
|
| 113 |
+
This is because .set_ will not update any of the nn.Parameter's views, so BWD downstream ops must use the original
|
| 114 |
+
nn.Parameter in order to see the result of .set_.
|
| 115 |
+
"""
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@torch.library.impl(lib, "set_", "Functionalize")
def set__functionalize(tensor, data):
    """Functionalize kernel for ``fsdp.set_``: sync both functional wrappers,
    record the storage mutation for AOTDispatcher, then run the real op on the
    inner tensors with the Functionalize key excluded. See the preceding note
    on why this op is deliberately not functionalized.
    """
    torch._sync(tensor)
    torch._sync(data)
    # AOTDispatcher needs to know if any inputs had their storages mutated.
    # (Why? It sometimes detaches inputs before sending them into the graph,
    # when it sees that they do not need to have any gradients computed)
    torch._functionalize_set_storage_changed(tensor)
    tensor_inner = torch._from_functional_tensor(tensor)
    data_inner = torch._from_functional_tensor(data)
    # Exclude Functionalize so the inner call dispatches to the real kernel
    with torch._C._ExcludeDispatchKeyGuard(
        torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
    ):
        torch.ops.fsdp.set_.default(tensor_inner, data_inner)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# Mark the mutable op as side-effecting so FX passes never dead-code-eliminate it
torch.fx.node.has_side_effect(torch.ops.fsdp.set_.default)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class ShardedState(Enum):
    """
    - ``SHARDED``: The sharded parameter is registered to the module. It is the
      only contributor to parameter memory.
    - ``SHARDED_POST_FORWARD``: The unsharded parameter is resharded to a
      smaller world size. Since this data should not be used for computation,
      we do not register it to the module. Users should reshard the module
      before any in-place modifications. Both it and the sharded parameter
      contribute to parameter memory.
    - ``UNSHARDED``: The unsharded parameter is registered to the module. Both
      it and the sharded parameter contribute to parameter memory.
    """

    SHARDED = auto()
    SHARDED_POST_FORWARD = auto()
    UNSHARDED = auto()
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
@dataclass
class ParamModuleInfo:
    """
    For a parameter, this stores the module and the parameter name to be able
    to do a parameter swap via ``setattr(module, param_name, ...)`` or to get
    the parameter via ``getattr(module, param_name)``. We additionally save
    shared modules and shared parameter names to update them accordingly.
    """

    # Parameter names are unprefixed, e.g. "weight", not "lin.weight"
    module: nn.Module
    param_name: str
    # Modules/names that share this same parameter object (tied weights);
    # updated in lockstep with `module`/`param_name` on parameter swaps
    shared_modules: List[nn.Module] = field(default_factory=list)
    shared_param_names: List[str] = field(default_factory=list)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
@dataclass
class ExtensionsData:
    """Per-all-gather transient state for the all-gather extension hooks
    (``fsdp_pre_all_gather``/``fsdp_post_all_gather``); cleared after the
    post-all-gather consumes it."""

    # User-defined metadata passed from pre to post-all-gather
    all_gather_metadata: Optional[Any] = None
    # Save the all-gather input sizes to unflatten the all-gather outputs to ND
    all_gather_input_sizes: Sequence[torch.Size] = ()  # ND

    def clear(self) -> None:
        # Reset between all-gathers so stale metadata is never reused
        self.all_gather_metadata = None
        self.all_gather_input_sizes = ()
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
class FSDPParam:
|
| 184 |
+
"""
|
| 185 |
+
This class manages a parameter with FSDP or FSDP variants applied,
|
| 186 |
+
implementing dim-0 per-parameter sharding.
|
| 187 |
+
"""
|
| 188 |
+
|
| 189 |
+
orig_dtype: torch.dtype
|
| 190 |
+
param_dtype: Optional[torch.dtype]
|
| 191 |
+
reduce_dtype: Optional[torch.dtype]
|
| 192 |
+
_orig_size: torch.Size # ND
|
| 193 |
+
sharded_size: torch.Size # ND
|
| 194 |
+
contiguous_sharded_stride: Tuple[int, ...]
|
| 195 |
+
padded_sharded_param_size: torch.Size # ND
|
| 196 |
+
sharded_post_forward_size: torch.Size # ND
|
| 197 |
+
contiguous_sharded_post_forward_stride: Tuple[int, ...]
|
| 198 |
+
_sharded_param_data: torch.Tensor # 1D
|
| 199 |
+
sharded_param: nn.Parameter # ND
|
| 200 |
+
_sharded_post_forward_param_data: Optional[torch.Tensor] # 1D
|
| 201 |
+
_sharded_post_forward_param: Optional[nn.Parameter] # ND
|
| 202 |
+
_unsharded_param: nn.Parameter # ND
|
| 203 |
+
unsharded_accumulated_grad: Optional[torch.Tensor] # ND
|
| 204 |
+
_sharding_spec: DTensorSpec
|
| 205 |
+
# DTensor attributes (only defined for DTensor `param`):
|
| 206 |
+
_tp_spec: DTensorSpec
|
| 207 |
+
all_gather_outputs: List[torch.Tensor] # 1D
|
| 208 |
+
# All-gather extension attributes
|
| 209 |
+
_extensions_data: ExtensionsData
|
| 210 |
+
_unsharded_inner_tensors: List[torch.Tensor]
|
| 211 |
+
|
| 212 |
+
    def __init__(
        self,
        param: nn.Parameter,
        module_info: ParamModuleInfo,
        mesh_info: FSDPMeshInfo,
        post_forward_mesh_info: Optional[FSDPMeshInfo],
        device: torch.device,
        mp_policy: MixedPrecisionPolicy,
        offload_policy: OffloadPolicy,
    ):
        """Shard ``param`` on dim-0 over ``mesh_info`` and register the
        sharded parameter on the owning module(s).

        NOTE(review): ``mp_policy`` is accepted but not consumed here; dtype
        attributes appear to be set later via ``init_dtype_attrs`` — confirm
        against callers.
        """
        self._module_info: ParamModuleInfo = module_info
        self.mesh_info = mesh_info
        self.post_forward_mesh_info = post_forward_mesh_info
        self.device = device
        self.offload_to_cpu: bool = isinstance(offload_policy, CPUOffloadPolicy)
        self.pin_memory = (
            self.offload_to_cpu and cast(CPUOffloadPolicy, offload_policy).pin_memory
        )
        self.grad_offload_event: Optional[torch.cuda.Event] = None
        self._init_sharded_param(param, device)
        if self.post_forward_mesh_info:
            self._init_sharded_post_forward_param_metadata(param)
        self._init_extensions()
        self.all_gather_outputs: List[torch.Tensor] = []
        self.unsharded_accumulated_grad = None
        self._param_fqn: Optional[str] = None  # prefixed from root module
        # TODO: Remove this padding logic once DTensor pads the local tensor:
        # https://github.com/pytorch/pytorch/issues/113045
        self._post_load_hook_handle = (
            module_info.module.register_load_state_dict_post_hook(
                lambda *args, **kwargs: self.reset_sharded_param()
            )
        )
|
| 245 |
+
|
| 246 |
+
    @torch.no_grad()
    def _init_sharded_param(self, param: nn.Parameter, device: torch.device):
        """Construct the dim-0 sharded DTensor parameter from ``param``.

        Handles both plain tensors and TP DTensors (FSDP+TP), computes the
        sharding spec over the SPMD mesh, zero-pads the local shard so that
        every rank's shard has the rank-0 shard's size, optionally offloads it
        to (pinned) CPU memory, and registers the sharded parameter on the
        owning module(s).
        """
        if param.device != device and param.device.type != "meta":
            raise AssertionError(
                f"Expects the parameter to already be moved to device {device} but got {param.device}"
            )
        # TODO: Replace the sharded DTensor parameter construction logic with
        # `distribute_tensor` after https://github.com/pytorch/pytorch/issues/116101
        # TODO: Simplify the following sharded parameter padding logic after
        # https://github.com/pytorch/pytorch/issues/113045
        self.is_dtensor = isinstance(param, DTensor)
        if self.is_dtensor:
            # FSDP+TP: the parameter is already a (1D) TP DTensor; compose the
            # FSDP dim-0 shard with the existing TP placement
            self._tp_spec = cast(DTensor, param)._spec
            dp_mesh, tp_mesh = (self.mesh_info.mesh, self._tp_spec.mesh)
            dp_global_mesh = _mesh_resources.get_root_mesh(dp_mesh)
            tp_global_mesh = _mesh_resources.get_root_mesh(tp_mesh)
            if dp_global_mesh != tp_global_mesh or (
                dp_global_mesh is None or tp_global_mesh is None
            ):
                raise AssertionError(
                    "FSDP requires the DP and TP mesh to have the same parent mesh but got: \n"
                    f"DP's global mesh: {dp_global_mesh}\nTP's global mesh: {tp_global_mesh}"
                )

            name_dims_error = "FSDP requires named DeviceMesh dims for ND parallelism"
            assert dp_mesh.mesh_dim_names is not None, name_dims_error
            assert tp_mesh.mesh_dim_names is not None, name_dims_error
            submesh_names = dp_mesh.mesh_dim_names + tp_mesh.mesh_dim_names
            self._spmd_mesh = dp_global_mesh[submesh_names]
            if len(self._tp_spec.placements) != 1:
                raise NotImplementedError(
                    f"FSDP only supports 1D TP, not {self._tp_spec.placements}"
                )
            split_factor = self._tp_spec.num_shards_map[0]
            assert (
                2 <= self._spmd_mesh.ndim <= 3
            ), f"_spmd_mesh.ndim can only be 2 or 3 but got {self._spmd_mesh.ndim}."
            self._spmd_placements: Tuple[Placement, ...]
            # When TP already shards dim-0, FSDP's dim-0 shard becomes strided
            dp_shard_tp_placement = (
                (
                    _StridedShard(0, split_factor=split_factor)
                    if split_factor > 1
                    else Shard(0)
                ),
                self._tp_spec.placements[0],
            )
            if self._spmd_mesh.ndim == 2:
                self._spmd_placements = dp_shard_tp_placement
            else:
                # HSDP: the extra leading mesh dim replicates
                assert self.mesh_info.replicate_mesh_dim == 0
                self._spmd_placements = (Replicate(),) + dp_shard_tp_placement
            self._sharding_spec = DTensorSpec(
                self._spmd_mesh,
                self._spmd_placements,
                tensor_meta=self._tp_spec.tensor_meta,
            )
            # NOTE: FSDP+TP does not support uneven sharding for now
            # TODO: enable uneven sharding for FSDP+TP
            if split_factor > 1:  # FSDP has strided sharding on tensor dim 0
                num_shards = self._sharding_spec.num_shards_map[0]
                tensor_size_dim_0 = self._sharding_spec.shape[0]
                if tensor_size_dim_0 % num_shards != 0:
                    raise NotImplementedError(
                        "FSDP+TP sharding does not support uneven sharding for now: "
                        f"tensor dim 0 has size {tensor_size_dim_0} which cannot be "
                        f"evenly sharded into {num_shards} shards."
                    )

            param_data = cast(DTensor, param)._local_tensor
        else:
            # Plain tensor: shard dim-0 over the FSDP mesh (plus replicate for HSDP)
            self._spmd_mesh = self.mesh_info.mesh
            if isinstance(self.mesh_info, HSDPMeshInfo):
                self._spmd_placements = (Replicate(), Shard(0))
            else:
                self._spmd_placements = (Shard(0),)
            self._sharding_spec = DTensorSpec(
                self._spmd_mesh,
                self._spmd_placements,
                tensor_meta=TensorMeta(
                    param.size(),
                    param.stride(),
                    param.dtype,
                ),
            )
            param_data = param
        self._orig_size = param_data.size()
        self._contiguous_orig_stride = make_contiguous_strides_for(self._orig_size)
        shard_rank = self.mesh_info.shard_mesh_rank
        shard_world_size = self.mesh_info.shard_mesh_size
        chunks = _chunk_with_empty(param_data, shard_world_size, dim=0)
        sharded_param = chunks[shard_rank]
        self.sharded_size = _get_dim0_chunked_size(sharded_param, param_data.size())
        self.contiguous_sharded_stride = make_contiguous_strides_for(self.sharded_size)
        padded_sharded_size = chunks[0].size()  # 0th always padded
        # Zero-padded local shard so that every rank's flat shard is equal-sized
        padded_sharded_param = param_data.new_zeros(padded_sharded_size)
        self.padded_sharded_param_size = padded_sharded_param.size()
        if sharded_param.numel() > 0:
            padded_sharded_param[: sharded_param.size(0)].copy_(sharded_param)
        if self.offload_to_cpu and not padded_sharded_param.is_meta:
            padded_sharded_param = padded_sharded_param.cpu()
            if self.pin_memory:
                padded_sharded_param = padded_sharded_param.pin_memory()
        self._sharded_param_data = padded_sharded_param.view(-1)
        self.sharded_param = nn.Parameter(
            self.to_sharded_dtensor(padded_sharded_param[: sharded_param.size(0)])
        )
        self.sharded_param.requires_grad_(param.requires_grad)
        # Let `param_data` be freed normally when its ref count reaches 0 when
        # the `fully_shard` call returns to allow provided parameters to alias
        self._setattr_on_modules(self.sharded_param)
        self.sharded_state = ShardedState.SHARDED
|
| 357 |
+
|
| 358 |
+
    def _init_sharded_post_forward_param_metadata(self, param: torch.Tensor) -> None:
        """Precompute the size/stride of this parameter resharded onto the
        post-forward mesh; no data is allocated here."""
        mesh_info = self.post_forward_mesh_info
        assert mesh_info is not None  # mypy
        param_data = param._local_tensor if isinstance(param, DTensor) else param
        chunks = _chunk_with_empty(param_data, mesh_info.shard_mesh_size, dim=0)
        self.sharded_post_forward_size = _get_dim0_chunked_size(
            chunks[mesh_info.shard_mesh_rank], param_data.size()
        )
        self.contiguous_sharded_post_forward_stride = make_contiguous_strides_for(
            self.sharded_post_forward_size
        )
|
| 369 |
+
|
| 370 |
+
    def init_dtype_attrs(self, mp_policy: MixedPrecisionPolicy):
        """Record original/param/reduce dtypes from the mixed-precision policy.

        ``param_dtype``/``reduce_dtype`` of ``None`` means no cast for that
        direction.
        """
        param_dtype, reduce_dtype = (mp_policy.param_dtype, mp_policy.reduce_dtype)
        self.orig_dtype = self.sharded_param.dtype
        # Clamp `param_dtype` to `None` if no casting is required
        if param_dtype == self.orig_dtype:
            param_dtype = None
        self.param_dtype = param_dtype
        self.reduce_dtype = reduce_dtype
        # None indicates that the mixed precision is not enabled
|
| 380 |
+
    def _init_extensions(self) -> None:
        """Validate the optional all-gather extension hooks on the sharded
        local tensor (pre and post hooks must be defined together, and dim-0
        sharding must be even) and initialize extension bookkeeping state."""
        inner_tensor = self._sharded_local_tensor
        has_fsdp_pre_all_gather = hasattr(inner_tensor, "fsdp_pre_all_gather")
        has_fsdp_post_all_gather = hasattr(inner_tensor, "fsdp_post_all_gather")
        if has_fsdp_pre_all_gather != has_fsdp_post_all_gather:
            raise AssertionError(
                "Both fsdp_pre_all_gather and fsdp_post_all_gather should be defined "
                f"if using all-gather extensions: {inner_tensor}"
            )
        if has_fsdp_pre_all_gather:
            # Padded vs. actual size differing implies uneven dim-0 sharding
            if self.padded_sharded_param_size != self._sharded_local_tensor.size():
                raise NotImplementedError(
                    "FSDP all-gather extensions require even sharding on dim-0.\n"
                    f"{self._orig_size} is not divisible by FSDP world size {self.mesh_info.mesh.size()}."
                )
            self._extensions_data = ExtensionsData()
        self._unsharded_inner_tensors: List[torch.Tensor] = []
|
| 397 |
+
|
| 398 |
+
    def init_all_gather_outputs(
        self,
        all_gather_input_numels: List[int],
        all_gather_input_dtypes: List[torch.dtype],
        world_size: int,
        device: torch.device,
        force_recreate: bool = False,
    ):
        """Allocate the flat (1D) all-gather output buffers, one per all-gather
        input, each of numel ``input_numel * world_size``.

        No-op if buffers already exist unless ``force_recreate`` is set (used
        under compile, where buffers are recreated per all-gather).
        """
        if not force_recreate and len(self.all_gather_outputs) > 0:
            return  # already initialized
        self.all_gather_outputs = [
            torch.empty(torch.Size([numel * world_size]), dtype=dtype, device=device)
            for numel, dtype in zip(all_gather_input_numels, all_gather_input_dtypes)
        ]
|
| 412 |
+
|
| 413 |
+
    def init_unsharded_param(self):
        """
        Initialize (or re-populate) the unsharded parameter from the
        all-gather outputs, honoring the optional post-all-gather extension.

        [Note: Invariants for torch.compile Traceable FSDP2]
        1. Under compile, we always re-populate the content of `self._unsharded_param`
           per AllGather using the slow path.
        2. Under compile, we always recreate `self.all_gather_outputs` per AllGather.
           This is to ensure the buffer creation is internal to the graph and
           avoid `self.all_gather_outputs` being captured as a graph input.
        3. Under compile, at the end of `free_unsharded_param()`, we always clean up
           `self.all_gather_outputs` and `self._unsharded_inner_tensors`,
           to avoid them being captured as graph output.

        With these invariants, only these tensors will be inputs to the graph:
        - Sharded parameters
        - Placeholders for the `self._unsharded_param` nn.Parameter
        """
        # Eager fast path after the 1st all-gather: write the post-all-gather
        # result into the existing unsharded parameter in place
        if not ca.compiled_autograd_enabled and hasattr(
            self, "_unsharded_param"
        ):  # after the 1st all-gather
            inner_tensor = self._sharded_local_tensor
            if not hasattr(inner_tensor, "fsdp_post_all_gather"):
                return  # already initialized
            for tensor in self._unsharded_inner_tensors:
                alloc_storage(tensor)
            all_gather_outputs = self._unflatten_all_gather_outputs()
            inner_tensor.fsdp_post_all_gather(
                all_gather_outputs,
                self._extensions_data.all_gather_metadata,
                self.param_dtype or self.orig_dtype,
                out=self._unsharded_param,
            )
            self._extensions_data.clear()
            return
        inner_tensor = self._sharded_local_tensor
        if not ca.compiled_autograd_enabled and hasattr(
            inner_tensor, "fsdp_post_all_gather"
        ):
            # Extension path: let the inner tensor build the unsharded tensor
            all_gather_outputs = self._unflatten_all_gather_outputs()
            (
                unsharded_tensor,
                self._unsharded_inner_tensors,
            ) = inner_tensor.fsdp_post_all_gather(
                all_gather_outputs,
                self._extensions_data.all_gather_metadata,
                self.param_dtype or self.orig_dtype,
            )
            self._extensions_data.clear()
        else:
            # For the default path (no post-all-gather), the all-gather output
            # gives the unsharded parameter data directly
            assert len(self.all_gather_outputs) == 1, f"{len(self.all_gather_outputs)}"
            unsharded_tensor = self.all_gather_outputs[0]
        unsharded_param = torch.as_strided(
            unsharded_tensor,
            self._orig_size,
            self._contiguous_orig_stride,
            storage_offset=0,
        )
        if self.is_dtensor:
            unsharded_param = _from_local_no_grad(unsharded_param, self._tp_spec)
        if hasattr(self, "_unsharded_param"):
            # Compile path: swap storage into the existing nn.Parameter without
            # bumping its version counter (see the file-level functionalization note)
            assert ca.compiled_autograd_enabled
            with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(
                self._unsharded_param
            ):
                torch.ops.fsdp.set_.default(self._unsharded_param, unsharded_param)
        else:
            self._unsharded_param = nn.Parameter(
                unsharded_param, requires_grad=self.sharded_param.requires_grad
            )
|
| 483 |
+
|
| 484 |
+
    def _unflatten_all_gather_outputs(self) -> Tuple[torch.Tensor, ...]:
        """View each flat all-gather output as ND using the trailing dims of
        the recorded all-gather input sizes (dim-0 is inferred)."""
        return tuple(
            t.view(-1, *s[1:])
            for t, s in zip(
                self.all_gather_outputs, self._extensions_data.all_gather_input_sizes
            )
        )
|
| 491 |
+
|
| 492 |
+
    def to_sharded(self) -> None:
        """Re-register the sharded parameter on the module(s) and free the
        unsharded parameter's memory."""
        self._setattr_on_modules(self.sharded_param)
        self.free_unsharded_param()
        self.sharded_state = ShardedState.SHARDED
|
| 496 |
+
|
| 497 |
+
    def to_sharded_post_forward(self) -> None:
        """Reshard the unsharded parameter onto the smaller post-forward mesh:
        clone this rank's slice of the all-gather output, wrap it as a DTensor
        parameter, and free the unsharded parameter."""
        if self.is_dtensor:
            raise NotImplementedError(
                "Resharding to smaller mesh with TP is not supported yet"
            )
        self._assert_in_states(ShardedState.UNSHARDED)
        assert self.post_forward_mesh_info is not None  # mypy
        assert len(self.all_gather_outputs) == 1
        shard_world_size = self.post_forward_mesh_info.shard_mesh_size
        if (numel := self.all_gather_outputs[0].numel()) % shard_world_size != 0:
            _raise_assert_with_print(
                f"All-gather output size ({numel}) must be divisible by the shard "
                f"world size ({shard_world_size})"
            )
        shard_rank = self.post_forward_mesh_info.shard_mesh_rank
        sharded_numel = numel // shard_world_size
        self._sharded_post_forward_param_data = (
            self.all_gather_outputs[0].narrow(
                0, sharded_numel * shard_rank, sharded_numel
            )
        ).clone()  # clone to be able to free all-gather output
        sharded_post_forward_tensor = torch.as_strided(
            self._sharded_post_forward_param_data,
            size=self.sharded_post_forward_size,
            stride=self.contiguous_sharded_post_forward_stride,
            storage_offset=0,
        )
        self._sharded_post_forward_param = nn.Parameter(
            self.to_sharded_post_forward_dtensor(sharded_post_forward_tensor)
        )
        self._setattr_on_modules(self._sharded_post_forward_param)
        self.free_unsharded_param()
        self.sharded_state = ShardedState.SHARDED_POST_FORWARD
|
| 530 |
+
|
| 531 |
+
def to_unsharded(self) -> None:
|
| 532 |
+
# Assume that the data has been allocated and all-gathered
|
| 533 |
+
set_requires_grad_if_needed(self.sharded_param, self._unsharded_param)
|
| 534 |
+
self._setattr_on_modules(self._unsharded_param)
|
| 535 |
+
if self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
|
| 536 |
+
# The data is allocated in the default stream via the post-forward
|
| 537 |
+
# reshard and must be kept alive for the next all-gather copy-in.
|
| 538 |
+
# Since we call this method after the copy-out, the data's lifetime
|
| 539 |
+
# is ensured without further synchronization.
|
| 540 |
+
self._sharded_post_forward_param = None
|
| 541 |
+
self._sharded_post_forward_param_data = None # free
|
| 542 |
+
self.sharded_state = ShardedState.UNSHARDED
|
| 543 |
+
|
| 544 |
+
def _setattr_on_modules(self, param: nn.Parameter) -> None:
|
| 545 |
+
unsafe_setattr_param(
|
| 546 |
+
self._module_info.module, self._module_info.param_name, param
|
| 547 |
+
)
|
| 548 |
+
for shared_module, shared_param_name in zip(
|
| 549 |
+
self._module_info.shared_modules, self._module_info.shared_param_names
|
| 550 |
+
):
|
| 551 |
+
unsafe_setattr_param(shared_module, shared_param_name, param)
|
| 552 |
+
|
| 553 |
+
def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor:
|
| 554 |
+
"""
|
| 555 |
+
Converts a local tensor representing either the sharded parameter or
|
| 556 |
+
sharded gradient to DTensor.
|
| 557 |
+
"""
|
| 558 |
+
if tensor.shape != self.sharded_size:
|
| 559 |
+
_raise_assert_with_print(
|
| 560 |
+
f"Expects size {self.sharded_size} but got {tensor.shape}"
|
| 561 |
+
)
|
| 562 |
+
return _from_local_no_grad(
|
| 563 |
+
tensor,
|
| 564 |
+
self._sharding_spec,
|
| 565 |
+
)
|
| 566 |
+
|
| 567 |
+
def to_sharded_post_forward_dtensor(self, tensor: torch.Tensor) -> DTensor:
|
| 568 |
+
if tensor.shape != self.sharded_post_forward_size:
|
| 569 |
+
_raise_assert_with_print(
|
| 570 |
+
f"Expects size {self.sharded_post_forward_size} but got {tensor.shape}"
|
| 571 |
+
)
|
| 572 |
+
assert isinstance(self.post_forward_mesh_info, HSDPMeshInfo)
|
| 573 |
+
# TODO: Prefer this DTensor to be read-only and generalize the
|
| 574 |
+
# placement once we support TP.
|
| 575 |
+
post_forward_sharding_spec = DTensorSpec(
|
| 576 |
+
self.post_forward_mesh_info.mesh,
|
| 577 |
+
(Replicate(), Shard(0)),
|
| 578 |
+
tensor_meta=self._sharding_spec.tensor_meta,
|
| 579 |
+
)
|
| 580 |
+
return _from_local_no_grad(tensor, post_forward_sharding_spec)
|
| 581 |
+
|
| 582 |
+
def to_accumulated_grad_if_needed(self) -> None:
|
| 583 |
+
# Access `_unsharded_param` to bypass the sharded state check since we
|
| 584 |
+
# prefer to reshard before upcasting the gradient to save memory
|
| 585 |
+
if (
|
| 586 |
+
self.reduce_dtype is None
|
| 587 |
+
or self._unsharded_param.grad is None
|
| 588 |
+
or self._unsharded_param.grad.dtype == self.reduce_dtype
|
| 589 |
+
):
|
| 590 |
+
return
|
| 591 |
+
unsharded_grad = self._unsharded_param.grad
|
| 592 |
+
self._unsharded_param.grad = None
|
| 593 |
+
self.unsharded_accumulated_grad = unsharded_grad.to(self.reduce_dtype)
|
| 594 |
+
|
| 595 |
+
def accumulate_unsharded_grad_if_needed(self) -> None:
|
| 596 |
+
if (
|
| 597 |
+
self.unsharded_accumulated_grad is not None
|
| 598 |
+
and self.unsharded_param.grad is not None
|
| 599 |
+
):
|
| 600 |
+
self.unsharded_accumulated_grad += self.unsharded_param.grad
|
| 601 |
+
self.unsharded_param.grad = None
|
| 602 |
+
|
| 603 |
+
def alloc_all_gather_outputs(self) -> None:
|
| 604 |
+
for tensor in self.all_gather_outputs:
|
| 605 |
+
alloc_storage(tensor)
|
| 606 |
+
|
| 607 |
+
def free_unsharded_param(self) -> None:
|
| 608 |
+
for tensor in itertools.chain(
|
| 609 |
+
self.all_gather_outputs, self._unsharded_inner_tensors
|
| 610 |
+
):
|
| 611 |
+
free_storage(tensor)
|
| 612 |
+
if ca.compiled_autograd_enabled:
|
| 613 |
+
self.all_gather_outputs = []
|
| 614 |
+
self._unsharded_inner_tensors = []
|
| 615 |
+
|
| 616 |
+
@property
|
| 617 |
+
def all_gather_inputs(self) -> List[torch.Tensor]: # 1D
|
| 618 |
+
self._assert_in_states(ShardedState.SHARDED, ShardedState.SHARDED_POST_FORWARD)
|
| 619 |
+
if self.sharded_state == ShardedState.SHARDED:
|
| 620 |
+
if not ca.compiled_autograd_enabled and hasattr(
|
| 621 |
+
self._sharded_local_tensor, "fsdp_pre_all_gather"
|
| 622 |
+
):
|
| 623 |
+
sharded_local_tensor = self._sharded_local_tensor
|
| 624 |
+
if self.offload_to_cpu:
|
| 625 |
+
sharded_local_tensor = sharded_local_tensor.to(
|
| 626 |
+
self.device, non_blocking=True
|
| 627 |
+
)
|
| 628 |
+
(
|
| 629 |
+
all_gather_inputs,
|
| 630 |
+
self._extensions_data.all_gather_metadata,
|
| 631 |
+
) = sharded_local_tensor.fsdp_pre_all_gather(self.mesh_info.mesh)
|
| 632 |
+
self._extensions_data.all_gather_input_sizes = [
|
| 633 |
+
t.size() for t in all_gather_inputs
|
| 634 |
+
]
|
| 635 |
+
return [t.view(-1) for t in all_gather_inputs]
|
| 636 |
+
sharded_param_data = self._sharded_param_data
|
| 637 |
+
if self.offload_to_cpu:
|
| 638 |
+
sharded_param_data = sharded_param_data.to(
|
| 639 |
+
self.device, non_blocking=True
|
| 640 |
+
)
|
| 641 |
+
return [_to_dtype_if_needed(sharded_param_data, self.param_dtype)]
|
| 642 |
+
elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
|
| 643 |
+
if not ca.compiled_autograd_enabled and hasattr(
|
| 644 |
+
self._sharded_local_tensor, "fsdp_pre_all_gather"
|
| 645 |
+
):
|
| 646 |
+
raise NotImplementedError
|
| 647 |
+
all_gather_input = _to_dtype_if_needed(
|
| 648 |
+
cast(torch.Tensor, self._sharded_post_forward_param_data),
|
| 649 |
+
self.param_dtype,
|
| 650 |
+
)
|
| 651 |
+
return [all_gather_input]
|
| 652 |
+
return [torch.empty(0)] # mypy
|
| 653 |
+
|
| 654 |
+
@property
|
| 655 |
+
def unsharded_param(self) -> nn.Parameter: # ND
|
| 656 |
+
self._assert_in_states(ShardedState.UNSHARDED)
|
| 657 |
+
return self._unsharded_param
|
| 658 |
+
|
| 659 |
+
@property
|
| 660 |
+
def unsharded_grad_data(self) -> torch.Tensor:
|
| 661 |
+
grad = self.unsharded_param.grad
|
| 662 |
+
assert grad is not None, "Expects unsharded_param.grad to not be None"
|
| 663 |
+
return self._get_grad_inner_tensor(grad)
|
| 664 |
+
|
| 665 |
+
@property
|
| 666 |
+
def unsharded_accumulated_grad_data(self) -> torch.Tensor:
|
| 667 |
+
grad = self.unsharded_accumulated_grad
|
| 668 |
+
assert grad is not None, "Expects unsharded_accumulated_grad to not be None"
|
| 669 |
+
return self._get_grad_inner_tensor(grad)
|
| 670 |
+
|
| 671 |
+
def _get_grad_inner_tensor(self, grad: torch.Tensor) -> torch.Tensor:
|
| 672 |
+
if self.is_dtensor:
|
| 673 |
+
if isinstance(grad, AsyncCollectiveTensor):
|
| 674 |
+
grad = grad.wait()
|
| 675 |
+
assert isinstance(grad, DTensor), f"{type(grad)}"
|
| 676 |
+
if any(pl.is_partial() for pl in grad.placements):
|
| 677 |
+
placements = [
|
| 678 |
+
Replicate() if pl.is_partial() else pl for pl in grad.placements
|
| 679 |
+
]
|
| 680 |
+
grad = grad.redistribute(placements=placements)
|
| 681 |
+
grad = grad._local_tensor
|
| 682 |
+
return grad
|
| 683 |
+
|
| 684 |
+
@property
|
| 685 |
+
def _sharded_local_tensor(self) -> torch.Tensor:
|
| 686 |
+
return cast(DTensor, self.sharded_param)._local_tensor
|
| 687 |
+
|
| 688 |
+
def _assert_in_states(self, *states: ShardedState) -> None:
|
| 689 |
+
if self.sharded_state not in states:
|
| 690 |
+
_raise_assert_with_print(
|
| 691 |
+
f"Expects to be in one of {states}, not {self.sharded_state}"
|
| 692 |
+
)
|
| 693 |
+
|
| 694 |
+
def reset_sharded_param(self):
|
| 695 |
+
# For ops like `nn.Module._apply` or `load_state_dict(assign=True)`
|
| 696 |
+
# that change the sharded parameter tensor, we may need to re-pad the
|
| 697 |
+
# sharded local tensor and re-save the reference.
|
| 698 |
+
module_info = self._module_info
|
| 699 |
+
new_param = getattr(module_info.module, module_info.param_name)
|
| 700 |
+
if new_param is not self.sharded_param:
|
| 701 |
+
if torch.__future__.get_swap_module_params_on_conversion():
|
| 702 |
+
raise AssertionError(
|
| 703 |
+
f"Expects swap_tensors to preserve object but got {new_param} "
|
| 704 |
+
f"instead of {self.sharded_param}"
|
| 705 |
+
)
|
| 706 |
+
self.sharded_param = new_param
|
| 707 |
+
local_tensor = new_param._local_tensor
|
| 708 |
+
if local_tensor.is_meta:
|
| 709 |
+
return
|
| 710 |
+
padded_sharded_size = self.padded_sharded_param_size
|
| 711 |
+
if local_tensor.size() != padded_sharded_size:
|
| 712 |
+
padded_local_tensor = local_tensor.new_zeros(padded_sharded_size)
|
| 713 |
+
padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor)
|
| 714 |
+
local_tensor = padded_local_tensor
|
| 715 |
+
if self.pin_memory and not local_tensor.is_pinned():
|
| 716 |
+
local_tensor = local_tensor.cpu().pin_memory()
|
| 717 |
+
self._sharded_param_data = local_tensor.view(-1)
|
| 718 |
+
assert isinstance(self.sharded_param, DTensor) # mypy
|
| 719 |
+
self.sharded_param._local_tensor = local_tensor[: self.sharded_size[0]]
|
| 720 |
+
|
| 721 |
+
def __repr__(self):
|
| 722 |
+
return f"FSDPParam(fqn={self._param_fqn}, orig_size={self._orig_size})"
|
| 723 |
+
|
| 724 |
+
|
| 725 |
+
def alloc_storage(tensor: torch.Tensor) -> None:
|
| 726 |
+
size = tensor.numel() * tensor.itemsize
|
| 727 |
+
if (storage := tensor.untyped_storage()).size() != size:
|
| 728 |
+
storage.resize_(size)
|
| 729 |
+
|
| 730 |
+
|
| 731 |
+
def free_storage(tensor: torch.Tensor) -> None:
|
| 732 |
+
if (storage := tensor.untyped_storage()).size() != 0:
|
| 733 |
+
storage.resize_(0)
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
# NOTE: These bypass `nn.Module.__setattr__` checks, which incur non-trivial
|
| 737 |
+
# CPU overhead, if the module did not override it. For FSDP, we know we do not
|
| 738 |
+
# need those checks when transitioning between sharded/unsharded parameters.
|
| 739 |
+
def unsafe_setattr_param(
|
| 740 |
+
module: nn.Module, param_name: str, param: nn.Parameter
|
| 741 |
+
) -> None:
|
| 742 |
+
if getattr(module.__setattr__, "__func__", None) is nn.Module.__setattr__:
|
| 743 |
+
module._parameters[param_name] = param
|
| 744 |
+
else: # slow path
|
| 745 |
+
setattr(module, param_name, param)
|
| 746 |
+
|
| 747 |
+
|
| 748 |
+
def set_requires_grad_if_needed(
|
| 749 |
+
src_tensor: torch.Tensor, dst_tensor: torch.Tensor
|
| 750 |
+
) -> None:
|
| 751 |
+
# Only call `requires_grad_` if needed to avoid the Python <> C++ context
|
| 752 |
+
# switch overhead
|
| 753 |
+
if src_tensor.requires_grad != dst_tensor.requires_grad:
|
| 754 |
+
dst_tensor.requires_grad_(src_tensor.requires_grad)
|
janus/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
from typing import Callable, Iterable, Optional, Union
|
| 3 |
+
from typing_extensions import deprecated
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
from torch.distributed._composable.contract import contract
|
| 9 |
+
from torch.distributed._composable_state import _get_module_state, _insert_module_state
|
| 10 |
+
from torch.distributed.fsdp._common_utils import _FSDPState
|
| 11 |
+
from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo
|
| 12 |
+
from torch.distributed.fsdp._init_utils import (
|
| 13 |
+
_init_buffer_state,
|
| 14 |
+
_init_core_state,
|
| 15 |
+
_init_device_handle,
|
| 16 |
+
_init_ignored_module_states,
|
| 17 |
+
_init_param_handle_from_module,
|
| 18 |
+
_init_prefetching_state,
|
| 19 |
+
_init_process_group_state,
|
| 20 |
+
_init_runtime_state,
|
| 21 |
+
_init_state_dict_state,
|
| 22 |
+
HYBRID_SHARDING_STRATEGIES,
|
| 23 |
+
)
|
| 24 |
+
from torch.distributed.fsdp._runtime_utils import (
|
| 25 |
+
_register_post_forward_hook,
|
| 26 |
+
_register_pre_forward_hook,
|
| 27 |
+
_register_root_pre_forward_hook,
|
| 28 |
+
)
|
| 29 |
+
from torch.distributed.fsdp._state_dict_utils import _register_all_state_dict_hooks
|
| 30 |
+
from torch.distributed.fsdp._wrap_utils import _auto_wrap
|
| 31 |
+
from torch.distributed.fsdp.api import (
|
| 32 |
+
BackwardPrefetch,
|
| 33 |
+
CPUOffload,
|
| 34 |
+
MixedPrecision,
|
| 35 |
+
ShardingStrategy,
|
| 36 |
+
)
|
| 37 |
+
from torch.distributed.fsdp.wrap import _Policy
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@contract(state_cls=_FSDPState)
|
| 41 |
+
@deprecated(
|
| 42 |
+
"`torch.distributed._composable.fully_shard` is being deprecated. "
|
| 43 |
+
"You can continue to use the wrapper based FSDP. "
|
| 44 |
+
"See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py. "
|
| 45 |
+
"`torch.distributed._composable.fully_shard` will be removed after PyTorch 2.5.",
|
| 46 |
+
category=FutureWarning,
|
| 47 |
+
)
|
| 48 |
+
def fully_shard(
|
| 49 |
+
module: nn.Module,
|
| 50 |
+
*,
|
| 51 |
+
process_group: Optional[dist.ProcessGroup] = None,
|
| 52 |
+
policy: Optional[_Policy] = None,
|
| 53 |
+
strategy: Optional[ShardingStrategy] = None,
|
| 54 |
+
mixed_precision: Optional[MixedPrecision] = None,
|
| 55 |
+
cpu_offload: Optional[CPUOffload] = None,
|
| 56 |
+
ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
|
| 57 |
+
device_id: Optional[Union[int, torch.device]] = None,
|
| 58 |
+
param_init_fn: Optional[Callable[[nn.Module], None]] = None,
|
| 59 |
+
sync_module_states: bool = False,
|
| 60 |
+
forward_prefetch: bool = False,
|
| 61 |
+
ignored_states: Union[
|
| 62 |
+
Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
|
| 63 |
+
] = None,
|
| 64 |
+
) -> nn.Module:
|
| 65 |
+
"""Applies ``FullyShardedDataParallel`` (FSDP) semantics to ``module``."""
|
| 66 |
+
torch._C._log_api_usage_once("torch.distributed.fully_shard")
|
| 67 |
+
# Enforce the new auto wrap policy
|
| 68 |
+
if policy is not None and not isinstance(policy, _Policy):
|
| 69 |
+
raise ValueError(f"Expects a `_Policy` but got {policy}")
|
| 70 |
+
state = fully_shard.state(module)
|
| 71 |
+
state = _init_ignored_module_states(state, module, ignored_modules, ignored_states)
|
| 72 |
+
state = _init_device_handle(state, module, state._ignored_params, device_id)
|
| 73 |
+
_annotate_modules_for_dynamo(module, state._ignored_modules, True)
|
| 74 |
+
state = _init_process_group_state(state, process_group, strategy, policy)
|
| 75 |
+
if policy is not None:
|
| 76 |
+
root_kwargs = {
|
| 77 |
+
"process_group": process_group,
|
| 78 |
+
"strategy": strategy,
|
| 79 |
+
"mixed_precision": mixed_precision,
|
| 80 |
+
"cpu_offload": cpu_offload,
|
| 81 |
+
"ignored_modules": ignored_modules,
|
| 82 |
+
"device_id": device_id,
|
| 83 |
+
"param_init_fn": param_init_fn,
|
| 84 |
+
"sync_module_states": sync_module_states,
|
| 85 |
+
"forward_prefetch": forward_prefetch,
|
| 86 |
+
"ignored_states": ignored_states,
|
| 87 |
+
}
|
| 88 |
+
if strategy in HYBRID_SHARDING_STRATEGIES:
|
| 89 |
+
root_kwargs["process_group"] = (state.process_group, state._inter_node_pg)
|
| 90 |
+
_auto_wrap(
|
| 91 |
+
module,
|
| 92 |
+
policy,
|
| 93 |
+
state._ignored_modules,
|
| 94 |
+
state._ignored_params,
|
| 95 |
+
root_kwargs,
|
| 96 |
+
fully_shard,
|
| 97 |
+
)
|
| 98 |
+
state = _init_core_state(
|
| 99 |
+
state,
|
| 100 |
+
strategy or ShardingStrategy.FULL_SHARD,
|
| 101 |
+
mixed_precision,
|
| 102 |
+
cpu_offload,
|
| 103 |
+
limit_all_gathers=True,
|
| 104 |
+
use_orig_params=True,
|
| 105 |
+
backward_prefetch_limit=1,
|
| 106 |
+
forward_prefetch_limit=1,
|
| 107 |
+
)
|
| 108 |
+
state = _init_runtime_state(state)
|
| 109 |
+
state = _init_prefetching_state(
|
| 110 |
+
state, BackwardPrefetch.BACKWARD_PRE, forward_prefetch=forward_prefetch
|
| 111 |
+
)
|
| 112 |
+
state = _init_buffer_state(state, module)
|
| 113 |
+
state = _init_param_handle_from_module(
|
| 114 |
+
state, module, device_id, param_init_fn, sync_module_states
|
| 115 |
+
)
|
| 116 |
+
state = _init_state_dict_state(state)
|
| 117 |
+
_register_all_state_dict_hooks(state)
|
| 118 |
+
_register_pre_forward_hook(state, module)
|
| 119 |
+
_register_post_forward_hook(state, module)
|
| 120 |
+
_register_root_pre_forward_hook(state, module) # prepend last
|
| 121 |
+
# Always insert the state for the passed-in module even if it has no
|
| 122 |
+
# managed parameters, in which case it has no handles and does not appear
|
| 123 |
+
# in `_fully_sharded_module_to_handles`
|
| 124 |
+
_insert_module_state(module, state)
|
| 125 |
+
for submodule in module.modules():
|
| 126 |
+
if (
|
| 127 |
+
submodule in state._fully_sharded_module_to_handle
|
| 128 |
+
and _get_module_state(submodule) is None
|
| 129 |
+
):
|
| 130 |
+
_insert_module_state(submodule, state)
|
| 131 |
+
return module
|
janus/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/mod_tracker.cpython-310.pyc
ADDED
|
Binary file (9.26 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/_tools/runtime_estimator.py
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Owner(s): ["module: unknown"]
|
| 2 |
+
import math
|
| 3 |
+
import os
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
from typing import Any, Callable, Dict, List, Set, Tuple
|
| 6 |
+
from typing_extensions import Self
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.utils._pytree as pytree
|
| 10 |
+
from torch._guards import active_fake_mode
|
| 11 |
+
from torch._inductor.utils import get_device_tflops, get_gpu_dram_gbps
|
| 12 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 13 |
+
from torch.distributed._tools.mod_tracker import ModTracker
|
| 14 |
+
from torch.utils._mode_utils import no_dispatch
|
| 15 |
+
from torch.utils._python_dispatch import TorchDispatchMode
|
| 16 |
+
from torch.utils.flop_counter import flop_registry
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
aten = torch.ops.aten
|
| 20 |
+
|
| 21 |
+
# This value is hard-coded here:
|
| 22 |
+
# https://github.com/pytorch/pytorch/blob/5fba5d83f0703ff8077ab65448a998e9ad6598fd/c10/cuda/CUDACachingAllocator.cpp#L117
|
| 23 |
+
_PYTORCH_MIN_ALLOCATE = (
|
| 24 |
+
2**9 if int(os.environ.get("PYTORCH_NO_CUDA_MEMORY_CACHING", 0)) == 0 else 1
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
# No fall-back kernel needed/exists for view ops
|
| 28 |
+
_VIEW_OPS = {
|
| 29 |
+
aten.lift_fresh,
|
| 30 |
+
aten.t,
|
| 31 |
+
aten.transpose,
|
| 32 |
+
aten.view,
|
| 33 |
+
aten.detach,
|
| 34 |
+
aten._unsafe_view,
|
| 35 |
+
aten.split,
|
| 36 |
+
aten.adjoint,
|
| 37 |
+
aten.as_strided,
|
| 38 |
+
aten.diagonal,
|
| 39 |
+
aten.expand,
|
| 40 |
+
aten.expand_as,
|
| 41 |
+
aten.movedim,
|
| 42 |
+
aten.permute,
|
| 43 |
+
aten.select,
|
| 44 |
+
aten.squeeze,
|
| 45 |
+
aten.mT,
|
| 46 |
+
aten.mH,
|
| 47 |
+
aten.real,
|
| 48 |
+
aten.imag,
|
| 49 |
+
aten.view_as,
|
| 50 |
+
aten.unflatten,
|
| 51 |
+
aten.unfold,
|
| 52 |
+
aten.unbind,
|
| 53 |
+
aten.unsqueeze,
|
| 54 |
+
aten.vsplit,
|
| 55 |
+
aten.hsplit,
|
| 56 |
+
aten.split_with_sizes,
|
| 57 |
+
aten.swapaxes,
|
| 58 |
+
aten.swapdims,
|
| 59 |
+
aten.chunk,
|
| 60 |
+
}
|
| 61 |
+
# We can ignore benchmarking tensor create ops
|
| 62 |
+
_CREATE_OPS = {
|
| 63 |
+
aten.randint,
|
| 64 |
+
aten.randn,
|
| 65 |
+
aten.rand,
|
| 66 |
+
aten.randn_like,
|
| 67 |
+
aten.rand_like,
|
| 68 |
+
aten.randint_like,
|
| 69 |
+
aten.arange,
|
| 70 |
+
aten.ones_like,
|
| 71 |
+
aten.zeros_like,
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
_IGNORE_OPS = _VIEW_OPS | _CREATE_OPS
|
| 75 |
+
|
| 76 |
+
__all__ = ["RuntimeEstimator"]
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class RuntimeEstimator(TorchDispatchMode):
|
| 80 |
+
"""
|
| 81 |
+
Estimates the GPU runtime in milliseconds using various estimation methods under the ``FakeTensorMode``.
|
| 82 |
+
|
| 83 |
+
This class provides a ``TorchDispatchMode`` based context manager that can be used to estimate the eager
|
| 84 |
+
runtime of PyTorch functions. It supports two estimation modes, benchmarking (`operator-level-benchmark`) and
|
| 85 |
+
roofline cost modeling (`operator-level-cost-model`).
|
| 86 |
+
For modules executed under this context manager, it agggregates the forward and backward operation runtimes
|
| 87 |
+
and also records their execution orders.
|
| 88 |
+
|
| 89 |
+
Attributes:
|
| 90 |
+
mod_runtimes (Dict[str, Dict[str, float]]): A dictionary of module runtimes. The key to the outer dictionary
|
| 91 |
+
is the fully qualified name (FQN) of the module. For each module the forward and backward runtimes of the
|
| 92 |
+
operations are aggregated in the inner dictionary keyed by 'fw' and 'bw'.
|
| 93 |
+
mod_fw_pre_order (List[str]): List of module FQNs in pre-forward execution order.
|
| 94 |
+
mod_bw_pre_order (List[str]): List of module FQNs in pre-backward execution order.
|
| 95 |
+
mod_fw_post_order (List[str]): List of module FQNs in post-forward execution order.
|
| 96 |
+
mod_bw_post_order (List[str]): List of module FQNs in post-backward execution order.
|
| 97 |
+
total_runtime (float): The total estimated runtime in milliseconds.
|
| 98 |
+
|
| 99 |
+
Note:
|
| 100 |
+
1) The benchmarking estimate mode will execute kernels on GPU and assumes that every operation can run in
|
| 101 |
+
isolation without causing an OOM error. It is also designed to be used only under ``FakeTensorMode``.
|
| 102 |
+
2) Currently wrapper tensor sub-classes such as ``DTensor`` won't produce correct estimates. We plan to support
|
| 103 |
+
them in future PRs.
|
| 104 |
+
3) We only estimate the compute time, if your code has communication, it will not be considered. Again, we will
|
| 105 |
+
support this in future PRs.
|
| 106 |
+
|
| 107 |
+
Example usage:
|
| 108 |
+
|
| 109 |
+
.. code-block:: python
|
| 110 |
+
|
| 111 |
+
runtime_estimator = RuntimeEstimator()
|
| 112 |
+
with FakeTensorMode():
|
| 113 |
+
module = ...
|
| 114 |
+
optimizer = ...
|
| 115 |
+
inp = ...
|
| 116 |
+
with runtime_estimator(estimate_mode_type="operator-level-cost-model"):
|
| 117 |
+
loss = module(inp)
|
| 118 |
+
loss.backward()
|
| 119 |
+
optimizer.step()
|
| 120 |
+
optimizer.zero_grad()
|
| 121 |
+
runtime_estimator.display_modulewise_stats()
|
| 122 |
+
"""
|
| 123 |
+
|
| 124 |
+
_float_types: Set[torch.dtype] = {
|
| 125 |
+
torch.float16,
|
| 126 |
+
torch.bfloat16,
|
| 127 |
+
torch.float32,
|
| 128 |
+
torch.float64,
|
| 129 |
+
}
|
| 130 |
+
_no_fallback_kernel: Set[torch._ops._OpNamespace] = set()
|
| 131 |
+
fake_mode: FakeTensorMode
|
| 132 |
+
|
| 133 |
+
def __init__(self) -> None:
|
| 134 |
+
super().__init__()
|
| 135 |
+
self._estimate: Callable
|
| 136 |
+
self._estimate_mode_type: str
|
| 137 |
+
self._mod_tracker = ModTracker()
|
| 138 |
+
self.mod_runtimes: Dict[str, Dict[str, float]] = defaultdict(
|
| 139 |
+
lambda: defaultdict(lambda: 0.0)
|
| 140 |
+
)
|
| 141 |
+
self.mod_fw_pre_order: List[str] = []
|
| 142 |
+
self.mod_bw_pre_order: List[str] = []
|
| 143 |
+
self.mod_fw_post_order: List[str] = []
|
| 144 |
+
self.mod_bw_post_order: List[str] = []
|
| 145 |
+
self.total_runtime: float = 0.0
|
| 146 |
+
|
| 147 |
+
# Adapted from: https://github.com/pytorch/pytorch/blob/9b902b3ee3bd608a19543362b66bf06c373dd374/torch/_subclasses/fake_tensor.py#L1969 # noqa: PGH004,B950
|
| 148 |
+
# NB: returns fake tensors
|
| 149 |
+
@classmethod
|
| 150 |
+
def _maybe_run_and_benchmark_fallback_kernel( # type: ignore[no-untyped-def]
|
| 151 |
+
cls,
|
| 152 |
+
func,
|
| 153 |
+
args,
|
| 154 |
+
kwargs,
|
| 155 |
+
orig_not_implemented_exception,
|
| 156 |
+
):
|
| 157 |
+
"""
|
| 158 |
+
Runs and benchmarks a fallback kernel for a given function.
|
| 159 |
+
|
| 160 |
+
Args:
|
| 161 |
+
func (Callable): The function to benchmark.
|
| 162 |
+
args (Tuple): The arguments to pass to the function.
|
| 163 |
+
kwargs (Dict[str, Any]): The keyword arguments to pass to the function.
|
| 164 |
+
orig_not_implemented_exception (Exception): The original exception to raise if the fallback kernel
|
| 165 |
+
is not implemented.
|
| 166 |
+
|
| 167 |
+
Returns:
|
| 168 |
+
Tuple[Any, float]: A tuple containing the result of the function and
|
| 169 |
+
the mean operation time in milliseconds.
|
| 170 |
+
"""
|
| 171 |
+
# these should all be supported, just to be safe
|
| 172 |
+
# avoid fallback for operators which inplace modify metadata
|
| 173 |
+
# because the input fake tensors would be umodified
|
| 174 |
+
if torch.Tag.inplace_view in func.tags: # type: ignore[attr-defined]
|
| 175 |
+
raise orig_not_implemented_exception
|
| 176 |
+
|
| 177 |
+
inp_impls = {}
|
| 178 |
+
flat_args, args_spec = pytree.tree_flatten((args, kwargs))
|
| 179 |
+
# Don't use in_kernel_invocation_manager(fake_mode) as we want to do
|
| 180 |
+
# REAL compute (not with meta device)
|
| 181 |
+
with no_dispatch():
|
| 182 |
+
|
| 183 |
+
def to_real_tensor(e): # type: ignore[no-untyped-def]
|
| 184 |
+
if cls.fake_mode.is_our_fake(e):
|
| 185 |
+
if e.dtype in cls._float_types:
|
| 186 |
+
out = torch.rand_like(e, device=e.fake_device)
|
| 187 |
+
else:
|
| 188 |
+
out = torch.ones_like(e, device=e.fake_device)
|
| 189 |
+
if e.is_sparse:
|
| 190 |
+
out._coalesced_(e.is_coalesced())
|
| 191 |
+
inp_impls[id(out)] = e
|
| 192 |
+
return out
|
| 193 |
+
return e
|
| 194 |
+
|
| 195 |
+
flat_args = [to_real_tensor(a) for a in flat_args]
|
| 196 |
+
args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
|
| 197 |
+
r = func(*args, **kwargs)
|
| 198 |
+
warmup_iters, actual_iters = 2, 3
|
| 199 |
+
for _ in range(warmup_iters):
|
| 200 |
+
func(*args, **kwargs)
|
| 201 |
+
start_event = torch.cuda.Event(enable_timing=True)
|
| 202 |
+
end_event = torch.cuda.Event(enable_timing=True)
|
| 203 |
+
start_event.record(torch.cuda.current_stream())
|
| 204 |
+
for _ in range(actual_iters):
|
| 205 |
+
func(*args, **kwargs)
|
| 206 |
+
end_event.record(torch.cuda.current_stream())
|
| 207 |
+
torch.cuda.synchronize()
|
| 208 |
+
cuda_time = start_event.elapsed_time(end_event)
|
| 209 |
+
mean_op_time = cuda_time / actual_iters
|
| 210 |
+
|
| 211 |
+
storages = set()
|
| 212 |
+
|
| 213 |
+
for e in flat_args:
|
| 214 |
+
if isinstance(e, torch.Tensor):
|
| 215 |
+
if not e.is_sparse:
|
| 216 |
+
storages.add(e._typed_storage()._cdata)
|
| 217 |
+
|
| 218 |
+
# TODO: also check metadata change on inputs
|
| 219 |
+
# proper aliasing/metadata relationship between outputs and inputs will
|
| 220 |
+
# not be set up, bc of conversion to device, unless we can reuse an
|
| 221 |
+
# input impl
|
| 222 |
+
|
| 223 |
+
def map_out(e): # type: ignore[no-untyped-def]
|
| 224 |
+
if id(e) not in inp_impls and (
|
| 225 |
+
isinstance(e, torch.Tensor)
|
| 226 |
+
and not e.is_sparse
|
| 227 |
+
and e._typed_storage()._cdata in storages
|
| 228 |
+
):
|
| 229 |
+
raise orig_not_implemented_exception
|
| 230 |
+
|
| 231 |
+
if isinstance(e, torch.Tensor):
|
| 232 |
+
if id(e) in inp_impls:
|
| 233 |
+
return inp_impls[id(e)]
|
| 234 |
+
else:
|
| 235 |
+
return cls.fake_mode.fake_tensor_converter.from_real_tensor(
|
| 236 |
+
cls.fake_mode, e
|
| 237 |
+
)
|
| 238 |
+
else:
|
| 239 |
+
return e
|
| 240 |
+
|
| 241 |
+
return (pytree.tree_map(map_out, r), mean_op_time)
|
| 242 |
+
|
| 243 |
+
@classmethod
|
| 244 |
+
def _benchmark_estimate(cls, func, args, kwargs) -> Tuple[Any, float]: # type: ignore[no-untyped-def]
|
| 245 |
+
"""
|
| 246 |
+
Estimates the runtime of a function using benchmarking.
|
| 247 |
+
|
| 248 |
+
Args:
|
| 249 |
+
func: The function to estimate.
|
| 250 |
+
args: The arguments to pass to the function.
|
| 251 |
+
kwargs: The keyword arguments to pass to the function.
|
| 252 |
+
res: The result of the function.
|
| 253 |
+
|
| 254 |
+
Returns:
|
| 255 |
+
Tuple[Any, float]: A tuple containing the result of the function and
|
| 256 |
+
the mean operation time in milliseconds.
|
| 257 |
+
"""
|
| 258 |
+
assert isinstance(
|
| 259 |
+
cls.fake_mode, FakeTensorMode
|
| 260 |
+
), "Initialize/Assign FakeTensorMode before using this function"
|
| 261 |
+
mean_op_time = 0.0
|
| 262 |
+
if func._overloadpacket not in _VIEW_OPS:
|
| 263 |
+
try:
|
| 264 |
+
res, mean_op_time = cls._maybe_run_and_benchmark_fallback_kernel(
|
| 265 |
+
func,
|
| 266 |
+
args,
|
| 267 |
+
kwargs,
|
| 268 |
+
NotImplementedError,
|
| 269 |
+
)
|
| 270 |
+
return (res, mean_op_time)
|
| 271 |
+
except NotImplementedError:
|
| 272 |
+
cls._no_fallback_kernel.add(func._overloadpacket)
|
| 273 |
+
res = func(*args, **kwargs or {})
|
| 274 |
+
return (res, mean_op_time)
|
| 275 |
+
|
| 276 |
+
# Adapted from: https://github.com/pytorch/pytorch/blob/9b902b3ee3bd608a19543362b66bf06c373dd374/torch/_inductor/scheduler.py#L589 # noqa: PGH004,B950
@classmethod
def _roofline_estimate(cls, func, args, kwargs) -> Tuple[Any, float]:  # type: ignore[no-untyped-def]
    """
    Estimates the runtime of a function using a roofline cost model.

    Args:
        func: The function to estimate.
        args: The arguments to pass to the function.
        kwargs: The keyword arguments to pass to the function.

    Returns:
        Tuple[Any, float]: A tuple containing the result of the function and
            the mean operation time in milliseconds.
    """
    assert (
        torch.cuda.is_available()
    ), "Roofline estimation needs to access CUDA capabilities to make estimations"

    def get_num_bytes(t: torch.Tensor) -> int:
        """
        Calculates the memory consumption of a tensor.

        Args:
            t (torch.Tensor): The input tensor.

        Returns:
            int: The memory consumption of the tensor in bytes.
        """
        num_bytes = t.untyped_storage().nbytes()
        # Round up to the allocator's minimum block size to reflect what the
        # caching allocator actually reserves.
        mem_consumed = (
            math.ceil(num_bytes / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE
        )
        return mem_consumed

    def get_compute_time(func_packet, args, kwargs, out, out_dtypes) -> float:  # type: ignore[no-untyped-def]
        """
        Estimates the compute time of an aten operator.

        Args:
            func_packet: The operator overload packet.
            args: The arguments to the operator.
            kwargs: The keyword arguments to the operator.
            out: The output of the operator.
            out_dtypes: The output data types.

        Returns:
            float: The estimated compute time in nanoseconds.
        """
        # Ops without a registered FLOP formula are treated as compute-free
        # (their cost is then dominated by the transfer-time term).
        if func_packet in flop_registry:
            assert (
                len(out_dtypes) == 1
            ), f"Only support single out dtype got {out_dtypes} for {func_packet}"
            dtype = out_dtypes.pop()
            # This actually gives peta-FLOPs/s hence multiply by 1e15 to get the FLOPs/s
            peak_gpu_flops = get_device_tflops(dtype) * 1e15
            # We can expect to achieve 75% of theoretical peak flops
            factor = 0.75
            peak_empirical_flops = factor * peak_gpu_flops
            flop_count_func = flop_registry[func_packet]
            # We divide by a factor of 2 to get the MACs (multiply and accumulate)
            flop_count = flop_count_func(*args, **kwargs, out_val=out) / 2
            # We multiply by 1e9 to get the time in nano seconds
            compute_time = (flop_count / peak_empirical_flops) * 1e9
            return compute_time
        return 0.0

    def get_transfer_time(flat_args_kwargs, flat_outs) -> float:  # type: ignore[no-untyped-def]
        """
        Estimates the memory transfer time of input and output tensors.

        Args:
            flat_args_kwargs (List[torch.Tensor]): The flat list of arguments and keyword arguments.
            flat_outs (List[torch.Tensor]): The flat list of outputs.

        Returns:
            float: The estimated memory transfer time in nanoseconds.
        """
        gpu_memory_bandwidth = get_gpu_dram_gbps()
        read_bytes = sum(
            get_num_bytes(t)
            for t in flat_args_kwargs
            if isinstance(t, torch.Tensor)
        )
        write_bytes = sum(
            get_num_bytes(t) for t in flat_outs if isinstance(t, torch.Tensor)
        )
        counted_bytes = read_bytes + write_bytes
        # The GPU memory bandwidth is in GB/s so the transfer time is in nanoseconds
        transfer_time = counted_bytes / gpu_memory_bandwidth
        return transfer_time

    # Roofline Cost Model Explanation

    # The roofline cost model estimates the execution time of an operator based on
    # the device's empirical maximum FLOPs/sec (pi) and device DRAM bandwidth (beta).

    # Variables:
    # - pi: Maximum empirical FLOPs/sec of the device
    # - beta: Maximum empirical device DRAM bandwidth (bytes/sec) of the device
    # - I: Arithmetic intensity of the operator (FLOPs/bytes)
    # - op_flops: FLOPs required by the operator
    # - op_bytes: Bytes transferred to and from DRAM for the operator

    # Calculation Steps:
    # 1. Calculate arithmetic intensity: I = op_flops / op_bytes
    # 2. Calculate estimated FLOPs/sec: est_flops_sec = min(pi, beta * I)
    # 3. Calculate estimated operator time: estimated_op_time = op_flops / est_flops_sec
    #    This simplifies to: estimated_op_time = max(op_flops / pi, op_flops / (beta * I))
    #    Further simplifying: estimated_op_time = max(op_flops / pi, op_bytes / beta)

    # Simplified Formulas:
    # - compute_time = op_flops / pi
    # - transfer_time = op_bytes / beta
    # - estimated_op_time = max(compute_time, transfer_time)

    kwargs = kwargs if kwargs else {}
    # Run the op under the ambient fake mode to produce (fake) outputs whose
    # metadata drives the byte/FLOP accounting below.
    out = func(*args, **kwargs)
    op_time = 0.0
    func_packet = func._overloadpacket
    if func_packet not in _IGNORE_OPS:
        flat_args_kwargs, args_spec = pytree.tree_flatten((args, kwargs))
        flat_outs, out_spec = pytree.tree_flatten(out)
        transfer_time = get_transfer_time(flat_args_kwargs, flat_outs)

        out_dtypes = {
            t.dtype
            for t in flat_outs
            if isinstance(t, torch.Tensor) and t.dtype in cls._float_types
        }

        args, kwargs = pytree.tree_unflatten(flat_args_kwargs, args_spec)
        out = pytree.tree_unflatten(flat_outs, out_spec)

        compute_time = get_compute_time(func_packet, args, kwargs, out, out_dtypes)
        # We get the estimated time as the max of the transfer time and
        # compute time. We divide by 1e6 to get the time in ms
        op_time = max(transfer_time, compute_time) / 1e6

    return (out, op_time)
|
| 417 |
+
|
| 418 |
+
def display_modulewise_stats(self, depth: int = 2) -> None:
    """
    Displays module-wise statistics collected by ``RuntimeEstimator``.

    Prints the pre-forward and pre-backward execution orders, followed by the
    module-wise forward and backward runtimes in milliseconds.

    Args:
        depth (int): The maximum depth of module hierarchy to display (default to 2).
    """

    def _within_depth(fqn: str) -> bool:
        # A module's depth is one more than the number of dots in its FQN
        # (the root module has depth 1).
        return fqn.count(".") + 1 <= depth

    print("Pre-Forward Execution Order: ")
    for fqn in self.mod_fw_pre_order:
        if _within_depth(fqn):
            print(fqn)
    print("Pre-Backward Execution Order: ")
    for fqn in self.mod_bw_pre_order:
        if _within_depth(fqn):
            print(fqn)
    for fqn, runtimes in self.mod_runtimes.items():
        if _within_depth(fqn):
            print(
                f"{fqn} fw: {runtimes.get('fw', 0.0):.3f}ms bw: {runtimes.get('bw', 0.0):.3f}ms"
            )
|
| 447 |
+
|
| 448 |
+
def __torch_dispatch__(self, func, types, args=..., kwargs=None):  # type: ignore[no-untyped-def]
    # TODO: @sanketpurandare: Flatten tensors by desugaring the tensor subclasses
    # TODO: @sanketpurandare: Add logic for incorporating communication time
    # Estimate this op with the configured strategy (benchmark or roofline),
    # then charge the cost to every module currently on the tracker's stack so
    # nested modules each accumulate the time of ops run inside them.
    res, op_time = self._estimate(func, args, kwargs)
    for par in self._mod_tracker.parents:
        if self._mod_tracker.is_bw:
            self.mod_runtimes[par]["bw"] += op_time
        else:
            self.mod_runtimes[par]["fw"] += op_time
    self.total_runtime += op_time
    return res
|
| 459 |
+
|
| 460 |
+
def __call__(self, estimate_mode_type: str) -> Self:
|
| 461 |
+
"""
|
| 462 |
+
Sets the estimate mode type.
|
| 463 |
+
|
| 464 |
+
Currently supported modes:
|
| 465 |
+
- "operator-level-benchmark": Estimates runtime using operator benchmarking.
|
| 466 |
+
- "operator-level-cost-model": Estimates runtime using roofline cost model.
|
| 467 |
+
|
| 468 |
+
Args:
|
| 469 |
+
estimate_mode_type (str): The type of estimate mode to use.
|
| 470 |
+
|
| 471 |
+
Returns:
|
| 472 |
+
RuntimeEstimator: The runtime estimator instance.
|
| 473 |
+
|
| 474 |
+
Raises:
|
| 475 |
+
NotImplementedError: If the estimate mode type is not supported.
|
| 476 |
+
"""
|
| 477 |
+
if estimate_mode_type == "operator-level-benchmark":
|
| 478 |
+
self._estimate = RuntimeEstimator._benchmark_estimate
|
| 479 |
+
elif estimate_mode_type == "operator-level-cost-model":
|
| 480 |
+
self._estimate = RuntimeEstimator._roofline_estimate
|
| 481 |
+
else:
|
| 482 |
+
raise NotImplementedError(
|
| 483 |
+
f"estimate_mode_type {estimate_mode_type} not supported"
|
| 484 |
+
)
|
| 485 |
+
self._estimate_mode_type = estimate_mode_type
|
| 486 |
+
return self
|
| 487 |
+
|
| 488 |
+
def __enter__(self) -> Self:
    # Estimation must run under an ambient FakeTensorMode: ops execute on fake
    # tensors, and only the benchmark path materializes real kernels.
    fake_mode = active_fake_mode()
    assert isinstance(
        fake_mode, FakeTensorMode
    ), "No FakeTensorMode found, designed to used under FakeTensorMode"
    RuntimeEstimator.fake_mode = fake_mode
    # Reset all statistics accumulated by any previous estimation run.
    self.total_runtime = 0.0
    self.mod_runtimes = defaultdict(lambda: defaultdict(lambda: 0.0))
    self.mod_fw_pre_order.clear()
    self.mod_bw_pre_order.clear()
    self.mod_fw_post_order.clear()
    self.mod_bw_post_order.clear()
    # Record module execution order (forward and backward, pre and post) via
    # the module tracker's user hooks.
    self._mod_tracker.register_user_hooks(
        pre_fw_hook=lambda mod, inp: self.mod_fw_pre_order.append(
            self._mod_tracker.get_known_fqn(mod)
        ),
        pre_bw_hook=lambda mod, g_out: self.mod_bw_pre_order.append(
            self._mod_tracker.get_known_fqn(mod)
        ),
        post_fw_hook=lambda mod, inp, out: self.mod_fw_post_order.append(
            self._mod_tracker.get_known_fqn(mod)
        ),
        post_bw_hook=lambda mod, g_inp: self.mod_bw_post_order.append(
            self._mod_tracker.get_known_fqn(mod)
        ),
    )
    # Enter the tracker before the dispatch mode so module attribution is
    # active when ops start being intercepted.
    self._mod_tracker.__enter__()
    super().__enter__()
    return self
|
| 517 |
+
|
| 518 |
+
def __exit__(self, *args: Any) -> None:
    # NOTE(review): the two f-string fragments concatenate without a space,
    # printing "(<mode>)total_time:" — looks like a missing space; confirm
    # before changing the message.
    print(
        f"Estimated ({self._estimate_mode_type})"
        f"total_time: {self.total_runtime:.3f} ms"
    )
    # Report ops for which no real fallback kernel could be benchmarked.
    if len(self._no_fallback_kernel) > 0:
        print("no_fallback_kernel: ", list(self._no_fallback_kernel))
    # Tear down in reverse order of __enter__: dispatch mode first, then the
    # module tracker and its hooks.
    super().__exit__(*args)
    self._mod_tracker.clear_user_hooks()
    self._mod_tracker.__exit__()
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (757 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc
ADDED
|
Binary file (17.1 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc
ADDED
|
Binary file (10.5 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc
ADDED
|
Binary file (72 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc
ADDED
|
Binary file (4.99 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc
ADDED
|
Binary file (31.5 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc
ADDED
|
Binary file (1.58 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc
ADDED
|
Binary file (21.2 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc
ADDED
|
Binary file (9.52 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc
ADDED
|
Binary file (3.03 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc
ADDED
|
Binary file (8.11 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py
ADDED
|
@@ -0,0 +1,558 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
"""
|
| 3 |
+
This file includes private common utilities for FSDP.
|
| 4 |
+
"""
|
| 5 |
+
import logging
|
| 6 |
+
import traceback
|
| 7 |
+
import warnings
|
| 8 |
+
import weakref
|
| 9 |
+
from enum import auto, Enum
|
| 10 |
+
from functools import partial
|
| 11 |
+
from typing import (
|
| 12 |
+
Any,
|
| 13 |
+
Callable,
|
| 14 |
+
cast,
|
| 15 |
+
Dict,
|
| 16 |
+
Generator,
|
| 17 |
+
Iterable,
|
| 18 |
+
List,
|
| 19 |
+
no_type_check,
|
| 20 |
+
Optional,
|
| 21 |
+
Set,
|
| 22 |
+
Tuple,
|
| 23 |
+
Type,
|
| 24 |
+
TYPE_CHECKING,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
import torch
|
| 28 |
+
import torch.distributed as dist
|
| 29 |
+
import torch.distributed.fsdp._flat_param as flat_param_file
|
| 30 |
+
import torch.nn as nn
|
| 31 |
+
from torch.distributed._composable_state import _get_module_state, _State
|
| 32 |
+
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
|
| 33 |
+
_CHECKPOINT_PREFIX,
|
| 34 |
+
)
|
| 35 |
+
from torch.distributed.utils import _apply_to_tensors
|
| 36 |
+
from torch.utils._mode_utils import no_dispatch
|
| 37 |
+
|
| 38 |
+
from .api import (
|
| 39 |
+
FullOptimStateDictConfig,
|
| 40 |
+
FullStateDictConfig,
|
| 41 |
+
OptimStateDictConfig,
|
| 42 |
+
ShardingStrategy,
|
| 43 |
+
StateDictConfig,
|
| 44 |
+
StateDictType,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
if TYPE_CHECKING:
|
| 49 |
+
from torch.distributed.device_mesh import DeviceMesh
|
| 50 |
+
from torch.distributed.fsdp._fsdp_extensions import FSDPExtensions
|
| 51 |
+
|
| 52 |
+
from ._flat_param import FlatParamHandle
|
| 53 |
+
|
| 54 |
+
FSDP_WRAPPED_MODULE = "_fsdp_wrapped_module"
|
| 55 |
+
FSDP_PREFIX = FSDP_WRAPPED_MODULE + "."
|
| 56 |
+
FSDP_FLATTENED = "_fsdp_flattened"
|
| 57 |
+
|
| 58 |
+
# Save a global mapping from module to its input tensor dtype to be populated
|
| 59 |
+
# during the forward pre-hook and consumed in the forward post-hook when
|
| 60 |
+
# overriding a module's mixed precision
|
| 61 |
+
# NOTE: We currently take the last input tensor's dtype in the case of multiple
|
| 62 |
+
# floating-point input tensors, which may be incorrect. However, since there is
|
| 63 |
+
# not a 1:1 correspondence between input and output tensors, we must use *some*
|
| 64 |
+
# heuristic like this to predict the desired output dtype.
|
| 65 |
+
_MODULE_TO_INP_DTYPE: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class _FSDPDeviceHandle:
|
| 69 |
+
"""
|
| 70 |
+
This is a simple abstraction for FSDP computing devices,
|
| 71 |
+
which enables custom backends that implement CUDA-like
|
| 72 |
+
semantics to be integrated with FSDP.
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
def __init__(self, device: torch.device, backend: Any = None):
|
| 76 |
+
if backend is None:
|
| 77 |
+
try:
|
| 78 |
+
self.__backend = getattr(torch, device.type)
|
| 79 |
+
self.__device = device
|
| 80 |
+
except AttributeError as exc:
|
| 81 |
+
raise AttributeError(
|
| 82 |
+
f"Device '{device}' does not have a corresponding backend registered as 'torch.{device.type}'."
|
| 83 |
+
) from exc
|
| 84 |
+
else:
|
| 85 |
+
self.__backend = backend
|
| 86 |
+
|
| 87 |
+
@classmethod
|
| 88 |
+
def from_device(cls, device: torch.device) -> "_FSDPDeviceHandle":
|
| 89 |
+
"""
|
| 90 |
+
Return a device handle corresponding to the device, and through this handle,
|
| 91 |
+
operations with the same semantics as CUDA can be performed on the device.
|
| 92 |
+
Just return torch.cuda if the device is cuda to make attribute-access faster.
|
| 93 |
+
Custom backend must first register a module with the same name with {device.type} on torch.
|
| 94 |
+
"""
|
| 95 |
+
if device.type == "cuda":
|
| 96 |
+
return cast(_FSDPDeviceHandle, torch.cuda)
|
| 97 |
+
elif device.type == "mtia":
|
| 98 |
+
return cast(_FSDPDeviceHandle, torch.mtia)
|
| 99 |
+
return cls(device)
|
| 100 |
+
|
| 101 |
+
def __getattr__(self, __name: str) -> Any:
|
| 102 |
+
try:
|
| 103 |
+
return getattr(self.__backend, __name)
|
| 104 |
+
except AttributeError as exc:
|
| 105 |
+
raise AttributeError(
|
| 106 |
+
f"Custom backend '{self.__device.type}' not implement 'torch.{self.__device.type}.{__name}'"
|
| 107 |
+
) from exc
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class _UninitializedDeviceHandle(_FSDPDeviceHandle):
    """Placeholder device handle used before FSDP lazily determines the real
    compute device; any use of it is a hard error."""

    def __init__(self) -> None:
        # Intentionally skip the parent constructor: there is no device or
        # backend yet.
        pass

    def __getattribute__(self, __name: str) -> Any:
        # Overriding __getattribute__ (not __getattr__) makes EVERY attribute
        # access fail, not just missing ones.
        raise RuntimeError("Trying to use an uninitialized device handle.")
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class _FSDPState(_State):
    """Per-FSDP-instance mutable state shared by FSDP's internal free
    functions (init, runtime, state-dict, unshard, etc.)."""

    def __init__(self) -> None:
        # TODO: Move all the attributes to this class to enable typing for
        # FSDP/fully_shard.
        self._ignored_modules: Set[nn.Module] = set()
        self._ignored_params: Set[nn.Parameter] = set()
        # Buffer names are cleaned (without wrapper prefixes)
        self._ignored_buffer_names: Set[str] = set()
        self.process_group: Optional[dist.ProcessGroup] = None
        self.rank: int = -1
        self.world_size: int = -1
        self._device_mesh: Optional[DeviceMesh] = None
        self.sharding_strategy = ShardingStrategy.FULL_SHARD
        self._use_orig_params: bool = False
        self.training_state = TrainingState.IDLE
        self._unshard_params_ctx: Dict[nn.Module, Generator] = {}
        self._state_dict_type: StateDictType = StateDictType.FULL_STATE_DICT
        self._state_dict_config: StateDictConfig = FullStateDictConfig()
        self._optim_state_dict_config: OptimStateDictConfig = FullOptimStateDictConfig()
        # `None` until lazy initialization decides whether this is the root.
        self._is_root: Optional[bool] = None
        self._handle: Optional[flat_param_file.FlatParamHandle] = None
        self._fully_sharded_module_to_handle: Dict[
            nn.Module, Optional[flat_param_file.FlatParamHandle]
        ] = {}
        self.compute_device: Optional[torch.device] = None
        self._gradient_predivide_factor: int = 0
        self._gradient_postdivide_factor: int = 0
        self._comm_hook: Optional[Callable] = None
        self._comm_hook_state: Optional[Any] = None
        self._unshard_event: Optional[torch.Event] = None
        # Abstract device handle for fsdp compute device. For now,
        # the compute device must implement cuda semantics used by fsdp
        self._device_handle: _FSDPDeviceHandle = _UninitializedDeviceHandle()
        # All following attributes should only be used for root states:
        # Save these static lists to avoid the repeated tree traversals
        self._all_fsdp_states: List[_FSDPState] = []
        self._all_handles: List[flat_param_file.FlatParamHandle] = []
        self._fsdp_extension: Optional[FSDPExtensions] = None
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def _get_module_fsdp_state(module: nn.Module) -> Optional[_FSDPState]:
    """Return the ``_FSDPState`` attached to ``module``, or ``None`` when the
    module has no composable state or its state is not FSDP's."""
    candidate = _get_module_state(module)
    if isinstance(candidate, _FSDPState):
        return candidate
    return None
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def _get_module_fsdp_state_if_fully_sharded_module(
    module: nn.Module,
) -> Optional[_FSDPState]:
    """Return ``module``'s ``_FSDPState`` only when ``module`` itself is a
    fully sharded module (the FSDP wrapper or a ``fully_shard`` target)."""
    state = _get_module_fsdp_state(module)
    if state is not None:
        # FullyShardedDataParallel wrapper: the state *is* the module.
        if state == module:
            return state
        # fully_shard (composable) case: the module is registered in the
        # state's handle mapping.
        if module in state._fully_sharded_module_to_handle:
            return state
    return None
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class TrainingState(Enum):
    """
    An enum that indicates the state of a ``FullyShardedDataParallel`` instance.
    """

    IDLE = auto()
    FORWARD_BACKWARD = auto()
    SUMMON_FULL_PARAMS = auto()
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class HandleTrainingState(Enum):
    """
    An enum that indicates the state of a ``FlatParamHandle``.
    """

    IDLE = auto()
    FORWARD = auto()
    BACKWARD_PRE = auto()
    BACKWARD_POST = auto()
    SUMMON_FULL_PARAMS = auto()
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def _is_composable(state: _FSDPState):
    """Return ``True`` for composable-API (``fully_shard``) states, which,
    unlike the ``FullyShardedDataParallel`` wrapper, are not modules."""
    # TODO: This is a temporary hack for differentiate between code paths.
    wrapper_style = isinstance(state, nn.Module)
    return not wrapper_style
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@no_type_check
def _module_handle(state: _FSDPState, module: nn.Module) -> Optional["FlatParamHandle"]:
    """
    Returns the ``FlatParamHandle`` s corresponding to ``module``. This is
    the handle that contains some parameter in ``module``.
    """
    if _is_composable(state):
        # A valid FSDP state may have no managed parameters and hence no
        # handles, meaning no entry in `_fully_sharded_module_to_handles`
        if state._handle is None:
            return None
        # With a handle present, `module` must have been registered as a
        # fully sharded module during construction.
        assert (
            module in state._fully_sharded_module_to_handle
        ), f"Expects a fully sharded module but got {module} on rank {state.rank}"
        return state._fully_sharded_module_to_handle[module]
    else:
        # NOTE: This assumes `module` is a `FullyShardedDataParallel` instance.
        return module._handle
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@no_type_check
def _has_fsdp_params(state: _FSDPState, module: nn.Module) -> bool:
    """Returns if ``module`` has parameters managed by FSDP."""
    handle = _module_handle(state, module)
    return handle is not None
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def _get_sharding_strategy(handle):
|
| 232 |
+
"""
|
| 233 |
+
Returns the sharding strategy of the handle.
|
| 234 |
+
"""
|
| 235 |
+
return handle._sharding_strategy if handle else None
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def clean_tensor_name(tensor_name: str) -> str:
    """
    Cleans the parameter or buffer name by removing any module wrapper
    prefixes (FSDP's and the activation-checkpoint wrapper's).
    """
    # TODO: Explicitly replacing the checkpoint wrapper prefix is not ideal as
    # it couples `CheckpointWrapper` and FSDP and also does not scale for more
    # module wrappers.
    for wrapper_prefix in (FSDP_PREFIX, _CHECKPOINT_PREFIX):
        tensor_name = tensor_name.replace(wrapper_prefix, "")
    return tensor_name
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def _set_fsdp_flattened(tensor: torch.Tensor) -> None:
    """
    Sets an attribute on ``tensor`` to mark it as flattened by FSDP. This is to
    avoid re-flattening it during nested construction.
    """
    # `setattr` is required because the attribute name is held in the module
    # constant `FSDP_FLATTENED`.
    setattr(tensor, FSDP_FLATTENED, True)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def _is_fsdp_flattened(tensor: torch.Tensor) -> bool:
    """Returns if ``tensor`` has been marked as flattened by FSDP."""
    # Tensors never passed to `_set_fsdp_flattened` lack the attribute, so
    # default to False.
    return getattr(tensor, FSDP_FLATTENED, False)
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def _named_parameters_with_duplicates(
|
| 265 |
+
module: nn.Module, **kwargs: Any
|
| 266 |
+
) -> List[Tuple[str, nn.Parameter]]:
|
| 267 |
+
"""
|
| 268 |
+
This API is required as some modules overwrite `named_parameters()` but do not support
|
| 269 |
+
`remove_duplicate`.
|
| 270 |
+
"""
|
| 271 |
+
assert (
|
| 272 |
+
"remove_duplicate" not in kwargs
|
| 273 |
+
), "_named_parameters_with_duplicates cannot be used with `remove_duplicate` argument."
|
| 274 |
+
kwargs["remove_duplicate"] = False
|
| 275 |
+
try:
|
| 276 |
+
ret = list(module.named_parameters(**kwargs))
|
| 277 |
+
except AssertionError as e:
|
| 278 |
+
kwargs.pop("remove_duplicate")
|
| 279 |
+
ret = list(module.named_parameters(**kwargs))
|
| 280 |
+
return ret
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def _get_param_to_fqns(
    model: torch.nn.Module,
    dedup_shared_params: bool = True,
) -> Dict[nn.Parameter, List[str]]:
    """
    Constructs a mapping from parameter to a list of its \"canonical\" FQNs. Here,
    we use canonical to mean the fully-qualified name assigned to the parameter
    based on its position in the original nn.Module hierarchy before any wrapper
    or parallelism has been applied to it. This is in contrast to FQNs that may be
    generated after parallelisms or wrappers have been applied to the model.

    Each normal parameter maps to a singleton list containing its FQN, while each
    ``FlatParameter`` maps to a list of its original parameter FQNs, which may
    have length greater than one. All FQNs are prefixed starting from ``model``.

    In the case where FSDP was applied with ``use_orig_params=True``, there should be no
    ``FlatParameter`` s registered to the model's modules and this mapping will only
    contain mappings from ``nn.Parameter`` s to singleton FQN lists.

    It is only in the case where FSDP was applied with ``use_orig_params=False`` where
    a ``FlatParameter`` will be registered in place of the original parameters and there
    will be mappings from each ``FlatParameter`` to lists of FQNs corresponding to the
    original parameters.

    Args:
        model (torch.nn.Module): Root module (which may or may not be a
            :class:`FullyShardedDataParallel` instance).
        dedup_shared_params (bool): For shared parameters, if ``True``, only
            includes the FQNs corresponding to the first encounter of the
            shared parameter in the module traversal; if ``False``, then
            includes the FQNs across all encounters. (Default: ``True``)
    """

    def module_fn(module, prefix, tree_level, param_to_fqns):
        # Visitor run per module by `_apply_to_modules`; accumulates into
        # `param_to_fqns`.
        for param_name, param in _named_parameters_with_duplicates(
            module, recurse=False
        ):
            local_fqns = (
                param._fqns
                if isinstance(param, flat_param_file.FlatParameter)
                else [param_name]
            )  # prefixed from `module`
            global_fqns = [
                clean_tensor_name(prefix + name) for name in local_fqns
            ]  # prefixed from the top level `model` (i.e. including `prefix`)
            is_shared_param = param in param_to_fqns
            if not is_shared_param:
                param_to_fqns[param] = global_fqns
            else:
                if isinstance(param, flat_param_file.FlatParameter):
                    # DMP overwrites `named_parameters` and skip (advance to
                    # the next child module) the wrapped_module (e.g.,
                    # _dmp_wrapped_module and _fsdp_wrapped_module). When a user
                    # calls `named_child` to traverse the module recursively and
                    # calls `named_parameters` with `recurse=False`, parameters
                    # will be traversed more than once.
                    # This hack is specified designed for DMP + FSDP. We
                    # overwrite the flat_parameters traversal result to only obtain
                    # the last one, which happens to be the correct one.
                    #
                    # TODO: Remove this hack once DMP + FSDP is not supported.
                    warnings.warn(
                        "FlatParameter is being traversed more than once. "
                        "This case should only happen when using "
                        "DistributedModelParallel with FullyShardedDataParallel."
                    )
                    param_to_fqns[param] = global_fqns
                elif not dedup_shared_params:
                    param_to_fqns[param].extend(global_fqns)

    def return_fn(param_to_fqns):
        # `_apply_to_modules` calls this on the accumulator to produce the
        # final result.
        return param_to_fqns

    param_to_unflat_param_names: Dict[torch.nn.Parameter, List[str]] = {}
    return _apply_to_modules(
        model,
        module_fn,
        return_fn,
        [key for key, _ in _named_parameters_with_duplicates(model)],
        param_to_unflat_param_names,
    )
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
@no_type_check
def _log_post_backward_hook(
    state: _FSDPState, handle: "FlatParamHandle", logger: logging.Logger
) -> None:
    """Log the parameter FQNs that this post-backward hook fires for.

    Only emits when ``use_orig_params=True`` and the distributed debug level
    is INFO (i.e. ``TORCH_DISTRIBUTED_DEBUG=INFO``); otherwise it is a no-op.
    """
    # Under TORCH_DISTRIBUTED_DEBUG=INFO, log the module names this hook fires for.
    # Below logging of module names this post-bwd hook fires for can help debug certain
    # cases where hooks don't fire, such as under certain activation checkpoint configs.
    if state._use_orig_params and handle._debug_level == dist.DebugLevel.INFO:
        param_fqns = _get_handle_fqns_from_root(state, handle)
        logger.warning("FSDP firing post-backward hooks for parameters %s", param_fqns)
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
@no_type_check
def _get_handle_fqns_from_root(
    state: _FSDPState, handle: "FlatParamHandle"
) -> Optional[List[str]]:
    """Return the root-prefixed FQNs of every original parameter backing
    ``handle``'s flat parameter, or ``None`` when there is no handle.
    """
    if handle is None:
        return None
    fqn_lookup = state._exec_order_data.param_to_fqn
    flattened_fqns = []
    # `flat_param._params` is only populated when `use_orig_params=True`.
    for orig_param in handle.flat_param._params:
        flattened_fqns.extend(fqn_lookup[orig_param])
    return flattened_fqns
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
def _apply_to_modules(
|
| 393 |
+
root_module: torch.nn.Module,
|
| 394 |
+
module_fn: Callable,
|
| 395 |
+
return_fn: Callable,
|
| 396 |
+
filter_fqns: Optional[List[str]] = None,
|
| 397 |
+
*args,
|
| 398 |
+
**kwargs,
|
| 399 |
+
):
|
| 400 |
+
"""
|
| 401 |
+
Performs a pre-order traversal of the modules in the hierarchy rooted at
|
| 402 |
+
``root_module``, applying ``module_fn`` at each module and finally
|
| 403 |
+
returning a value using ``return_fn``. The traversal constructs the full
|
| 404 |
+
module prefix name (e.g. "module.submodule." just like in model state dict)
|
| 405 |
+
and makes that available to ``module_fn``.
|
| 406 |
+
|
| 407 |
+
``filter_fqns`` is used because some module may have its own prefix similar
|
| 408 |
+
to ``FullyShardedDataParallel`` and the ``named_parameters()`` is overwritten
|
| 409 |
+
to remove the prefix.
|
| 410 |
+
"""
|
| 411 |
+
|
| 412 |
+
def f(module: torch.nn.Module, prefix: str, tree_level: int, *args, **kwargs):
|
| 413 |
+
# Call the module function before recursing over children (pre-order)
|
| 414 |
+
module_fn(module, prefix, tree_level, *args, **kwargs)
|
| 415 |
+
for submodule_name, submodule in module.named_children():
|
| 416 |
+
if submodule is None:
|
| 417 |
+
continue
|
| 418 |
+
new_prefix = prefix + submodule_name + "."
|
| 419 |
+
new_tree_level = tree_level + 1
|
| 420 |
+
if filter_fqns is not None:
|
| 421 |
+
for fqn in filter_fqns:
|
| 422 |
+
if fqn.startswith(new_prefix):
|
| 423 |
+
break
|
| 424 |
+
else:
|
| 425 |
+
# DMP's named_parameter() will mess up the traversal with
|
| 426 |
+
# ``named_children`` + `named_parameter(recurse=False)``.
|
| 427 |
+
# This hack is a must to make the traversal work.
|
| 428 |
+
# TODO: Remove this hack once DMP + FSDP is not supported.
|
| 429 |
+
# It turns out that recursive wrapping may trigger this as
|
| 430 |
+
# well.
|
| 431 |
+
if (
|
| 432 |
+
submodule_name == "_fsdp_wrapped_module"
|
| 433 |
+
or submodule_name == "_dmp_wrapped_module"
|
| 434 |
+
):
|
| 435 |
+
new_prefix = prefix
|
| 436 |
+
elif submodule_name == "module":
|
| 437 |
+
new_prefix = prefix
|
| 438 |
+
f(submodule, new_prefix, new_tree_level, *args, **kwargs)
|
| 439 |
+
|
| 440 |
+
f(root_module, "", 0, *args, **kwargs)
|
| 441 |
+
return return_fn(*args, **kwargs)
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
@no_type_check
def _assert_in_training_states(
    state: _FSDPState,
    training_states: List[TrainingState],
) -> None:
    """Validate that ``state`` is currently in one of ``training_states``.

    Raises ``ValueError`` (rather than using ``assert``) so the check still
    runs when Python is invoked with assertions disabled (``-O``).
    """
    if state.training_state in training_states:
        return
    msg = (
        f"expected to be in states {training_states} but current state is "
        f"{state.training_state}"
    )
    # Surface the failure on rank 0, since this can fire during the backward
    # pass where the raised exception may be hard to attribute.
    if state.rank == 0:
        if isinstance(state, nn.Module):
            print(f"Asserting FSDP instance is: {state}")
        print(f"ERROR: {msg}")
        traceback.print_stack()
    raise ValueError(msg)
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def _get_root_modules(modules: Set[nn.Module]) -> Set[nn.Module]:
|
| 467 |
+
"""
|
| 468 |
+
Returns:
|
| 469 |
+
Set[nn.Module]: The subset of ``modules`` that are root modules (i.e.
|
| 470 |
+
parent-less) with respect to the modules in the set itself. In other
|
| 471 |
+
words, these are the modules in ``modules`` that are not the child of
|
| 472 |
+
any other module in ``modules``.
|
| 473 |
+
"""
|
| 474 |
+
root_modules: Set[nn.Module] = set()
|
| 475 |
+
module_to_submodules = {module: set(module.modules()) for module in modules}
|
| 476 |
+
for candidate_module in modules:
|
| 477 |
+
is_root_module = True
|
| 478 |
+
for module, submodules in module_to_submodules.items():
|
| 479 |
+
is_child_module = (
|
| 480 |
+
candidate_module is not module and candidate_module in submodules
|
| 481 |
+
)
|
| 482 |
+
if is_child_module:
|
| 483 |
+
is_root_module = False
|
| 484 |
+
break
|
| 485 |
+
if is_root_module:
|
| 486 |
+
root_modules.add(candidate_module)
|
| 487 |
+
return root_modules
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
def _override_module_mixed_precision(
    root: torch.nn.Module,
    module_classes_to_override: Iterable[Type[nn.Module]],
    wrap_override_dict: Dict[str, Any] = {"mixed_precision": None},  # noqa: B006
) -> Set[Type[nn.Module]]:
    """Force instances of ``module_classes_to_override`` under ``root`` to run
    in full precision by attaching ``wrap_override_dict`` (default: disable
    mixed precision) and cast hooks that downcast inputs to fp32 and upcast
    outputs back to the original input dtype.

    Returns the set of module classes for which at least one instance was
    actually overridden.
    """
    module_classes_to_override = tuple(set(module_classes_to_override))
    # Return a set of the actually overridden module classes
    overridden_module_classes: Set[Type[nn.Module]] = set()
    for mod in root.modules():
        if isinstance(mod, module_classes_to_override):
            overridden_module_classes.add(type(mod))
            mod._wrap_overrides = wrap_override_dict  # type: ignore[assignment]
            # TODO: We need to run this mixed precision ignored module in fp32,
            # but ensure subsequent modules, that may possibly be running with
            # mixed precision, still receive the appropriate precision inputs
            # without user having to adjust mixed precision config too much.
            # As a result, we attach pre and post forward hooks to up / down
            # cast. We should revisit this design.

            def cast_fn(
                dtype: torch.dtype, module: nn.Module, x: torch.Tensor
            ) -> torch.Tensor:
                # Only cast floating-point tensors whose dtype differs; record
                # the original dtype so the post-hook can restore it.
                if not torch.is_floating_point(x) or x.dtype == dtype:
                    return x
                _MODULE_TO_INP_DTYPE[module] = x.dtype
                return x.to(dtype)

            def forward_pre_hook(module, args):
                # Downcast (to fp32) all floating-point tensor inputs.
                return _apply_to_tensors(partial(cast_fn, torch.float32, module), args)

            def forward_post_hook(module, args, output):
                # NOTE: If the forward did not have any floating-point tensors,
                # then the dtype will not be set for this module, and we do not
                # upcast the dtype.
                if module in _MODULE_TO_INP_DTYPE:
                    old_dtype = _MODULE_TO_INP_DTYPE[module]
                    return _apply_to_tensors(
                        partial(cast_fn, old_dtype, module), output
                    )

            # We intentionally append both of these hooks so that they run after
            # all other hooks.
            mod.register_forward_pre_hook(forward_pre_hook, prepend=False)
            mod.register_forward_hook(forward_post_hook, prepend=False)
    return overridden_module_classes
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
def _no_dispatch_record_stream(tensor: torch.Tensor, stream: torch.Stream) -> None:
    """Call ``tensor.record_stream(stream)`` with torch dispatch disabled.

    No-ops for device types without ``record_stream`` support and when
    compiling under torchdynamo.
    """
    # FIXME record_stream doesn't work with non-cuda/mtia tensors
    if tensor.device.type not in [
        "cuda",
        "mtia",
        torch._C._get_privateuse1_backend_name(),
    ]:
        return

    if torch.distributed._functional_collectives.is_torchdynamo_compiling():
        return
        # from @ezyang:
        # The no_dispatch was added in https://github.com/pytorch/pytorch/pull/88014 cc @fegin
        # Looking over the PR, it looks like this is because we don't actually support Stream arguments
        # in torch dispatch, so it just chokes.
        # If Dynamo is able to answer "are there any torch dispatch modes" active (it should answer False),
        # a better version of this would just be to check if there are any modes before disabling dispatch.
        # TODO(voz): Extend a dynamo util to answer the above, unify the codepaths here.
        # NOTE(review): the call below is intentionally unreachable (dead code
        # after the early return above); kept verbatim from upstream.
        tensor.record_stream(stream)
    else:
        with no_dispatch():
            tensor.record_stream(stream)
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import Set
|
| 3 |
+
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def _annotate_modules_for_dynamo(
|
| 8 |
+
module: nn.Module,
|
| 9 |
+
ignored_modules: Set[nn.Module],
|
| 10 |
+
use_orig_params: bool,
|
| 11 |
+
):
|
| 12 |
+
"""
|
| 13 |
+
Annotates the submodules in ``module`` 's tree, except those in
|
| 14 |
+
``ignored_modules``, indicating that the submodules are FSDP-managed and
|
| 15 |
+
saving the ``use_orig_params`` setting passed to the FSDP constructor.
|
| 16 |
+
"""
|
| 17 |
+
for submodule in module.modules():
|
| 18 |
+
if submodule not in ignored_modules:
|
| 19 |
+
"""[note: Dynamo treats FSDP wrapped modules as UnspecializedNNModule]
|
| 20 |
+
|
| 21 |
+
Dynamo doesn't get to see this instance (FullyShardedDataParallel) during tracing, since
|
| 22 |
+
it skips tracing all the torch.distributed.fsdp code.
|
| 23 |
+
- Why? Running the FSDP code eagerly avoids lots of issues trying to trace complex hooks, and also
|
| 24 |
+
gets us graph-breaks on FSDP module boundaries which we want anyway for comm ops.
|
| 25 |
+
- However, we _also_ want dynamo to treat the wrapped module inside FSDP 'unspecially' (*),
|
| 26 |
+
and we need a way to indicate to dynamo which modules are wrapped by FSDP.
|
| 27 |
+
|
| 28 |
+
(*) UnspecializedNNModules in dynamo are traced-through without any assumptions, and with thorough
|
| 29 |
+
guards. NNModules otherwise are 'specialized', meaning there is less overhead due to assuming
|
| 30 |
+
their code is well-behaved.
|
| 31 |
+
|
| 32 |
+
One particular issue with specialized NNModules for FSDP is that the
|
| 33 |
+
views created for orig_params are captured into the compiled graph on the first iteration, and while
|
| 34 |
+
they are always going to point to the correct flatparameter and give correct results, their order
|
| 35 |
+
of creation influences the order of backward execution, preventing overlap of comm and computation
|
| 36 |
+
during backward. We need to _use_ the new parameter views created on each forward iteration, in
|
| 37 |
+
order for backward to interleave hooks with compute per layer. UnspecializedNNModule lets us achieve
|
| 38 |
+
this by capturing the module code more 'functionally' and passing parameters in as inputs each time.
|
| 39 |
+
"""
|
| 40 |
+
submodule._is_fsdp_managed_module = True # type: ignore[assignment]
|
| 41 |
+
|
| 42 |
+
# Dynamo only supports FSDP with use_orig_params=True.
|
| 43 |
+
# This is hacky, but I could not think of another way to add an assertion to dynamo
|
| 44 |
+
# for this, since Dynamo skips all the FSDP code frames and thus can't inspect the
|
| 45 |
+
# FSDP module directly
|
| 46 |
+
submodule._fsdp_use_orig_params = use_orig_params # type: ignore[assignment]
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py
ADDED
|
@@ -0,0 +1,1200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import collections
|
| 3 |
+
import itertools
|
| 4 |
+
import os
|
| 5 |
+
import warnings
|
| 6 |
+
from typing import (
|
| 7 |
+
Any,
|
| 8 |
+
Callable,
|
| 9 |
+
Deque,
|
| 10 |
+
Dict,
|
| 11 |
+
Generator,
|
| 12 |
+
Iterable,
|
| 13 |
+
Iterator,
|
| 14 |
+
List,
|
| 15 |
+
no_type_check,
|
| 16 |
+
Optional,
|
| 17 |
+
Set,
|
| 18 |
+
Tuple,
|
| 19 |
+
TYPE_CHECKING,
|
| 20 |
+
Union,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
import torch
|
| 24 |
+
import torch.distributed as dist
|
| 25 |
+
import torch.distributed.fsdp._exec_order_utils as exec_order_utils
|
| 26 |
+
import torch.distributed.fsdp._traversal_utils as traversal_utils
|
| 27 |
+
import torch.distributed.fsdp.fully_sharded_data_parallel as fsdp_file
|
| 28 |
+
import torch.nn as nn
|
| 29 |
+
from torch.distributed.algorithms._comm_hooks import default_hooks
|
| 30 |
+
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
|
| 31 |
+
from torch.distributed.distributed_c10d import _get_default_group
|
| 32 |
+
from torch.distributed.fsdp._common_utils import (
|
| 33 |
+
_FSDPDeviceHandle,
|
| 34 |
+
_FSDPState,
|
| 35 |
+
_get_module_fsdp_state,
|
| 36 |
+
_is_fsdp_flattened,
|
| 37 |
+
_named_parameters_with_duplicates,
|
| 38 |
+
clean_tensor_name,
|
| 39 |
+
TrainingState,
|
| 40 |
+
)
|
| 41 |
+
from torch.distributed.fsdp._flat_param import (
|
| 42 |
+
_FSDP_USE_FULL_PREC_IN_EVAL,
|
| 43 |
+
FlatParameter,
|
| 44 |
+
FlatParamHandle,
|
| 45 |
+
HandleShardingStrategy,
|
| 46 |
+
)
|
| 47 |
+
from torch.distributed.fsdp._limiter_utils import _FreeEventQueue
|
| 48 |
+
from torch.distributed.fsdp.api import (
|
| 49 |
+
BackwardPrefetch,
|
| 50 |
+
CPUOffload,
|
| 51 |
+
FullOptimStateDictConfig,
|
| 52 |
+
FullStateDictConfig,
|
| 53 |
+
MixedPrecision,
|
| 54 |
+
ShardingStrategy,
|
| 55 |
+
StateDictConfig,
|
| 56 |
+
StateDictType,
|
| 57 |
+
)
|
| 58 |
+
from torch.distributed.fsdp.wrap import _Policy
|
| 59 |
+
from torch.distributed.tensor.parallel.fsdp import DTensorExtensions
|
| 60 |
+
from torch.distributed.utils import _sync_params_and_buffers
|
| 61 |
+
from torch.utils._python_dispatch import is_traceable_wrapper_subclass
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
if TYPE_CHECKING:
|
| 65 |
+
from torch.utils.hooks import RemovableHandle
|
| 66 |
+
|
| 67 |
+
# Whether the optional `torchdistx` package (deferred / fake tensor
# initialization) is importable in this environment.
_TORCHDISTX_AVAIL = True
try:
    from torchdistx import deferred_init, fake  # type: ignore[import]
except ImportError:
    _TORCHDISTX_AVAIL = False

# Bucket size in bytes (250 MiB) used when broadcasting module states.
PARAM_BROADCAST_BUCKET_SIZE = int(250 * 1024 * 1024)
# Attribute name used to mark a module whose states have been synced.
FSDP_SYNCED = "_fsdp_synced"
# Specification of process groups for hybrid sharding strategies.
HybridShardProcessGroupType = Tuple[dist.ProcessGroup, dist.ProcessGroup]
# Overall specification of process group.
ProcessGroupType = Optional[Union[dist.ProcessGroup, HybridShardProcessGroupType]]


# TODO (awgu): Refactor this later
# Maps the user-facing `ShardingStrategy` to the internal per-handle enum.
SHARDING_STRATEGY_MAP = {
    ShardingStrategy.NO_SHARD: HandleShardingStrategy.NO_SHARD,
    ShardingStrategy.FULL_SHARD: HandleShardingStrategy.FULL_SHARD,
    ShardingStrategy.SHARD_GRAD_OP: HandleShardingStrategy.SHARD_GRAD_OP,
    ShardingStrategy.HYBRID_SHARD: HandleShardingStrategy.HYBRID_SHARD,
    ShardingStrategy._HYBRID_SHARD_ZERO2: HandleShardingStrategy._HYBRID_SHARD_ZERO2,
}
# Strategies that shard within a node and replicate across nodes.
HYBRID_SHARDING_STRATEGIES = [
    ShardingStrategy.HYBRID_SHARD,
    ShardingStrategy._HYBRID_SHARD_ZERO2,
]
# Strategies that keep parameters unsharded between forward and backward.
NO_RESHARD_AFTER_FORWARD_STRATEGIES = (
    ShardingStrategy.SHARD_GRAD_OP,
    ShardingStrategy._HYBRID_SHARD_ZERO2,
)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
# NOTE: Since non-self attributes cannot be type annotated, several attributes
|
| 100 |
+
# on `state` are defined first as local variables before being assigned.
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@no_type_check
def _init_process_group_state(
    state: _FSDPState,
    process_group: ProcessGroupType,
    sharding_strategy: ShardingStrategy,
    policy: Optional[_Policy],
    device_mesh: Optional[DeviceMesh] = None,
) -> _FSDPState:
    """Initialize ``state.process_group`` (and, for hybrid strategies, the
    inter-node group) from exactly one of ``process_group`` or
    ``device_mesh``, then derive rank/world-size and the gradient
    pre-/post-divide factors.

    Raises:
        ValueError: if both ``process_group`` and ``device_mesh`` are given,
            or if a hybrid strategy is manually wrapped with neither groups
            nor a mesh nor an auto-wrap policy.
    """
    if process_group is not None and device_mesh is not None:
        raise ValueError(
            "Cannot pass both process_group and device_mesh at the "
            "same time. Please just pass only one of them."
        )
    is_hybrid_strategy = sharding_strategy in HYBRID_SHARDING_STRATEGIES
    if is_hybrid_strategy:
        if process_group is None and policy is None and device_mesh is None:
            # Raise an error here, since this is manual wrapping with no process group
            # passed in, there is no way to ensure all wrapped FSDP instances use the same
            # process groups.
            raise ValueError(
                f"Manual wrapping with {sharding_strategy} "
                "requires explicit specification of process group or device_mesh."
            )
        else:
            state = _init_process_group_state_for_hybrid_shard(
                state, process_group, device_mesh
            )
    else:
        if device_mesh:
            state._device_mesh = device_mesh
            state.process_group = device_mesh.get_group(mesh_dim=0)
        else:
            # Fall back to the default (world) group when nothing is given.
            state.process_group = (
                process_group if process_group is not None else _get_default_group()
            )

    state.rank = state.process_group.rank()
    state.world_size = state.process_group.size()
    data_parallel_world_size = state.world_size
    if is_hybrid_strategy:
        # With hybrid sharding, gradients are additionally all-reduced across
        # nodes, so the effective data-parallel size spans both groups.
        data_parallel_world_size *= state._inter_node_pg.size()
    state._gradient_predivide_factor = (
        default_hooks.DefaultState._get_gradient_predivide_factor(
            data_parallel_world_size
        )
    )
    state._gradient_postdivide_factor = (
        data_parallel_world_size / state._gradient_predivide_factor
    )
    return state
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
@no_type_check
def _init_process_group_state_for_hybrid_shard(
    state: _FSDPState,
    process_group: ProcessGroupType,
    device_mesh: DeviceMesh,
) -> _FSDPState:
    """Set up the intra-node (sharding) and inter-node (replication) process
    groups on ``state`` for the hybrid sharding strategies, from one of:
    a 2D ``device_mesh``, a freshly derived pair of node-local groups (when
    ``process_group`` is None), or an explicit
    ``(intra_node_pg, inter_node_pg)`` tuple.

    Raises:
        ValueError: on a non-2D mesh or a malformed ``process_group``.
    """
    if device_mesh:
        if _is_valid_hybrid_shard_device_mesh(device_mesh):
            state._device_mesh = device_mesh
            # We currently only allow _inter_node_pg to be the outermost dimension, and the
            # process_group(intra_node) to be the innermost dimension.
            state._inter_node_pg = device_mesh.get_group(mesh_dim=0)
            state.process_group = device_mesh.get_group(mesh_dim=1)
        else:
            raise ValueError(
                f"Expected device_mesh to have ndim=2 but got {device_mesh.ndim}"
            )
    elif process_group is None:
        default_group = _get_default_group()
        intra_node_group, inter_node_group = _init_intra_and_inter_node_groups(
            default_group, state._device_handle.device_count()
        )
        # we shard across intra-node
        state.process_group = intra_node_group
        # save _inter_node_pg to allreduce across.
        state._inter_node_pg = inter_node_group
    else:
        # Check type and assign state.process_group and state._inter_node_pg.
        if _is_valid_hybrid_shard_pg_type(process_group):
            # Assuming that user passed in as intra node group and inter node group
            # as documented.
            state.process_group, state._inter_node_pg = process_group
        else:
            raise ValueError(
                "Expected process_group to be passed in as either None or "
                f"Tuple[dist.ProcessGroup, dist.ProcessGroup] but got {type(process_group)}"
            )
    # Create state for allreduce
    state._inter_node_state = _get_default_comm_hook_state(
        process_group=state._inter_node_pg,
    )
    return state
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
@no_type_check
|
| 200 |
+
def _is_valid_hybrid_shard_pg_type(process_group: Any) -> bool:
|
| 201 |
+
return (
|
| 202 |
+
isinstance(process_group, tuple)
|
| 203 |
+
and len(process_group) == 2
|
| 204 |
+
and all(isinstance(pg, dist.ProcessGroup) for pg in process_group)
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
@no_type_check
|
| 209 |
+
def _is_valid_hybrid_shard_device_mesh(device_mesh: DeviceMesh) -> bool:
|
| 210 |
+
return isinstance(device_mesh, DeviceMesh) and device_mesh.ndim == 2
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
@no_type_check
def _init_intra_node_process_group(num_devices_per_node: int) -> dist.ProcessGroup:
    """
    Return a process group across the current node.

    For example, given each row is a distinct node:
    0 1 2 3 4 5 6 7
    8 9 10 11 12 13 14 15
    This API would return an intra-node subgroup across
    [0, 1, ..., 7] or [8, 9, ..., 15] depending on the process's rank.
    For example, rank 3 would get [0, 1, ..., 7].
    """
    # `new_subgroups` partitions the world into contiguous groups of
    # `num_devices_per_node` ranks and returns this rank's own subgroup.
    intra_node_subgroup, _ = dist.new_subgroups(num_devices_per_node)
    return intra_node_subgroup
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@no_type_check
def _init_inter_node_process_group(
    global_process_group: dist.ProcessGroup,
    num_devices_per_node: int,
) -> dist.ProcessGroup:
    """
    Return an inter-node process group where each contained rank has the same local rank.

    For example, given each row is a distinct node:
    0 1 2 3 4 5 6 7
    8 9 10 11 12 13 14 15
    This API would return inter-node process group [0, 8], [1, 9], [2, 10], and so forth
    depending on the process's rank. For example, rank 1 would get [1, 9], rank 5
    would get [5, 13].
    """
    # the inter-node pg that is returned
    inter_node_pg = None
    sharding_backend = dist.get_backend(global_process_group)
    world_size = dist.get_world_size(global_process_group)
    # Assuming fully homogeneous setup
    num_nodes = world_size // num_devices_per_node
    my_local_rank = dist.get_rank(global_process_group) % num_devices_per_node
    for local_rank in range(num_devices_per_node):
        ranks_for_inter_group = [
            local_rank + (i * num_devices_per_node) for i in range(num_nodes)
        ]
        # every rank always needs to call dist.new_group
        # (group creation is collective over the default group, so all ranks
        # must participate in the same order, even for groups they don't join)
        grp = dist.new_group(ranks=ranks_for_inter_group, backend=sharding_backend)
        if local_rank == my_local_rank:
            inter_node_pg = grp

    assert (
        inter_node_pg is not None
    ), f"{my_local_rank} expected to assign inter-node pg, but did not"
    return inter_node_pg
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _init_intra_and_inter_node_groups(
    global_process_group: dist.ProcessGroup,
    num_devices_per_node: int,
) -> Tuple[dist.ProcessGroup, dist.ProcessGroup]:
    """
    Initialize intra and inter-node process groups and return the ones corresponding to this process's rank.

    This function can be used to initialize process groups for ``HYBRID_SHARD`` or
    ``_HYBRID_SHARD_ZERO2`` in FSDP.
    This function assumes each node has an equal number of CUDA-enabled devices.
    Returns:
        Tuple[dist.ProcessGroup, dist.ProcessGroup]: Intra and inter-node process group.
    """
    intra_node_pg = _init_intra_node_process_group(num_devices_per_node)
    inter_node_pg = _init_inter_node_process_group(
        global_process_group, num_devices_per_node
    )
    return intra_node_pg, inter_node_pg
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
@no_type_check
def _init_ignored_module_states(
    state: _FSDPState,
    module: nn.Module,
    ignored_modules: Optional[Iterable[torch.nn.Module]],
    ignored_states: Union[
        Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
    ] = None,
) -> _FSDPState:
    """
    Record ignored modules, parameters, and buffer names on ``state``.

    Exactly one of ``ignored_modules`` / ``ignored_states`` may be passed;
    ``ignored_states`` may hold either all parameters or all modules.
    """
    if ignored_modules is not None and ignored_states is not None:
        raise ValueError(
            "Cannot pass both ignored_modules and ignored_states at the "
            "same time. Please just pass ignored_states."
        )
    ignored_parameters = None
    passed_as_ignored_states = ignored_states is not None
    if passed_as_ignored_states:
        ignored_states_list = list(ignored_states)
        _check_ignored_states(ignored_states_list, True)
    else:
        ignored_states_list = []
        _check_ignored_states(
            list(ignored_modules) if ignored_modules is not None else [], False
        )
    if len(ignored_states_list) > 0:
        # `_check_ignored_states` guaranteed the list is homogeneous, so the
        # first element determines whether these are parameters or modules
        if isinstance(ignored_states_list[0], nn.Parameter):
            ignored_parameters = ignored_states_list
        else:
            ignored_modules = ignored_states_list
    state._ignored_modules = _get_ignored_modules(module, ignored_modules)
    state._ignored_params = _get_ignored_params(
        module,
        state._ignored_modules,
        ignored_parameters,
    )
    state._ignored_buffer_names = _get_ignored_buffer_names(
        module,
        state._ignored_modules,
    )
    # TODO: FSDP's contract for buffers is not well-defined. They are
    # implicitly ignored for most functionality since they are not sharded;
    # however, FSDP still imposes some semantics on buffers (e.g. buffer mixed
    # precision). We should formalize this contract and decide if we need to
    # compute and store `_ignored_buffers`.
    return state
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def _check_ignored_states(
|
| 333 |
+
ignored_states: List[Any], passed_as_ignored_states: bool
|
| 334 |
+
) -> None:
|
| 335 |
+
"""
|
| 336 |
+
Check that the ignored states are uniformly parameters or uniformly modules.
|
| 337 |
+
|
| 338 |
+
We may remove this check in the future if we permit mixing.
|
| 339 |
+
"""
|
| 340 |
+
if len(ignored_states) == 0:
|
| 341 |
+
return
|
| 342 |
+
if passed_as_ignored_states:
|
| 343 |
+
all_params = all(isinstance(state, nn.Parameter) for state in ignored_states)
|
| 344 |
+
all_modules = all(isinstance(state, nn.Module) for state in ignored_states)
|
| 345 |
+
if not all_params and not all_modules:
|
| 346 |
+
# Sort for consistent ordering for unit test regex matching
|
| 347 |
+
sorted_types = sorted({type(state) for state in ignored_states}, key=repr)
|
| 348 |
+
raise ValueError(
|
| 349 |
+
"ignored_states expects all nn.Parameter or all nn.Module list "
|
| 350 |
+
f"elements but got types {sorted_types}"
|
| 351 |
+
)
|
| 352 |
+
else:
|
| 353 |
+
if not all(isinstance(state, nn.Module) for state in ignored_states):
|
| 354 |
+
sorted_types = sorted({type(state) for state in ignored_states}, key=repr)
|
| 355 |
+
raise ValueError(
|
| 356 |
+
"ignored_modules expects nn.Module list elements but got "
|
| 357 |
+
f"types {sorted_types}"
|
| 358 |
+
)
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
@no_type_check
def _init_device_handle(
    state: _FSDPState,
    module: nn.Module,
    ignored_params: Set[nn.Parameter],
    device_id: Optional[Union[int, torch.device]],
) -> _FSDPState:
    """
    Determine device handle used for initializing FSDP.

    If a device is specified by ``device_id``,
    then returns device handle corresponds to that device type. Otherwise, if the
    module is already on a non-CPU device, then the device type is that non-CPU device type.
    If the module is on CPU or meta, then the device type is the current accelerator device.
    See the :ref:`Accelerators<accelerators>` for details.

    This method will be called once ignored parameters were determined, as the
    device handle may be needed for other initialization.
    """
    determined_device = None
    if device_id is not None:
        determined_device = (
            device_id
            if isinstance(device_id, torch.device)
            else torch.device(device_id)
        )
    if determined_device is None:
        # No `device_id`: infer the device type from the managed parameters,
        # skipping CPU/meta params; all non-CPU params must share one type
        for param in _get_orig_params(module, ignored_params):
            if param.device.type in {"cpu", "meta"}:
                continue
            if determined_device is None:
                determined_device = param.device
            else:
                if param.device.type != determined_device.type:
                    raise RuntimeError(
                        f"FSDP does not support modules with different device types "
                        f"but got params on {determined_device.type} and {param.device.type}"
                    )
        # Fall back to the current accelerator when all params are CPU/meta
        determined_device = determined_device or torch._C._get_accelerator()
        if determined_device.type == "cpu":
            raise RuntimeError(
                "FSDP needs a non-CPU accelerator device, but no accelerator device is detected."
            )

    state._device_handle = _FSDPDeviceHandle.from_device(determined_device)
    return state
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
@no_type_check
def _init_buffer_state(
    state: _FSDPState,
    module: nn.Module,
) -> _FSDPState:
    """Record buffer names and their original dtypes on ``state``."""
    state._buffer_names = _get_buffer_names(module)
    # Save a mapping from clean fully-qualified buffer name (starting from
    # `module`) to its original dtype for restoring that dtype during model
    # checkpointing when buffer mixed precision is enabled. The names should
    # be clean since the casting happens in a `summon_full_params()` context.
    state._buffer_name_to_orig_dtype = {
        clean_tensor_name(name): buf.dtype for name, buf in module.named_buffers()
    }
    return state
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
@no_type_check
def _init_core_state(
    state: _FSDPState,
    sharding_strategy: Optional[ShardingStrategy],
    mixed_precision: Optional[MixedPrecision],
    cpu_offload: Optional[CPUOffload],
    limit_all_gathers: bool,
    use_orig_params: bool,
    backward_prefetch_limit: int,
    forward_prefetch_limit: int,
) -> _FSDPState:
    """
    Populate the core FSDP configuration and bookkeeping attributes on ``state``.

    ``None`` arguments fall back to their defaults (``FULL_SHARD``,
    ``MixedPrecision()``, ``CPUOffload()``).
    """
    # We clamp the strategy to `NO_SHARD` for world size of 1 since they are
    # currently functionally equivalent. This may change if/when we integrate
    # FSDP with MoE.
    if state.world_size == 1:
        if sharding_strategy != ShardingStrategy.NO_SHARD:
            warnings.warn(
                "FSDP is switching to use `NO_SHARD` instead of "
                f"{sharding_strategy or ShardingStrategy.FULL_SHARD} since "
                "the world size is 1."
            )
        sharding_strategy = ShardingStrategy.NO_SHARD
    elif sharding_strategy == ShardingStrategy.NO_SHARD:
        warnings.warn(
            "The `NO_SHARD` sharding strategy is deprecated. If having issues, "
            "please use `DistributedDataParallel` instead.",
            FutureWarning,
            # Level 1 is here, level 2 is from `FullyShardedDataParallel`, and
            # level 3 is from the true caller
            stacklevel=3,
        )
    state.sharding_strategy = sharding_strategy or ShardingStrategy.FULL_SHARD
    state.mixed_precision = mixed_precision or MixedPrecision()
    if mixed_precision is not None:
        torch._C._log_api_usage_once(
            f"torch.distributed.fsdp.mixed_precision.{str(state.mixed_precision)}"
        )
    # Env-var escape hatch to run eval in full precision
    state._use_full_prec_in_eval = (
        os.environ.get(_FSDP_USE_FULL_PREC_IN_EVAL, "") == "1"
    )
    state.cpu_offload = cpu_offload or CPUOffload()
    state.limit_all_gathers = limit_all_gathers
    state._use_orig_params = use_orig_params
    state.training_state = TrainingState.IDLE
    state._is_root = None
    state._free_event_queue = _FreeEventQueue()
    state._debug_level = dist.get_debug_level()
    state._exec_order_data = exec_order_utils._ExecOrderData(
        state._debug_level,
        backward_prefetch_limit,
        forward_prefetch_limit,
    )
    state._unshard_event = None
    # Mapping from fully sharded module to the handles it is responsible to
    # unshard and reshard (see [Note: Fully Sharded Module])
    _fully_sharded_module_to_handle: Dict[nn.Module, FlatParamHandle] = {}
    state._fully_sharded_module_to_handle = _fully_sharded_module_to_handle
    # Invariant: `state.params` contains exactly the `FlatParameter`s of the
    # handles in `state._handle`
    _handle: Optional[FlatParamHandle] = None
    state._handle = _handle
    params: List[FlatParameter] = []
    state.params = params
    return state
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
@no_type_check
def _init_runtime_state(
    state: _FSDPState,
) -> _FSDPState:
    """Initialize the forward-hook handle lists and runtime flags on ``state``."""
    root_pre_forward_handles: List[RemovableHandle] = []
    pre_forward_handles: List[RemovableHandle] = []
    post_forward_handles: List[RemovableHandle] = []
    state._root_pre_forward_handles = root_pre_forward_handles
    state._pre_forward_handles = pre_forward_handles
    state._post_forward_handles = post_forward_handles
    state._sync_gradients = True
    state._comm_hook = None
    state._comm_hook_state = None
    # Used to prevent running the pre-backward hook multiple times
    return state
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
@no_type_check
def _init_prefetching_state(
    state: _FSDPState,
    backward_prefetch: BackwardPrefetch,
    forward_prefetch: bool,
) -> _FSDPState:
    """Record the backward/forward prefetching settings on ``state``."""
    # The data structures use tuples of handles to generalize over the case
    # where a module's forward involves multiple handles.
    state.backward_prefetch = backward_prefetch
    state.forward_prefetch = forward_prefetch
    return state
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
@no_type_check
def _init_extension(state: _FSDPState, device_mesh: DeviceMesh = None) -> _FSDPState:
    """
    Set ``state._fsdp_extension`` for DTensor interop when a sliced device mesh is used.

    NOTE(review): the condition compares ``root_mesh`` against
    ``state._device_mesh`` rather than the local ``device_mesh`` argument —
    presumably they are the same at this point; confirm against the caller.
    """
    # TODO: we need to add additional check once we support FSDP + PiPPy.
    # This check is currently sufficient, since we only support FSDP + TP.
    root_mesh = _mesh_resources.get_root_mesh(device_mesh)
    # if a root mesh is not the same as device_mesh,
    # meaning the device_mesh is sliced out from the root mesh.
    if device_mesh and root_mesh != state._device_mesh:
        state._fsdp_extension = DTensorExtensions(state._device_handle)
    else:
        # We need to explicitly set _fsdp_extension to None.
        # Otherwise, we will run into an infinite recursion when getting the attribute.
        state._fsdp_extension = None
    return state
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
@no_type_check
def _init_state_dict_state(state: _FSDPState) -> _FSDPState:
    """Default the state-dict configuration to full (unsharded) state dicts."""
    state._state_dict_type = StateDictType.FULL_STATE_DICT
    state._state_dict_config = FullStateDictConfig()
    state._optim_state_dict_config = FullOptimStateDictConfig()
    # Per-module contexts opened by `summon_full_params()`-style unsharding
    unshard_params_ctx: Dict[nn.Module, Generator] = {}
    state._unshard_params_ctx = unshard_params_ctx
    return state
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
def _verify_managed_params(module: nn.Module, params: List[nn.Parameter]) -> None:
|
| 553 |
+
"""
|
| 554 |
+
Verify if the parameters are accepted by FSDP. The only restriction now
|
| 555 |
+
is that the parameter cannot be a scalar tensor (param.shape == []).
|
| 556 |
+
"""
|
| 557 |
+
for param in params:
|
| 558 |
+
if len(param.shape) == 0:
|
| 559 |
+
param_name = ""
|
| 560 |
+
for name, param_ in module.named_parameters():
|
| 561 |
+
if param is param_:
|
| 562 |
+
param_name = name
|
| 563 |
+
break
|
| 564 |
+
assert param_name
|
| 565 |
+
raise ValueError(
|
| 566 |
+
"FSDP doesn't support salar parameters. "
|
| 567 |
+
f"Change {param_name} to a 1D tensor with numel equal to 1."
|
| 568 |
+
)
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
@no_type_check
def _init_param_handle_from_module(
    state: _FSDPState,
    fully_sharded_module: nn.Module,
    device_id: Optional[Union[int, torch.device]],
    param_init_fn: Optional[Callable[[nn.Module], None]],
    sync_module_states: bool,
) -> _FSDPState:
    """Initialize a ``FlatParamHandle`` from a module ``fully_sharded_module``."""
    # Validate device placement before any movement/materialization
    _check_single_device_module(fully_sharded_module, state._ignored_params, device_id)
    device_from_device_id = _get_device_from_device_id(
        device_id, state.rank, state._device_handle
    )
    is_meta_module, is_torchdistX_deferred_init = _need_to_materialize_module(
        fully_sharded_module, state._ignored_params, state._ignored_modules
    )
    # Materialize the module if needed
    if (is_meta_module or is_torchdistX_deferred_init) and param_init_fn is not None:
        # User-provided initialization takes precedence over the defaults
        _materialize_with_param_init_fn(
            fully_sharded_module, param_init_fn, state._ignored_modules
        )
    elif is_meta_module:
        _materialize_meta_module(
            fully_sharded_module,
            device_id,
            state._ignored_modules,
            state._device_handle,
        )
    elif is_torchdistX_deferred_init:
        deferred_init.materialize_module(
            fully_sharded_module,
            check_fn=lambda submodule: _get_module_fsdp_state(submodule) is None
            and submodule not in state._ignored_modules,
        )

    # Buffers of ignored modules are not moved by `_move_module_to_device`
    ignored_buffers = {
        buffer
        for ignored_module in state._ignored_modules
        for buffer in ignored_module.buffers()
    }

    _move_module_to_device(
        fully_sharded_module,
        state._ignored_params,
        ignored_buffers,
        device_from_device_id,
    )
    state.compute_device = _get_compute_device(
        fully_sharded_module,
        state._ignored_params,
        device_from_device_id,
        state.rank,
        state._device_handle,
    )

    managed_params = list(_get_orig_params(fully_sharded_module, state._ignored_params))
    _verify_managed_params(fully_sharded_module, managed_params)
    if sync_module_states:
        # Broadcast rank 0's params/buffers within the sharding group, and
        # additionally across nodes when hybrid sharding is used
        _sync_module_params_and_buffers(
            fully_sharded_module, managed_params, state.process_group
        )
        if state.sharding_strategy in HYBRID_SHARDING_STRATEGIES:
            _sync_module_params_and_buffers(
                fully_sharded_module, managed_params, state._inter_node_pg
            )
    _init_param_handle_from_params(state, managed_params, fully_sharded_module)
    return state
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
@no_type_check
def _init_param_handle_from_params(
    state: _FSDPState,
    params: List[nn.Parameter],
    fully_sharded_module: nn.Module,
):
    """Construct, shard, and register a single ``FlatParamHandle`` for ``params`` on ``state``."""
    if len(params) == 0:
        return
    handle = FlatParamHandle(
        params,
        fully_sharded_module,
        state.compute_device,
        SHARDING_STRATEGY_MAP[state.sharding_strategy],
        state.cpu_offload.offload_params,
        state.mixed_precision.param_dtype,
        state.mixed_precision.reduce_dtype,
        state.mixed_precision.keep_low_precision_grads,
        state.process_group,
        state._use_orig_params,
        fsdp_extension=state._fsdp_extension,
    )
    handle.shard()
    # Each FSDP state owns at most one handle
    assert not state._handle
    state.params.append(handle.flat_param)
    state._handle = handle
    state._fully_sharded_module_to_handle[handle._fully_sharded_module] = handle
    cpu_device = torch.device("cpu")
    # With CPU offloading, the flat parameter lives on CPU between unshards
    if state.cpu_offload.offload_params and handle.flat_param.device != cpu_device:
        handle.flat_param_to(cpu_device)
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
def _get_ignored_modules(
    root_module: nn.Module,
    _ignored_modules: Optional[Iterable[torch.nn.Module]],
) -> Set[nn.Module]:
    """
    Check that ``_ignored_modules`` is an iterable of ``nn.Module`` s without any FSDP instances.

    Return the modules contained in their module
    subtrees as a :class:`set`. Nested FSDP instances are excluded, but their
    already-computed ignored modules are included.

    ``_ignored_modules`` represents the argument passed by the user to FSDP.

    Raises:
        TypeError: If ``_ignored_modules`` is not an iterable of modules.
        ValueError: If an ignored module is itself an FSDP module.
    """
    msg_prefix = "`ignored_modules` should be an iterable of `torch.nn.Module`s "
    try:
        ignored_root_modules = (
            set(_ignored_modules) if _ignored_modules is not None else set()
        )
    except TypeError as e:
        raise TypeError(msg_prefix + f"but got {type(_ignored_modules)}") from e
    for module in ignored_root_modules:
        if not isinstance(module, torch.nn.Module):
            raise TypeError(msg_prefix + f"but got an iterable with {type(module)}")
        if _get_module_fsdp_state(module):
            # TODO: We may relax this by taking the FSDP instance's wrapped
            # module to provide more flexibility to the user.
            raise ValueError("`ignored_modules` should not include FSDP modules")
    # Treat modules that cannot compose with `fully_shard` as ignored modules,
    # meaning that their subtrees are ignored
    for module in root_module.modules():
        if not traversal_utils._composable(module):
            ignored_root_modules.add(module)
    # NOTE: Even if `ignored_root_modules` is empty, do not return early so
    # that this FSDP instance can get any ignored modules from its children.

    # Include child modules and exclude nested FSDP modules themselves
    ignored_modules = {
        child
        for module in ignored_root_modules
        for child in module.modules()
        if not isinstance(child, fsdp_file.FullyShardedDataParallel)
    }
    if root_module in ignored_modules:
        warnings.warn(
            "Trying to ignore the top-level module passed into the FSDP "
            "constructor itself will result in all parameters being "
            # Fix: previously interpolated the stale loop variable `module`
            # (last module visited above); the warning is about the root
            f"ignored and is not well-supported: {root_module}"
        )
    # Include nested FSDP modules' ignored modules
    for submodule in root_module.modules():
        optional_fsdp_state = _get_module_fsdp_state(submodule)
        if optional_fsdp_state is not None:
            assert hasattr(optional_fsdp_state, "_ignored_modules")
            ignored_modules.update(optional_fsdp_state._ignored_modules)
    return ignored_modules
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
def _get_ignored_params(
    root_module: torch.nn.Module,
    ignored_modules: Set[torch.nn.Module],
    ignored_parameters: Optional[Iterable[torch.nn.Parameter]] = None,
) -> Set[torch.nn.Parameter]:
    """
    Return the parameters of the modules in ``ignored_modules`` and the parameters in ``ignored_parameters``.

    :class:`FlatParameter` s are excluded from the result.
    """
    all_ignored_params: Set[torch.nn.Parameter] = set()

    # Parameters owned by explicitly ignored modules
    for mod in ignored_modules:
        all_ignored_params.update(
            p for p in mod.parameters() if not _is_fsdp_flattened(p)
        )

    # Explicitly ignored parameters
    if ignored_parameters is not None:
        all_ignored_params.update(
            p for p in ignored_parameters if not _is_fsdp_flattened(p)
        )

    # Always include nested FSDP modules' ignored parameters
    for submodule in root_module.modules():
        maybe_fsdp_state = _get_module_fsdp_state(submodule)
        if maybe_fsdp_state is not None:
            assert hasattr(maybe_fsdp_state, "_ignored_params")
            all_ignored_params.update(maybe_fsdp_state._ignored_params)

    return all_ignored_params
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def _get_ignored_buffer_names(
    root_module: torch.nn.Module,
    ignored_modules: Set[torch.nn.Module],
) -> Set[str]:
    """Return the cleaned buffer FQNs in ``ignored_modules``."""
    # Buffer objects owned by the ignored modules
    ignored_buffers = {buf for mod in ignored_modules for buf in mod.buffers()}

    # Map those buffers back to their cleaned FQNs relative to `root_module`
    all_ignored_buffer_names: Set[str] = {
        clean_tensor_name(name)
        for name, buf in root_module.named_buffers()
        if buf in ignored_buffers
    }

    # Always include nested FSDP modules' ignored buffer names
    for submodule in root_module.modules():
        maybe_fsdp_state = _get_module_fsdp_state(submodule)
        if maybe_fsdp_state is not None:
            assert hasattr(maybe_fsdp_state, "_ignored_buffer_names")
            all_ignored_buffer_names.update(maybe_fsdp_state._ignored_buffer_names)

    return all_ignored_buffer_names
|
| 789 |
+
|
| 790 |
+
|
| 791 |
+
def _get_buffer_names(root_module: nn.Module) -> Set[str]:
    """Return the fully prefixed names of all buffers in the module hierarchy rooted at ``root_module`` as a :class:`set`."""
    buffer_names = set()
    for buffer_name, _ in root_module.named_buffers():
        buffer_names.add(clean_tensor_name(buffer_name))
    return buffer_names
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
def _check_single_device_module(
    module: nn.Module,
    ignored_params: Set[nn.Parameter],
    device_id: Optional[Union[int, torch.device]],
) -> None:
    """
    Raise an error if ``module`` has original parameters on multiple devices, ignoring the parameters in ``ignored_params``.

    Thus, after this method, the module must be either fully on the CPU or
    fully on a non-CPU device.
    """
    devices = {param.device for param in _get_orig_params(module, ignored_params)}
    if len(devices) <= 1:
        return
    # We allow module to be partially on CPU and partially on GPU if device_id is not
    # None, since the device_id arg will result in the CPU portion being moved to
    # GPU. This is useful in cases where part of the module may be parallelized
    # by another algorithm and may already be on GPU. We'd like to enforce device_id
    # to not be None, otherwise we'd flatten parameters in a mixed module which is
    # not supported.
    mixed_cpu_gpu = len(devices) == 2 and torch.device("cpu") in devices
    if mixed_cpu_gpu:
        if device_id is None:
            raise RuntimeError(
                "To support a module with both CPU and GPU params, "
                "please pass in device_id argument."
            )
    else:
        raise RuntimeError(
            f"FSDP only supports single device modules but got params on {devices}"
        )
|
| 826 |
+
|
| 827 |
+
|
| 828 |
+
def _get_device_from_device_id(
    device_id: Optional[Union[int, torch.device]],
    rank: int,
    device_handle: _FSDPDeviceHandle,
) -> Optional[torch.device]:
    """
    Return a ``torch.device`` for the specified ``device_id``.

    Processes ``device_id`` and returns either the corresponding device or
    ``None`` if ``device_id`` is ``None``.
    """
    if device_id is None:
        return None
    if isinstance(device_id, torch.device):
        device = device_id
    else:
        device = torch.device(device_id)
    # A non-CPU device without an explicit index is resolved to the
    # current device, with a warning so the user can catch mismatches
    if device.type != "cpu" and device.index is None:
        warnings.warn(
            f"FSDP got the argument `device_id` {device_id} on rank "
            f"{rank}, which does not have an explicit index. "
            f"FSDP will use the current device {device_handle.current_device()}. "
            f"If this is incorrect, please explicitly call `torch.{device.type}.set_device()` "
            "before FSDP initialization or pass in the explicit device "
            "index as the `device_id` argument."
        )
        device = torch.device(device_handle.current_device())
    return device
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
def _need_to_materialize_module(
    module: nn.Module,
    ignored_params: Set[nn.Parameter],
    ignored_modules: Set[nn.Module],
) -> Tuple[bool, bool]:
    """
    Return if ``module`` has parameters on meta device and if ``module`` is using torchdistX deferred initialization.

    At most one of the returned bools can
    be ``True``. If either is ``True``, then ``module`` needs to be
    materialized.
    """
    managed_params = list(_get_orig_params(module, ignored_params))
    is_meta_module = any(param.is_meta for param in managed_params)
    # TODO: We need to establish a contract for FSDP and buffers. For now, we
    # skip checking for meta buffers from ignored modules. We should consider
    # refactoring the initialization holistically to avoid so many traversals.
    for submodule in module.modules():
        if submodule in ignored_modules:
            continue
        for buf in submodule.buffers(recurse=False):
            is_meta_module |= buf.is_meta
    # torchdistX deferred init is mutually exclusive with meta-device init
    is_torchdistX_deferred_init = (
        not is_meta_module
        and _TORCHDISTX_AVAIL
        and any(fake.is_fake(param) for param in managed_params)
    )
    return is_meta_module, is_torchdistX_deferred_init
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
def _materialize_with_param_init_fn(
    root_module: nn.Module,
    param_init_fn: Callable[[nn.Module], None],
    ignored_modules: Set[nn.Module],
) -> None:
    """Materialize modules under ``root_module`` by applying the user-provided ``param_init_fn``."""
    if not callable(param_init_fn):
        raise ValueError(
            f"Expected {param_init_fn} to be callable but got {type(param_init_fn)}"
        )
    for target_module in _get_modules_to_materialize(root_module, ignored_modules):
        param_init_fn(target_module)
|
| 899 |
+
|
| 900 |
+
|
| 901 |
+
def _materialize_meta_module(
    root_module: nn.Module,
    device_from_device_id: Optional[torch.device],
    ignored_modules: Set[nn.Module],
    device_handle: _FSDPDeviceHandle,
):
    """
    Materialize meta-device modules via ``to_empty()`` + ``reset_parameters()``.

    Args:
        root_module: Root of the subtree to materialize.
        device_from_device_id: Target device derived from the user-passed
            ``device_id``; falls back to the device handle's current device.
        ignored_modules: Modules whose subtrees are skipped.
        device_handle: Device handle used to query the current device.

    Raises:
        Re-raises any exception from a module's ``reset_parameters()`` after
        emitting a warning that identifies the failing module type.
    """
    # Run default meta device initialization
    materialization_device = device_from_device_id or torch.device(
        device_handle.current_device()
    )
    modules_to_materialize = _get_modules_to_materialize(root_module, ignored_modules)
    module = None
    try:
        # Assume that each module's `reset_parameters()` only initializes its
        # own parameters and not those of its children
        with torch.no_grad():
            for module in modules_to_materialize:
                # As a contract to the user, only call `reset_parameters()` if
                # the module has directly managed parameters/buffers
                module_state_iter = itertools.chain(
                    module.parameters(recurse=False), module.buffers(recurse=False)
                )
                has_module_states = len(list(module_state_iter)) > 0
                if has_module_states:
                    module.to_empty(device=materialization_device, recurse=False)
                    module.reset_parameters()  # type: ignore[operator]
    except BaseException as e:
        warnings.warn(
            "Unable to call `reset_parameters()` for module on meta "
            # Fixed missing space: the message previously rendered "oftype"
            f"device with error {str(e)}. Please ensure that your module of "
            f"type {type(module)} implements a `reset_parameters()` method."  # type: ignore[possibly-undefined]
        )
        raise e
|
| 934 |
+
|
| 935 |
+
|
| 936 |
+
def _get_modules_to_materialize(
    root_module: nn.Module, ignored_modules: Set[nn.Module]
) -> List[nn.Module]:
    """
    Collect the modules to materialize via ``reset_parameters()``.

    Performs a BFS from ``root_module``, stopping at any module with FSDP
    already applied or at ignored modules.
    """
    modules_to_materialize: List[nn.Module] = []
    visited: Set[nn.Module] = {root_module}
    queue = collections.deque([root_module])
    while queue:
        current = queue.popleft()
        modules_to_materialize.append(current)
        for child in current.children():
            if child in visited:
                continue
            if _get_module_fsdp_state(child) is not None:
                continue
            if child in ignored_modules:
                continue
            visited.add(child)
            queue.append(child)
    return modules_to_materialize
|
| 956 |
+
|
| 957 |
+
|
| 958 |
+
def _move_module_to_device(
    module: nn.Module,
    ignored_params: Set[nn.Parameter],
    ignored_buffers: Set[torch.Tensor],
    device_from_device_id: Optional[torch.device],
) -> None:
    """
    Move ``module`` depending on ``device_from_device_id`` and its current device.

    This includes moving ignored modules' parameters.

    - If ``device_from_device_id`` is not ``None``, then this moves ``module``
      to the device.
    - If ``device_from_device_id`` is ``None``, then this does not move
      ``module`` but warns the user if it is on CPU.

    Precondition: ``_check_single_device_module()``.
    """
    cpu_device = torch.device("cpu")
    if device_from_device_id is None:
        # No target device specified: only warn when the module is on CPU
        first_param = next(_get_orig_params(module, ignored_params), None)
        if first_param is not None and first_param.device == cpu_device:
            _warn_cpu_init()
        return
    # BFS from `module` without traversing any nested FSDP instances to
    # collect the parameters/buffers that have not yet been managed
    to_visit: Deque[nn.Module] = collections.deque([module])
    cpu_params: List[nn.Parameter] = []
    cpu_buffers: List[torch.Tensor] = []
    while to_visit:
        current = to_visit.popleft()
        # NOTE: Only parameters/buffers on the CPU device are collected. If
        # they are on a CUDA device different from the one specified by
        # `device_id`, they are NOT moved, so that `_get_compute_device()`
        # can raise an error.
        cpu_params.extend(
            p for p in current.parameters(recurse=False) if p.device == cpu_device
        )
        cpu_buffers.extend(
            b for b in current.buffers(recurse=False) if b.device == cpu_device
        )
        for child in current.children():
            if not isinstance(child, fsdp_file.FullyShardedDataParallel):
                to_visit.append(child)
    _move_states_to_device(
        [p for p in cpu_params if p not in ignored_params],
        [b for b in cpu_buffers if b not in ignored_buffers],
        device_from_device_id,
    )
|
| 1010 |
+
|
| 1011 |
+
|
| 1012 |
+
def _move_states_to_device(
|
| 1013 |
+
params: List[nn.Parameter],
|
| 1014 |
+
buffers: List[torch.Tensor],
|
| 1015 |
+
device_from_device_id: Optional[torch.device],
|
| 1016 |
+
) -> None:
|
| 1017 |
+
"""
|
| 1018 |
+
Move states to the specified device.
|
| 1019 |
+
|
| 1020 |
+
Precondition: ``_check_single_device_module()`` and module's parameters and
|
| 1021 |
+
buffers have been materialized if needed.
|
| 1022 |
+
"""
|
| 1023 |
+
if len(params) == 0 and len(buffers) == 0:
|
| 1024 |
+
return
|
| 1025 |
+
if len(params) > 0:
|
| 1026 |
+
current_device = params[0].device
|
| 1027 |
+
elif len(buffers) > 0:
|
| 1028 |
+
current_device = buffers[0].device
|
| 1029 |
+
cpu_device = torch.device("cpu")
|
| 1030 |
+
if device_from_device_id is not None:
|
| 1031 |
+
# Move the parameters and buffers like the `.data` code path in
|
| 1032 |
+
# `nn.Module._apply()`, which underlies `nn.Module.to()`
|
| 1033 |
+
for param in params:
|
| 1034 |
+
with torch.no_grad():
|
| 1035 |
+
param.data = param.to(device_from_device_id)
|
| 1036 |
+
if param.grad is not None:
|
| 1037 |
+
param.grad.data = param.grad.to(device_from_device_id)
|
| 1038 |
+
for buffer in buffers:
|
| 1039 |
+
buffer.data = buffer.to(device_from_device_id)
|
| 1040 |
+
elif current_device == cpu_device: # type: ignore[possibly-undefined]
|
| 1041 |
+
_warn_cpu_init()
|
| 1042 |
+
|
| 1043 |
+
|
| 1044 |
+
def _warn_cpu_init():
|
| 1045 |
+
warnings.warn(
|
| 1046 |
+
"The passed-in `module` is on CPU and will thus have FSDP's sharding "
|
| 1047 |
+
"initialization run on CPU, which may be slower than on GPU. We "
|
| 1048 |
+
"recommend passing in the `device_id` argument for FSDP to move "
|
| 1049 |
+
"`module` to GPU for the sharding initialization. `module` must also "
|
| 1050 |
+
"be on GPU device to work with the `sync_module_states=True` flag "
|
| 1051 |
+
"since that requires GPU communication."
|
| 1052 |
+
)
|
| 1053 |
+
|
| 1054 |
+
|
| 1055 |
+
def _get_compute_device(
|
| 1056 |
+
module: nn.Module,
|
| 1057 |
+
ignored_params: Set[nn.Parameter],
|
| 1058 |
+
device_from_device_id: Optional[torch.device],
|
| 1059 |
+
rank: int,
|
| 1060 |
+
device_handle: _FSDPDeviceHandle,
|
| 1061 |
+
) -> torch.device:
|
| 1062 |
+
"""
|
| 1063 |
+
Determine and return this FSDP instance's compute device.
|
| 1064 |
+
|
| 1065 |
+
If the module is already on a non-CPU device, then the compute device is that non-CPU
|
| 1066 |
+
device. If the module is on CPU, then the compute device is the current
|
| 1067 |
+
device.
|
| 1068 |
+
|
| 1069 |
+
Since this method should be called after materializing the module, any
|
| 1070 |
+
non-CPU device should not be meta device. For now, the compute device is
|
| 1071 |
+
always a CUDA or CUDA-like device with its explicit index.
|
| 1072 |
+
|
| 1073 |
+
Precondition: ``_check_single_device_module()`` and
|
| 1074 |
+
``_move_module_to_device()``.
|
| 1075 |
+
"""
|
| 1076 |
+
param = next(_get_orig_params(module, ignored_params), None)
|
| 1077 |
+
if param is not None and param.device.type != "cpu":
|
| 1078 |
+
compute_device = param.device # Determined by model param placement
|
| 1079 |
+
else:
|
| 1080 |
+
compute_device = torch.device(device_handle.current_device())
|
| 1081 |
+
if device_from_device_id is not None and compute_device != device_from_device_id:
|
| 1082 |
+
raise ValueError(
|
| 1083 |
+
f"Inconsistent compute device and `device_id` on rank {rank}: "
|
| 1084 |
+
f"{compute_device} vs {device_from_device_id}"
|
| 1085 |
+
)
|
| 1086 |
+
return compute_device
|
| 1087 |
+
|
| 1088 |
+
|
| 1089 |
+
# TODO: See how to deprecate!
|
| 1090 |
+
def _sync_module_params_and_buffers(
    module: nn.Module,
    params: List[nn.Parameter],
    process_group: dist.ProcessGroup,
) -> None:
    """
    Synchronize module states (i.e. parameters ``params`` and all not-yet-synced buffers) by broadcasting from rank 0 to all ranks.

    Precondition: ``sync_module_states == True`` and ``self.process_group`` has
    been set.
    """
    module_states: List[torch.Tensor] = []
    for buffer in module.buffers():
        # Avoid re-synchronizing buffers in case of nested wrapping
        if not getattr(buffer, FSDP_SYNCED, False):
            setattr(buffer, FSDP_SYNCED, True)
            # Detach so the broadcast operates on plain tensors without
            # autograd history
            detached_buffer = buffer.detach()
            if is_traceable_wrapper_subclass(detached_buffer):
                # NOTE: Here we assume no nested subclasses, at most one level of subclass
                # in both model's buffers and params
                attrs, _ = detached_buffer.__tensor_flatten__()  # type: ignore[attr-defined]
                # Broadcast the subclass's inner tensors, not the wrapper itself
                inner_buffers = [getattr(detached_buffer, attr) for attr in attrs]
                module_states.extend(inner_buffers)
            else:
                module_states.append(detached_buffer)

    for param in params:
        detached_param = param.detach()
        if is_traceable_wrapper_subclass(detached_param):
            # Same one-level subclass flattening as for buffers
            attrs, _ = detached_param.__tensor_flatten__()  # type: ignore[attr-defined]
            inner_params = [getattr(detached_param, attr) for attr in attrs]
            module_states.extend(inner_params)
        else:
            module_states.append(detached_param)

    # States must not be on CPU for the broadcast (raises ValueError otherwise)
    _check_module_states_for_sync_module_states(module_states)
    _sync_params_and_buffers(
        process_group,
        module_states,
        PARAM_BROADCAST_BUCKET_SIZE,
        src=0,
    )
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
def _check_module_states_for_sync_module_states(
|
| 1135 |
+
module_states: List[torch.Tensor],
|
| 1136 |
+
) -> None:
|
| 1137 |
+
if module_states and any(
|
| 1138 |
+
tensor.device == torch.device("cpu") for tensor in module_states
|
| 1139 |
+
):
|
| 1140 |
+
raise ValueError(
|
| 1141 |
+
"The module has CPU parameters or buffers when `sync_module_states=True`, "
|
| 1142 |
+
"which requires them to be on GPU. Please specify the `device_id` argument "
|
| 1143 |
+
"or move the module to GPU before passing it to FSDP."
|
| 1144 |
+
)
|
| 1145 |
+
|
| 1146 |
+
|
| 1147 |
+
def _get_orig_params(
|
| 1148 |
+
module: nn.Module,
|
| 1149 |
+
ignored_params: Set[nn.Parameter],
|
| 1150 |
+
) -> Iterator[nn.Parameter]:
|
| 1151 |
+
"""
|
| 1152 |
+
Return an iterator over the original parameters in ``module``.
|
| 1153 |
+
|
| 1154 |
+
The iterator does not return
|
| 1155 |
+
the parameters in ``ignored_params``, any ``FlatParameter`` s (which may be
|
| 1156 |
+
present due to nested FSDP wrapping), or any original parameters already
|
| 1157 |
+
flattened (only relevant when ``use_orig_params=True``).
|
| 1158 |
+
"""
|
| 1159 |
+
param_gen = module.parameters()
|
| 1160 |
+
try:
|
| 1161 |
+
while True:
|
| 1162 |
+
param = next(param_gen)
|
| 1163 |
+
if param not in ignored_params and not _is_fsdp_flattened(param):
|
| 1164 |
+
yield param
|
| 1165 |
+
except StopIteration:
|
| 1166 |
+
pass
|
| 1167 |
+
|
| 1168 |
+
|
| 1169 |
+
def _check_orig_params_flattened(
|
| 1170 |
+
fsdp_module,
|
| 1171 |
+
ignored_params: Set[nn.Parameter],
|
| 1172 |
+
) -> None:
|
| 1173 |
+
"""
|
| 1174 |
+
Check that original parameters in ``fsdp_module`` have been flattened.
|
| 1175 |
+
|
| 1176 |
+
The flattened parameters are made
|
| 1177 |
+
invisible to ``named_parameters()`` for the module hierarchy rooted at
|
| 1178 |
+
``fsdp_module``. This should be called as a sanity check after flattening
|
| 1179 |
+
the wrapped module's parameters.
|
| 1180 |
+
"""
|
| 1181 |
+
for param_name, param in _named_parameters_with_duplicates(fsdp_module):
|
| 1182 |
+
if param not in ignored_params and not _is_fsdp_flattened(param):
|
| 1183 |
+
raise RuntimeError(
|
| 1184 |
+
f"Found an unflattened parameter: {param_name}; "
|
| 1185 |
+
f"{param.size()} {param.__class__}"
|
| 1186 |
+
)
|
| 1187 |
+
|
| 1188 |
+
|
| 1189 |
+
def _get_default_comm_hook(sharding_strategy: ShardingStrategy):
|
| 1190 |
+
return (
|
| 1191 |
+
default_hooks.allreduce_hook
|
| 1192 |
+
if sharding_strategy == ShardingStrategy.NO_SHARD
|
| 1193 |
+
else default_hooks.reduce_scatter_hook
|
| 1194 |
+
)
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
def _get_default_comm_hook_state(
    process_group: dist.ProcessGroup,
) -> default_hooks.DefaultState:
    """Return the default communication hook state wrapping ``process_group``."""
    return default_hooks.DefaultState(process_group=process_group)
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py
ADDED
|
@@ -0,0 +1,2091 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import copy
|
| 3 |
+
import functools
|
| 4 |
+
import logging
|
| 5 |
+
import warnings
|
| 6 |
+
from contextlib import ExitStack
|
| 7 |
+
from dataclasses import dataclass, field
|
| 8 |
+
from typing import (
|
| 9 |
+
Any,
|
| 10 |
+
cast,
|
| 11 |
+
Dict,
|
| 12 |
+
Iterable,
|
| 13 |
+
Iterator,
|
| 14 |
+
List,
|
| 15 |
+
NamedTuple,
|
| 16 |
+
no_type_check,
|
| 17 |
+
Optional,
|
| 18 |
+
Sequence,
|
| 19 |
+
Set,
|
| 20 |
+
Tuple,
|
| 21 |
+
TYPE_CHECKING,
|
| 22 |
+
Union,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
import torch
|
| 26 |
+
import torch.distributed as dist
|
| 27 |
+
import torch.distributed.fsdp._traversal_utils as traversal_utils
|
| 28 |
+
import torch.nn as nn
|
| 29 |
+
from torch.distributed._state_dict_utils import _gather_state_dict
|
| 30 |
+
from torch.distributed.distributed_c10d import _get_pg_default_device
|
| 31 |
+
from torch.distributed.fsdp._common_utils import (
|
| 32 |
+
_apply_to_modules,
|
| 33 |
+
_FSDPState,
|
| 34 |
+
_get_module_fsdp_state_if_fully_sharded_module,
|
| 35 |
+
_get_param_to_fqns,
|
| 36 |
+
_module_handle,
|
| 37 |
+
_named_parameters_with_duplicates,
|
| 38 |
+
clean_tensor_name,
|
| 39 |
+
)
|
| 40 |
+
from torch.distributed.fsdp._debug_utils import SimpleProfiler
|
| 41 |
+
from torch.distributed.fsdp._flat_param import FlatParameter, FlatParamHandle
|
| 42 |
+
from torch.distributed.fsdp._fsdp_extensions import (
|
| 43 |
+
_ext_chunk_dtensor,
|
| 44 |
+
_ext_chunk_tensor,
|
| 45 |
+
)
|
| 46 |
+
from torch.distributed.fsdp._runtime_utils import (
|
| 47 |
+
_lazy_init,
|
| 48 |
+
_reset_flat_param_grad_info_if_needed,
|
| 49 |
+
)
|
| 50 |
+
from torch.distributed.fsdp.api import (
|
| 51 |
+
ShardingStrategy,
|
| 52 |
+
StateDictSettings,
|
| 53 |
+
StateDictType,
|
| 54 |
+
)
|
| 55 |
+
from torch.distributed.tensor import DTensor, Replicate
|
| 56 |
+
from torch.utils._pytree import tree_map_only
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
if TYPE_CHECKING:
|
| 60 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
logger = logging.getLogger(__name__)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@dataclass
class FSDPParamInfo:
    """Bundles an FSDP state with its flat-parameter handle and per-original-
    parameter bookkeeping used by the optimizer-state (un)flattening logic."""

    state: _FSDPState  # owning FSDP state object
    handle: FlatParamHandle  # handle wrapping the flat parameter
    param_indices: Dict[str, int]  # FQN -> original parameter index
    param_requires_grad: List[bool]  # requires_grad flag per original parameter
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def sorted_items(dictionary: Dict[str, Any]) -> Iterator[Tuple[str, Any]]:
    """Yield ``(key, value)`` pairs of ``dictionary`` in ascending key order."""
    for key in sorted(dictionary):
        yield key, dictionary[key]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@dataclass
class _ConsolidatedOptimState:
    """
    This holds the consolidated optimizer state on the target rank. Positive-
    dimension tensor state is communicated across ranks, while zero-dimension
    tensor state and non-tensor state are taken directly from the target rank.

    PyTorch version 1.12 moved to using zero-dimension tensors for scalar
    values, but user implemented optimizers may still use float (i.e. a
    non-tensor). Thus, we support both and handle them identically.

    Attributes:
        tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension
            tensor state name to the unsharded flat tensor representing the
            state.
        zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero-
            dimension tensor state name to its value.
        non_tensor_state (Dict[str, Any]): Mapping from non-tensor state
            name to its value.
    """

    tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict)
    zero_dim_tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict)
    non_tensor_state: Dict[str, Any] = field(default_factory=dict)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class _PosDimTensorInfo(NamedTuple):
    """
    Metadata for positive-dimension tensors used internally for
    :meth:`scatter_full_optim_state_dict`.

    Attributes:
        shape (torch.Size): Sharded tensor shape (which is equal to the
            unsharded tensor shape if the tensor is optimizer state for a
            non-FSDP parameter and is hence not sharded).
        dtype (torch.dtype): Data type of the tensor.
    """

    shape: torch.Size
    dtype: torch.dtype
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
class _OptimStateKey(NamedTuple):
    """
    This represents an optimizer state key that may be used commonly across
    ranks. It is based on the unflattened parameter names rather than parameter
    IDs to make it independent of each rank's own optimizer construction.
    """

    unflat_param_names: Tuple[str, ...]  # FQNs of the unflattened parameters
    is_fsdp_managed: bool  # whether the parameter is managed by FSDP
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def _unflatten_optim_state(
    fsdp_param_info: FSDPParamInfo,
    flat_param_state: Dict[str, Any],
    to_save: bool,
    shard_state: bool,
    cpu_offload: bool,
) -> List[Dict[str, Any]]:
    """
    Unflattens the optimizer state, consisting of the "state" part and the
    "param_groups" part. Unflattening the "state" part involves consolidating
    the state on the target rank and remapping from flattened to unflattened
    parameter IDs, and the "param_groups" part only involves remapping from
    flattened to unflattened parameter IDs.

    Args:
        fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
            mapping from FQN to original parameter index.
        flat_param_state (Dict[str, Any]): Entry for the flat parameter in the
            "state" part of the optimizer state dict.
        to_save (bool): Whether to save the state on this rank.
        shard_state (bool): Whether to shard the unflattened state; requires
            ``to_save`` to be ``True``.
        cpu_offload (bool): Whether to move tensor state to CPU before
            returning.

    Returns:
        List[Dict[str, Any]]: A :class:`list` holding the entries in the
        "state" part of the optimizer state dict corresponding to the
        unflattened parameters comprising the flat parameter if on the target
        rank or an empty :class:`list` otherwise. The final optimizer state
        dict will need to map these entries using the proper unflattened
        parameter IDs.
    """
    assert (
        not shard_state or to_save
    ), "If ``shard_state`` is True, ``to_save`` has to be True."
    # All ranks participate in the communication even if only `to_save` ranks
    # keep the result
    consolidated_state = _communicate_optim_state(
        fsdp_param_info,
        flat_param_state,
    )
    if not to_save:
        return []
    unflat_param_state = _unflatten_communicated_optim_state(
        fsdp_param_info,
        consolidated_state,
        shard_state,
    )
    if cpu_offload:
        # Loop-invariant `cpu_offload` check hoisted out of the loop. Iterate
        # over a snapshot of the keys since the dict is mutated in place;
        # using `.items()` would risk a concurrent modification error.
        for optim_state in unflat_param_state:
            for key in list(optim_state.keys()):
                state = optim_state[key]
                if not isinstance(state, torch.Tensor):
                    continue
                optim_state[key] = state.cpu()
    return unflat_param_state
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def _is_zero_dim_tensor(x: Any) -> bool:
|
| 189 |
+
return torch.is_tensor(x) and x.dim() == 0
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def _communicate_optim_state(
|
| 193 |
+
fsdp_param_info: FSDPParamInfo,
|
| 194 |
+
flat_param_state: Dict[str, Any],
|
| 195 |
+
) -> _ConsolidatedOptimState:
|
| 196 |
+
"""
|
| 197 |
+
Communicates the optimizer state for a flat parameter across ranks. All
|
| 198 |
+
ranks will hold the entire non-sharded optimizer state on GPU.
|
| 199 |
+
|
| 200 |
+
If ``N`` is the number of tensor optimizer states in the optimizer state
|
| 201 |
+
dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1``
|
| 202 |
+
otherwise (where the plus 1 comes from all-gathering the padding per rank).
|
| 203 |
+
|
| 204 |
+
Args:
|
| 205 |
+
fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
|
| 206 |
+
mapping from FQN to original parameter index.
|
| 207 |
+
flat_param_state (Dict[str, Any]): The entry in the "state" part of the
|
| 208 |
+
optimizer state dict corresponding to the flat parameter.
|
| 209 |
+
|
| 210 |
+
Returns:
|
| 211 |
+
ConsolidatedOptimState: Consolidated optimizer state for the target
|
| 212 |
+
flat parameter.
|
| 213 |
+
"""
|
| 214 |
+
fsdp_state = fsdp_param_info.state
|
| 215 |
+
flat_param = fsdp_param_info.handle.flat_param
|
| 216 |
+
state = _ConsolidatedOptimState()
|
| 217 |
+
tensor_state, zero_dim_tensor_state, non_tensor_state = (
|
| 218 |
+
state.tensor_state,
|
| 219 |
+
state.zero_dim_tensor_state,
|
| 220 |
+
state.non_tensor_state,
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
for state_name, value in sorted_items(flat_param_state):
|
| 224 |
+
# Positive-dimension tensor state: communicate across ranks
|
| 225 |
+
if torch.is_tensor(value) and value.dim() > 0:
|
| 226 |
+
# If the parameter is not sharded, then neither is the
|
| 227 |
+
# positive-dimension tensor state, so no need to communicate it --
|
| 228 |
+
# we take the target rank's value
|
| 229 |
+
if (
|
| 230 |
+
fsdp_state.world_size == 1
|
| 231 |
+
or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
|
| 232 |
+
):
|
| 233 |
+
tensor_state[state_name] = value
|
| 234 |
+
continue
|
| 235 |
+
assert (
|
| 236 |
+
fsdp_state.compute_device is not None
|
| 237 |
+
), "compute_device has not been initialized"
|
| 238 |
+
if value.device.type != fsdp_state.compute_device.type:
|
| 239 |
+
value = value.to(fsdp_state.compute_device)
|
| 240 |
+
# Assume that positive-dimension tensor optimizer state
|
| 241 |
+
# has the same shape as the sharded flat parameter
|
| 242 |
+
buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined]
|
| 243 |
+
tensor_buffer = value.new_zeros(*buffer_size)
|
| 244 |
+
dist.all_gather_into_tensor(
|
| 245 |
+
tensor_buffer, value, group=fsdp_state.process_group
|
| 246 |
+
)
|
| 247 |
+
fsdp_state._device_handle.synchronize()
|
| 248 |
+
unpadded_numel = cast(
|
| 249 |
+
nn.Parameter, flat_param._unpadded_unsharded_size
|
| 250 |
+
).numel()
|
| 251 |
+
tensor_state[state_name] = tensor_buffer[:unpadded_numel]
|
| 252 |
+
# Zero-dimension tensor state and non-tensor state: take this rank's
|
| 253 |
+
# value directly
|
| 254 |
+
else:
|
| 255 |
+
if _is_zero_dim_tensor(value):
|
| 256 |
+
zero_dim_tensor_state[state_name] = value.detach().clone()
|
| 257 |
+
else:
|
| 258 |
+
non_tensor_state[state_name] = value
|
| 259 |
+
return state
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def _unflatten_communicated_optim_state(
|
| 263 |
+
fsdp_param_info: FSDPParamInfo,
|
| 264 |
+
state: _ConsolidatedOptimState,
|
| 265 |
+
shard_state: bool,
|
| 266 |
+
) -> List[Dict[str, Any]]:
|
| 267 |
+
"""
|
| 268 |
+
Unflattens the communicated optimizer state (given by ``tensor_state``,
|
| 269 |
+
``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flat
|
| 270 |
+
parameter. This should only be called on the target rank.
|
| 271 |
+
|
| 272 |
+
Args:
|
| 273 |
+
fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
|
| 274 |
+
mapping from FQN to original parameter index.
|
| 275 |
+
state (_ConsolidatedOptimState): Consolidated optimizer state.
|
| 276 |
+
|
| 277 |
+
Returns:
|
| 278 |
+
List[Dict[str, Any]]: A :class:`list` holding the entries in the
|
| 279 |
+
"state" part of the optimizer state dict corresponding to the
|
| 280 |
+
unflattened parameters comprising the flat parameter. The final
|
| 281 |
+
optimizer state dict will need to map these entries using the proper
|
| 282 |
+
unflattened parameter IDs.
|
| 283 |
+
"""
|
| 284 |
+
fsdp_state = fsdp_param_info.state
|
| 285 |
+
handle = fsdp_param_info.handle
|
| 286 |
+
flat_param = handle.flat_param
|
| 287 |
+
unflat_param_state: List[Dict[str, Any]] = []
|
| 288 |
+
flat_param_views: Dict[str, Iterator] = {}
|
| 289 |
+
num_unflat_params = flat_param._num_params
|
| 290 |
+
tensor_state, zero_dim_tensor_state, non_tensor_state = (
|
| 291 |
+
state.tensor_state,
|
| 292 |
+
state.zero_dim_tensor_state,
|
| 293 |
+
state.non_tensor_state,
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
for _ in range(num_unflat_params):
|
| 297 |
+
unflat_state_param = {}
|
| 298 |
+
# Add positive-dimension tensor state: unflatten with views
|
| 299 |
+
for state_name, flat_tensor in sorted_items(tensor_state):
|
| 300 |
+
views_generated = state_name in flat_param_views
|
| 301 |
+
if not views_generated:
|
| 302 |
+
views = handle._get_unflat_views(flat_tensor)
|
| 303 |
+
flat_param_views[state_name] = views
|
| 304 |
+
else:
|
| 305 |
+
views = flat_param_views[state_name]
|
| 306 |
+
optim_state: Union[torch.Tensor, ShardedTensor, DTensor] = next(views)
|
| 307 |
+
if shard_state:
|
| 308 |
+
osd_config = fsdp_state._optim_state_dict_config
|
| 309 |
+
if getattr(osd_config, "_use_dtensor", False):
|
| 310 |
+
assert fsdp_state._device_mesh is not None
|
| 311 |
+
optim_state = _ext_chunk_dtensor(
|
| 312 |
+
optim_state,
|
| 313 |
+
fsdp_state.rank,
|
| 314 |
+
fsdp_state._device_mesh,
|
| 315 |
+
fsdp_state._fsdp_extension,
|
| 316 |
+
)
|
| 317 |
+
else:
|
| 318 |
+
assert fsdp_state.process_group is not None
|
| 319 |
+
optim_state = _ext_chunk_tensor(
|
| 320 |
+
optim_state,
|
| 321 |
+
fsdp_state.rank,
|
| 322 |
+
fsdp_state.world_size,
|
| 323 |
+
fsdp_state._device_handle.device_count(),
|
| 324 |
+
fsdp_state.process_group,
|
| 325 |
+
fsdp_state._fsdp_extension,
|
| 326 |
+
)
|
| 327 |
+
unflat_state_param[state_name] = optim_state
|
| 328 |
+
|
| 329 |
+
# Add zero-dimension tensor state: take the target rank's value
|
| 330 |
+
for state_name, zero_dim_tensor in sorted_items(zero_dim_tensor_state):
|
| 331 |
+
unflat_state_param[state_name] = zero_dim_tensor
|
| 332 |
+
# Add non-tensor state: take the target rank's value
|
| 333 |
+
for state_name, non_tensor in sorted_items(non_tensor_state):
|
| 334 |
+
unflat_state_param[state_name] = non_tensor
|
| 335 |
+
unflat_param_state.append(unflat_state_param)
|
| 336 |
+
return unflat_param_state
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def _broadcast_processed_state(
|
| 340 |
+
fsdp_state: _FSDPState,
|
| 341 |
+
optim_state: Dict[str, Any],
|
| 342 |
+
group: Optional[dist.ProcessGroup],
|
| 343 |
+
) -> Dict[str, Any]:
|
| 344 |
+
objects: List[Any] = [None]
|
| 345 |
+
if dist.get_rank(group) == 0:
|
| 346 |
+
objects[0] = tree_map_only(
|
| 347 |
+
torch.Tensor,
|
| 348 |
+
lambda v: v.cpu() if v.dim() == 0 else _PosDimTensorInfo(v.shape, v.dtype), # type: ignore[union-attr]
|
| 349 |
+
optim_state,
|
| 350 |
+
)
|
| 351 |
+
dist.broadcast_object_list(objects, src=0, group=group)
|
| 352 |
+
if dist.get_rank(group) == 0:
|
| 353 |
+
return optim_state
|
| 354 |
+
else:
|
| 355 |
+
return objects[0]
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def _broadcast_state(
|
| 359 |
+
fsdp_state: _FSDPState, state: Any, group: Optional[dist.ProcessGroup]
|
| 360 |
+
) -> Any:
|
| 361 |
+
if dist.get_rank(group) == 0:
|
| 362 |
+
if not isinstance(state, torch.Tensor) or state.dim() == 0:
|
| 363 |
+
return state
|
| 364 |
+
tensor = state.to(fsdp_state.compute_device)
|
| 365 |
+
else:
|
| 366 |
+
if isinstance(state, torch.Tensor):
|
| 367 |
+
assert state.dim() == 0, (
|
| 368 |
+
"For non-zero ranks, a tensor state should have zero dimension, "
|
| 369 |
+
"but got the state with shape {state.shape()}."
|
| 370 |
+
)
|
| 371 |
+
return state
|
| 372 |
+
elif not isinstance(state, _PosDimTensorInfo):
|
| 373 |
+
return state
|
| 374 |
+
tensor = torch.zeros(
|
| 375 |
+
state.shape, dtype=state.dtype, device=fsdp_state.compute_device
|
| 376 |
+
)
|
| 377 |
+
dist.broadcast(tensor, src=0, group=group)
|
| 378 |
+
return tensor
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def _shard_orig_param_state(
|
| 382 |
+
fsdp_param_info: FSDPParamInfo,
|
| 383 |
+
fqn: str,
|
| 384 |
+
optim_state: Dict[str, Any],
|
| 385 |
+
) -> Dict[str, Any]:
|
| 386 |
+
"""
|
| 387 |
+
Shard the optimizer state for the original parameter with the name ``fqn``.
|
| 388 |
+
This API should only be used when ``use_orig_params`` is True.
|
| 389 |
+
"""
|
| 390 |
+
if not optim_state:
|
| 391 |
+
return {}
|
| 392 |
+
fsdp_state = fsdp_param_info.state
|
| 393 |
+
flat_param = fsdp_param_info.handle.flat_param
|
| 394 |
+
param_idx = fsdp_param_info.param_indices[fqn]
|
| 395 |
+
shard_param_info = flat_param._shard_param_infos[param_idx] # type: ignore[attr-defined]
|
| 396 |
+
optim_state = _gather_state_dict(
|
| 397 |
+
optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device
|
| 398 |
+
)
|
| 399 |
+
if not shard_param_info.in_shard:
|
| 400 |
+
return {}
|
| 401 |
+
# Flatten and shard the state.
|
| 402 |
+
new_optim_state: Dict[str, Any] = {}
|
| 403 |
+
intra_param_start_idx = shard_param_info.intra_param_start_idx
|
| 404 |
+
intra_param_end_idx = shard_param_info.intra_param_end_idx
|
| 405 |
+
for state_name, value in optim_state.items():
|
| 406 |
+
if (
|
| 407 |
+
torch.is_tensor(value)
|
| 408 |
+
and value.dim() > 0
|
| 409 |
+
and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
|
| 410 |
+
):
|
| 411 |
+
value = value.flatten()[intra_param_start_idx : intra_param_end_idx + 1].clone() # type: ignore[operator]
|
| 412 |
+
new_optim_state[state_name] = value
|
| 413 |
+
return new_optim_state
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def _flatten_optim_state_dict(
|
| 417 |
+
optim_state_dict: Dict[str, Any],
|
| 418 |
+
model: nn.Module,
|
| 419 |
+
use_orig_params: bool = False,
|
| 420 |
+
optim: Optional[torch.optim.Optimizer] = None,
|
| 421 |
+
rank0_only: bool = False,
|
| 422 |
+
group: Optional[dist.ProcessGroup] = None,
|
| 423 |
+
) -> Dict[str, Any]:
|
| 424 |
+
"""
|
| 425 |
+
Flattens the full optimizer state dict, still keying by unflattened parameter
|
| 426 |
+
names.
|
| 427 |
+
|
| 428 |
+
If ``use_orig_params`` is True, each rank will have all FSDP-managed
|
| 429 |
+
parameters but some of these parameters may be empty due to the sharding.
|
| 430 |
+
For a regular optim.Optimizer, states for those empty parameters will
|
| 431 |
+
not be initialized. So, when aggregating the FQNs across ranks, no assert
|
| 432 |
+
will be raised on a rank even if it does not have all the states -- it is
|
| 433 |
+
valid and FSDP know how to aggregate them. However, FSDP has to ignore
|
| 434 |
+
handling those parameters that are not managed by FSDP and do not exist on
|
| 435 |
+
the local rank -- it is managed by other parallelism and FSDP does not
|
| 436 |
+
know ho to handle/aggregate them.
|
| 437 |
+
|
| 438 |
+
Note that ``_flatten_tensor_optim_state`` does not need ``optim`` to
|
| 439 |
+
flatten/shard the state. However, NamedOptimizer and KeyedOptimizer require
|
| 440 |
+
all the states even if the corresponding parameters are empty. To this end,
|
| 441 |
+
``optim`` will be used to to get the initial state of the empty parameters.
|
| 442 |
+
``optim`` should only be non-None if the ``optim` is KeyedOptimizer or
|
| 443 |
+
NamedOptimizer.
|
| 444 |
+
|
| 445 |
+
Returns:
|
| 446 |
+
Dict[str, Any]: The flattened optimizer state dict.
|
| 447 |
+
"""
|
| 448 |
+
SimpleProfiler.reset()
|
| 449 |
+
|
| 450 |
+
unflat_osd = optim_state_dict
|
| 451 |
+
if "state" not in unflat_osd and not rank0_only:
|
| 452 |
+
raise ValueError(
|
| 453 |
+
'`optim_state_dict` must have the keys "state"'
|
| 454 |
+
"to be a valid optimizer state dict"
|
| 455 |
+
)
|
| 456 |
+
param_to_fqns = _get_param_to_fqns(model)
|
| 457 |
+
fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)
|
| 458 |
+
fsdp_state = next(iter(fqn_to_fsdp_param_info.values())).state
|
| 459 |
+
|
| 460 |
+
# Broadcast unflat_osd without non-scalar tensor if rank0_only is True.
|
| 461 |
+
if rank0_only:
|
| 462 |
+
unflat_osd = _broadcast_processed_state(fsdp_state, unflat_osd, group=group)
|
| 463 |
+
|
| 464 |
+
# Construct the "state" part
|
| 465 |
+
flat_osd_state: Dict[Union[_OptimStateKey, str], Any] = {}
|
| 466 |
+
unflat_osd_state = unflat_osd["state"]
|
| 467 |
+
all_state_keys = set(unflat_osd_state.keys())
|
| 468 |
+
|
| 469 |
+
for param, fqns in param_to_fqns.items():
|
| 470 |
+
fqn = fqns[0]
|
| 471 |
+
if fqn not in unflat_osd_state:
|
| 472 |
+
continue
|
| 473 |
+
all_state_keys.difference_update(fqns)
|
| 474 |
+
|
| 475 |
+
if rank0_only:
|
| 476 |
+
for fqn in fqns:
|
| 477 |
+
if not unflat_osd_state[fqn]:
|
| 478 |
+
continue
|
| 479 |
+
for state_name in unflat_osd_state[fqn].keys():
|
| 480 |
+
unflat_osd_state[fqn][state_name] = _broadcast_state(
|
| 481 |
+
fsdp_state, unflat_osd_state[fqn][state_name], group=group
|
| 482 |
+
)
|
| 483 |
+
fqn = fqns[0]
|
| 484 |
+
if fqn in fqn_to_fsdp_param_info:
|
| 485 |
+
fsdp_param_info = fqn_to_fsdp_param_info[fqn]
|
| 486 |
+
if use_orig_params:
|
| 487 |
+
with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
|
| 488 |
+
flat_state = _shard_orig_param_state(
|
| 489 |
+
fsdp_param_info,
|
| 490 |
+
fqn,
|
| 491 |
+
unflat_osd_state[fqn],
|
| 492 |
+
)
|
| 493 |
+
else:
|
| 494 |
+
flat_state = _flatten_optim_state(
|
| 495 |
+
fsdp_param_info,
|
| 496 |
+
unflat_osd_state,
|
| 497 |
+
fqns,
|
| 498 |
+
)
|
| 499 |
+
key = _OptimStateKey(tuple(fqns), True)
|
| 500 |
+
# Only include non-empty states since as expected by
|
| 501 |
+
# `torch.optim.Optimizer` s unless the optimizer is KeyedOptimizer
|
| 502 |
+
# or NamedOptimizer.
|
| 503 |
+
if flat_state:
|
| 504 |
+
flat_osd_state[key] = flat_state
|
| 505 |
+
elif use_orig_params:
|
| 506 |
+
assert (
|
| 507 |
+
len(fqns) == 1
|
| 508 |
+
), f"use_orig_params is True but there are multiple FQNs, {fqns}."
|
| 509 |
+
if optim is not None: # NamedOptimizer or KeyedOptimizer case.
|
| 510 |
+
state = optim.state.get(param, None) # type: ignore[call-overload]
|
| 511 |
+
if state is not None:
|
| 512 |
+
flat_osd_state[key] = copy.deepcopy(state)
|
| 513 |
+
else:
|
| 514 |
+
warnings.warn(
|
| 515 |
+
f"optim_state[{key}] is not on rank{fsdp_state.rank}."
|
| 516 |
+
)
|
| 517 |
+
|
| 518 |
+
else:
|
| 519 |
+
raise RuntimeError(
|
| 520 |
+
f"The state of {key} is empty. This should happen when "
|
| 521 |
+
"use_orig_params=True."
|
| 522 |
+
)
|
| 523 |
+
else: # do not flatten non-FSDP parameters' states
|
| 524 |
+
assert len(fqns) == 1
|
| 525 |
+
key = _OptimStateKey(tuple(fqns), False)
|
| 526 |
+
flat_osd_state[key] = copy.copy(unflat_osd_state[fqn])
|
| 527 |
+
|
| 528 |
+
if rank0_only:
|
| 529 |
+
for fqn in fqns:
|
| 530 |
+
if not unflat_osd_state[fqn]:
|
| 531 |
+
continue
|
| 532 |
+
for state_name, param_state in list(unflat_osd_state[fqn].items()):
|
| 533 |
+
if fsdp_state.rank > 0:
|
| 534 |
+
# Deference the tensor so that PyTorch can collect the memory.
|
| 535 |
+
del unflat_osd_state[fqn][state_name]
|
| 536 |
+
else:
|
| 537 |
+
# Move the tensor in the original osd back to CPU to make the
|
| 538 |
+
# original osd unaffected.
|
| 539 |
+
unflat_osd_state[fqn][state_name] = unflat_osd_state[fqn][
|
| 540 |
+
state_name
|
| 541 |
+
].cpu()
|
| 542 |
+
|
| 543 |
+
# Handle user-defined state, states that are not associated with parameters.
|
| 544 |
+
for key in all_state_keys:
|
| 545 |
+
user_state = unflat_osd_state[key]
|
| 546 |
+
if isinstance(user_state, torch.Tensor) and rank0_only and use_orig_params:
|
| 547 |
+
user_state = _broadcast_state(fsdp_state, user_state, group=group)
|
| 548 |
+
flat_osd_state[key] = copy.copy(user_state)
|
| 549 |
+
|
| 550 |
+
SimpleProfiler.dump_and_reset("FSDP _flatten_optim_state_dict() profiling: ")
|
| 551 |
+
# Construct the "param_groups" part -- copy as is since it will be
|
| 552 |
+
# rekeyed later according to the target rank's optimizer
|
| 553 |
+
# Only copy param_groups if it exists in unflat_osd
|
| 554 |
+
if "param_groups" in unflat_osd:
|
| 555 |
+
flat_osd_param_groups = copy.deepcopy(unflat_osd["param_groups"])
|
| 556 |
+
return {"state": flat_osd_state, "param_groups": flat_osd_param_groups}
|
| 557 |
+
else:
|
| 558 |
+
return {"state": flat_osd_state}
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
def _flatten_optim_state(
|
| 562 |
+
fsdp_param_info: FSDPParamInfo,
|
| 563 |
+
unflat_osd_state: Dict[str, Dict[str, Any]],
|
| 564 |
+
unflat_param_names: List[str],
|
| 565 |
+
) -> Dict[str, Any]:
|
| 566 |
+
"""
|
| 567 |
+
Flattens the optimizer state in ``full_optim_state_dict`` for a single
|
| 568 |
+
flat parameter in ``fsdp_param_info`` corresponding to the unflattened
|
| 569 |
+
parameter names in ``unflat_param_names``.
|
| 570 |
+
|
| 571 |
+
Args:
|
| 572 |
+
fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
|
| 573 |
+
mapping from FQN to original parameter index.
|
| 574 |
+
unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
|
| 575 |
+
optimizer state dict corresponding to the unflattened parameters.
|
| 576 |
+
unflat_param_names (List[str]): A :class:`list` of unflattened
|
| 577 |
+
parameter names corresponding to the flat parameter ``flat_param``.
|
| 578 |
+
|
| 579 |
+
Returns:
|
| 580 |
+
Dict[str, Any]: A :class:`dict` mapping state names to their values for
|
| 581 |
+
a particular flat parameter. The sharded optimizer state dict's "state"
|
| 582 |
+
part will map a key to this returned value.
|
| 583 |
+
"""
|
| 584 |
+
fsdp_state = fsdp_param_info.state
|
| 585 |
+
handle = fsdp_param_info.handle
|
| 586 |
+
flat_param = handle.flat_param
|
| 587 |
+
num_unflat_params = len(unflat_param_names)
|
| 588 |
+
assert num_unflat_params > 0, (
|
| 589 |
+
"Expects at least one unflattened parameter corresponding to the "
|
| 590 |
+
"flat parameter"
|
| 591 |
+
)
|
| 592 |
+
unflat_param_shapes = flat_param._shapes
|
| 593 |
+
num_unflat_param_shapes = len(unflat_param_shapes)
|
| 594 |
+
assert (
|
| 595 |
+
num_unflat_params == num_unflat_param_shapes
|
| 596 |
+
), f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"
|
| 597 |
+
|
| 598 |
+
# Check if these unflattened parameters have any optimizer state
|
| 599 |
+
has_state = [
|
| 600 |
+
bool(unflat_param_name in unflat_osd_state)
|
| 601 |
+
for unflat_param_name in unflat_param_names
|
| 602 |
+
]
|
| 603 |
+
# If none of the unflattened parameters comprising this flat parameter have
|
| 604 |
+
# any state, then we do not want an entry in the optimizer state dict
|
| 605 |
+
if not any(has_state):
|
| 606 |
+
return {} # no need to flatten any state
|
| 607 |
+
# There may still be some unflattened parameters with state and some
|
| 608 |
+
# without
|
| 609 |
+
unflat_param_states = [
|
| 610 |
+
_gather_state_dict(
|
| 611 |
+
unflat_osd_state[unflat_param_name],
|
| 612 |
+
pg=fsdp_state.process_group,
|
| 613 |
+
device=fsdp_state.compute_device,
|
| 614 |
+
)
|
| 615 |
+
if unflat_param_name in unflat_osd_state
|
| 616 |
+
else None
|
| 617 |
+
for unflat_param_name in unflat_param_names
|
| 618 |
+
]
|
| 619 |
+
# Check that the unflattened parameters have the same state names
|
| 620 |
+
state_names = None
|
| 621 |
+
for unflat_param_state in unflat_param_states:
|
| 622 |
+
if unflat_param_state is None:
|
| 623 |
+
continue
|
| 624 |
+
if state_names is None:
|
| 625 |
+
state_names = set(unflat_param_state.keys())
|
| 626 |
+
else:
|
| 627 |
+
if state_names != set(unflat_param_state.keys()):
|
| 628 |
+
raise ValueError(
|
| 629 |
+
"Differing optimizer state names for the unflattened "
|
| 630 |
+
f"parameters: {unflat_param_names}"
|
| 631 |
+
)
|
| 632 |
+
assert state_names is not None
|
| 633 |
+
|
| 634 |
+
# Flatten the state
|
| 635 |
+
flat_state: Dict[str, Any] = {}
|
| 636 |
+
for state_name in state_names:
|
| 637 |
+
state_values = [
|
| 638 |
+
unflat_param_state[state_name] if unflat_param_state is not None else None
|
| 639 |
+
for unflat_param_state in unflat_param_states
|
| 640 |
+
]
|
| 641 |
+
non_none_state_values = [v for v in state_values if v is not None]
|
| 642 |
+
# If all ranks have None, this is a None value
|
| 643 |
+
if not non_none_state_values:
|
| 644 |
+
flat_state[state_name] = None
|
| 645 |
+
continue
|
| 646 |
+
are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
|
| 647 |
+
for v in non_none_state_values:
|
| 648 |
+
are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
|
| 649 |
+
are_zero_dim_tensors &= _is_zero_dim_tensor(v)
|
| 650 |
+
are_non_tensors &= not torch.is_tensor(v)
|
| 651 |
+
types = {type(v) for v in non_none_state_values}
|
| 652 |
+
if len(types) != 1 or not (
|
| 653 |
+
are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
|
| 654 |
+
):
|
| 655 |
+
raise ValueError(
|
| 656 |
+
f"Differing optimizer state types for state {state_name}, "
|
| 657 |
+
f"values {non_none_state_values}, and unflattened parameter "
|
| 658 |
+
f"names {unflat_param_names}"
|
| 659 |
+
)
|
| 660 |
+
if are_pos_dim_tensors:
|
| 661 |
+
flat_tensor = _flatten_tensor_optim_state(
|
| 662 |
+
state_name,
|
| 663 |
+
state_values,
|
| 664 |
+
unflat_param_names,
|
| 665 |
+
unflat_param_shapes,
|
| 666 |
+
handle,
|
| 667 |
+
)
|
| 668 |
+
# Shard the flattened tensor immediately to minimize max memory
|
| 669 |
+
# usage
|
| 670 |
+
if (
|
| 671 |
+
fsdp_state.world_size != 1
|
| 672 |
+
and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
|
| 673 |
+
):
|
| 674 |
+
sharded_flat_tensor, _ = FlatParamHandle._get_shard(
|
| 675 |
+
flat_tensor,
|
| 676 |
+
fsdp_state.rank,
|
| 677 |
+
fsdp_state.world_size,
|
| 678 |
+
)
|
| 679 |
+
else:
|
| 680 |
+
sharded_flat_tensor = flat_tensor
|
| 681 |
+
flat_state[state_name] = sharded_flat_tensor
|
| 682 |
+
elif are_zero_dim_tensors:
|
| 683 |
+
flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
|
| 684 |
+
state_name,
|
| 685 |
+
state_values,
|
| 686 |
+
unflat_param_names,
|
| 687 |
+
)
|
| 688 |
+
else:
|
| 689 |
+
assert are_non_tensors
|
| 690 |
+
flat_state[state_name] = _flatten_non_tensor_optim_state(
|
| 691 |
+
state_name,
|
| 692 |
+
state_values,
|
| 693 |
+
unflat_param_names,
|
| 694 |
+
)
|
| 695 |
+
|
| 696 |
+
return flat_state
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
def _flatten_tensor_optim_state(
|
| 700 |
+
state_name: str,
|
| 701 |
+
pos_dim_tensors: List[torch.Tensor],
|
| 702 |
+
unflat_param_names: List[str],
|
| 703 |
+
unflat_param_shapes: Sequence[torch.Size],
|
| 704 |
+
handle: FlatParamHandle,
|
| 705 |
+
) -> torch.Tensor:
|
| 706 |
+
"""
|
| 707 |
+
Flattens the positive-dimension tensor optimizer state given by the values
|
| 708 |
+
``tensors`` for the state ``state_name`` for a single flat parameter
|
| 709 |
+
from ``handle`` corresponding to the unflattened parameter names
|
| 710 |
+
``unflat_param_names`` and unflatted parameter shapes
|
| 711 |
+
``unflat_param_shapes``. This flattens each unflattened parameter's tensor
|
| 712 |
+
state into one tensor.
|
| 713 |
+
|
| 714 |
+
NOTE: We use zero tensors for any unflattened parameters without state
|
| 715 |
+
since some value is required to fill those entries. This assumes that the
|
| 716 |
+
zero tensor is mathematically equivalent to having no state, which is true
|
| 717 |
+
for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all
|
| 718 |
+
optimizers.
|
| 719 |
+
|
| 720 |
+
Args:
|
| 721 |
+
state_name (str): Optimizer state name.
|
| 722 |
+
pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
|
| 723 |
+
optimizer state values for the unflattened parameters corresponding
|
| 724 |
+
to the single flat parameter.
|
| 725 |
+
unflat_param_names (List[str]): A :class:`list` of unflattened
|
| 726 |
+
parameter names corresponding to the single flat parameter.
|
| 727 |
+
unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes
|
| 728 |
+
corresponding to the single flat parameter.
|
| 729 |
+
handle (FlatParamHandle): The flat parameter's handle.
|
| 730 |
+
|
| 731 |
+
Returns:
|
| 732 |
+
torch.Tensor: A flat tensor containing the optimizer state
|
| 733 |
+
corresponding to ``state_name`` constructed by concatenating the
|
| 734 |
+
unflattened parameter tensor states in ``pos_dim_tensors`` (using zero
|
| 735 |
+
tensors for any unflattened parameters without the state).
|
| 736 |
+
"""
|
| 737 |
+
flat_param = handle.flat_param
|
| 738 |
+
non_none_tensors = [t for t in pos_dim_tensors if t is not None]
|
| 739 |
+
# Check that all are tensors with the same dtype
|
| 740 |
+
dtypes = {t.dtype for t in non_none_tensors}
|
| 741 |
+
if len(dtypes) != 1:
|
| 742 |
+
raise ValueError(
|
| 743 |
+
"All unflattened parameters comprising a single flat "
|
| 744 |
+
"parameter must have positive-dimension tensor state with the "
|
| 745 |
+
f"same dtype but got dtypes {dtypes} for state {state_name} and "
|
| 746 |
+
f"unflattened parameter names {unflat_param_names}"
|
| 747 |
+
)
|
| 748 |
+
dtype = next(iter(dtypes))
|
| 749 |
+
# Check that each tensor state matches its parameter's shape
|
| 750 |
+
for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
|
| 751 |
+
if tensor is None and len(shape) == 0:
|
| 752 |
+
raise ValueError("Flattening a zero-dimension parameter is not supported")
|
| 753 |
+
elif tensor is not None and tensor.shape != shape:
|
| 754 |
+
raise ValueError(
|
| 755 |
+
"Tensor optimizer state does not have same shape as its "
|
| 756 |
+
f"parameter: {tensor.shape} {shape}"
|
| 757 |
+
)
|
| 758 |
+
# Flatten the tensor states: we do not need to add any right-hand-side
|
| 759 |
+
# padding since the flat optimizer state tensor is sharded via
|
| 760 |
+
# `_get_shard()`, which pads the shard as needed (just like for the flat
|
| 761 |
+
# parameter)
|
| 762 |
+
cpu_device = torch.device("cpu")
|
| 763 |
+
tensors_to_flatten = [
|
| 764 |
+
torch.flatten(state_value.to(cpu_device))
|
| 765 |
+
if state_value is not None
|
| 766 |
+
else torch.flatten(
|
| 767 |
+
torch.zeros(
|
| 768 |
+
size=shape,
|
| 769 |
+
dtype=dtype,
|
| 770 |
+
device=cpu_device,
|
| 771 |
+
)
|
| 772 |
+
)
|
| 773 |
+
for state_value, shape in zip(pos_dim_tensors, unflat_param_shapes)
|
| 774 |
+
]
|
| 775 |
+
flat_tensor = handle.flatten_tensors(tensors_to_flatten, handle._aligned_numel)
|
| 776 |
+
flat_param_shape = flat_param._unpadded_unsharded_size # type: ignore[attr-defined]
|
| 777 |
+
assert flat_tensor.shape == flat_param_shape, (
|
| 778 |
+
f"tensor optim state: {flat_tensor.shape} "
|
| 779 |
+
f"flat parameter: {flat_param_shape}"
|
| 780 |
+
)
|
| 781 |
+
return flat_tensor
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
def _flatten_zero_dim_tensor_optim_state(
|
| 785 |
+
state_name: str,
|
| 786 |
+
zero_dim_tensors: List[torch.Tensor],
|
| 787 |
+
unflat_param_names: List[str],
|
| 788 |
+
) -> torch.Tensor:
|
| 789 |
+
"""
|
| 790 |
+
Flattens the zero-dimension tensor optimizer state given by the values
|
| 791 |
+
``zero_dim_tensors`` for the state ``state_name`` for a single flat
|
| 792 |
+
parameter corresponding to the unflattened parameter names
|
| 793 |
+
``unflat_param_names`` by enforcing that all tensors are the same and using
|
| 794 |
+
that common value.
|
| 795 |
+
|
| 796 |
+
NOTE: The requirement that the tensors are the same across all unflattened
|
| 797 |
+
parameters comprising the flat parameter is needed to maintain the
|
| 798 |
+
invariant that FSDP performs the same computation as its non-sharded
|
| 799 |
+
equivalent. This means that none of the unflattened parameters can be
|
| 800 |
+
missing this state since imposing a value may differ from having no value.
|
| 801 |
+
For example, for Adam's "step", no value means maximum bias correction,
|
| 802 |
+
while having some positive value means less bias correction.
|
| 803 |
+
|
| 804 |
+
Args:
|
| 805 |
+
state_name (str): Optimizer state name.
|
| 806 |
+
zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state
|
| 807 |
+
for the unflattened parameters corresponding to the single
|
| 808 |
+
flat parameter.
|
| 809 |
+
unflat_param_names (List[str]): A :class:`list` of unflattened
|
| 810 |
+
parameter names corresponding to the single flat parameter.
|
| 811 |
+
|
| 812 |
+
Returns:
|
| 813 |
+
torch.Tensor: A zero-dimensional tensor giving the value of the state
|
| 814 |
+
``state_name`` for all unflattened parameters corresponding to the
|
| 815 |
+
names ``unflat_param_names``.
|
| 816 |
+
"""
|
| 817 |
+
non_none_tensors = [t for t in zero_dim_tensors if t is not None]
|
| 818 |
+
# Enforce that all have the same value and dtype
|
| 819 |
+
values_set = {t.item() if t is not None else None for t in zero_dim_tensors}
|
| 820 |
+
dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors}
|
| 821 |
+
if (
|
| 822 |
+
len(non_none_tensors) != len(zero_dim_tensors)
|
| 823 |
+
or len(values_set) != 1
|
| 824 |
+
or len(dtypes) != 1
|
| 825 |
+
):
|
| 826 |
+
raise ValueError(
|
| 827 |
+
"All unflattened parameters comprising a single flat "
|
| 828 |
+
"parameter must have scalar state with the same value and dtype "
|
| 829 |
+
f"but got values {values_set} and dtypes {dtypes} for state "
|
| 830 |
+
f"{state_name} and unflattened parameter names "
|
| 831 |
+
f"{unflat_param_names}"
|
| 832 |
+
)
|
| 833 |
+
value = next(iter(values_set))
|
| 834 |
+
dtype = next(iter(dtypes))
|
| 835 |
+
return torch.tensor(value, dtype=dtype, device=torch.device("cpu"))
|
| 836 |
+
|
| 837 |
+
|
| 838 |
+
def _flatten_non_tensor_optim_state(
|
| 839 |
+
state_name: str,
|
| 840 |
+
non_tensors: List[Any],
|
| 841 |
+
unflat_param_names: List[str],
|
| 842 |
+
) -> Any:
|
| 843 |
+
"""
|
| 844 |
+
Flattens the non-tensor optimizer state given by the values ``non_tensors``
|
| 845 |
+
for the state ``state_name`` for a single flat parameter corresponding
|
| 846 |
+
to the unflattened parameter names ``unflat_param_names`` by enforcing that
|
| 847 |
+
all values are the same and using that common value.
|
| 848 |
+
|
| 849 |
+
See the note in :func:`_flatten_zero_dim_tensor_optim_state`.
|
| 850 |
+
|
| 851 |
+
Args:
|
| 852 |
+
state_name (str): Optimizer state name.
|
| 853 |
+
non_tensors (List[Any]): Non-tensor optimizer state for the unflattened
|
| 854 |
+
parameters corresponding to the single flat parameter.
|
| 855 |
+
unflat_param_names (List[str]): A :class:`list` of unflattened
|
| 856 |
+
parameter names corresponding to the single flat parameter.
|
| 857 |
+
|
| 858 |
+
Returns:
|
| 859 |
+
Any: A non-tensor giving the value of the state ``state_name`` for all
|
| 860 |
+
unflattened parameters corresponding to the names
|
| 861 |
+
``unflat_param_names``.
|
| 862 |
+
"""
|
| 863 |
+
non_none_non_tensors = [nt for nt in non_tensors if nt is not None]
|
| 864 |
+
# Enforce that all have the same value (same type already checked)
|
| 865 |
+
non_tensor_set = set(non_tensors)
|
| 866 |
+
if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1:
|
| 867 |
+
raise ValueError(
|
| 868 |
+
"All unflattened parameters comprising a single flat "
|
| 869 |
+
"parameter must have scalar state with the same value and dtype "
|
| 870 |
+
f"but got values {non_tensor_set} for state {state_name} and "
|
| 871 |
+
f"unflattened parameter names {unflat_param_names}"
|
| 872 |
+
)
|
| 873 |
+
non_tensor = next(iter(non_tensor_set))
|
| 874 |
+
return non_tensor
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
def _rekey_sharded_optim_state_dict(
    sharded_osd: Dict[str, Any],
    model: nn.Module,
    optim: torch.optim.Optimizer,
    optim_input: Optional[
        Union[
            List[Dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ],
    using_optim_input: bool,
    is_named_optimizer: bool = False,
) -> Dict[str, Any]:
    """
    Rekeys the optimizer state dict from unflattened parameter names to flat
    parameter IDs according to the calling rank's ``optim``, which may be
    different across ranks. In particular, the unflattened parameter names are
    represented as :class:`_OptimStateKey` s.

    Args:
        sharded_osd (Dict[str, Any]): Optimizer state dict keyed by
            ``_OptimStateKey`` (or already-rekeyed ``str`` keys, which are
            passed through unchanged).
        model (nn.Module): Model whose parameters back ``optim``.
        optim (torch.optim.Optimizer): This rank's optimizer.
        optim_input: Legacy optimizer input (parameters or param groups);
            only consulted when ``using_optim_input`` is ``True``.
        using_optim_input (bool): Whether to derive keys from ``optim_input``
            (backward-compatible path) instead of from ``optim`` directly.
        is_named_optimizer (bool): Whether ``optim`` is a NamedOptimizer
            (FQN keys) rather than a vanilla optimizer (integer param IDs).

    Returns:
        Dict[str, Any]: A dict with a rekeyed ``"state"`` entry and, if
        present in ``sharded_osd``, a rekeyed ``"param_groups"`` entry.
    """
    param_to_fqns = _get_param_to_fqns(model)
    flat_param_to_fqn = _get_flat_param_to_fqn(model)
    # Keys are either integer param IDs (vanilla optimizer / optim_input
    # path) or FQN strings (NamedOptimizer path).
    param_to_param_key: Dict[nn.Parameter, Union[int, str]] = cast(
        Dict[nn.Parameter, Union[int, str]],
        (
            _get_param_to_param_id_from_optim_input(model, optim_input)
            if using_optim_input
            else _get_param_to_param_key(
                optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
            )
        ),
    )
    # All parameter keys in `param_to_param_key` should be in
    # `param_to_fqns` -- strict inequality follows when not all parameters are
    # passed to the optimizer
    assert len(param_to_param_key) <= len(param_to_fqns)

    unflat_param_names_to_flat_param_key: Dict[
        Tuple[str, ...], Union[int, str]
    ] = {}  # for "state"
    unflat_param_name_to_flat_param_key: Dict[
        str, Union[int, str]
    ] = {}  # for "param_groups"
    for param, unflat_param_names in param_to_fqns.items():
        if param not in param_to_param_key:
            # This parameter was not passed to the optimizer
            continue
        flat_param_key = param_to_param_key[param]
        unflat_param_names_to_flat_param_key[tuple(unflat_param_names)] = flat_param_key
        for unflat_param_name in unflat_param_names:
            unflat_param_name_to_flat_param_key[unflat_param_name] = flat_param_key

    sharded_osd_state = sharded_osd["state"]
    rekeyed_osd_state: Dict[Union[str, int], Any] = {}
    for key, param_state in sharded_osd_state.items():
        if isinstance(key, str):
            # Already a string key (e.g. previously rekeyed); keep as-is.
            rekeyed_osd_state[key] = param_state
            continue
        # Fall back to the original unflat names tuple when no flat key is
        # known for this entry (parameter not in this rank's optimizer).
        flat_param_key = unflat_param_names_to_flat_param_key.get(
            key.unflat_param_names, key.unflat_param_names
        )
        rekeyed_osd_state[flat_param_key] = param_state

    # Only process param_groups if it exists in sharded_osd
    if "param_groups" in sharded_osd:
        rekeyed_osd_param_groups: List[Dict[str, Any]] = []
        for unflat_param_group in sharded_osd["param_groups"]:
            flat_param_group = copy.deepcopy(unflat_param_group)
            # Multiple unflat names can map to the same flat key, so dedupe
            # via a set before sorting for a deterministic order.
            flat_param_keys = sorted(
                {
                    unflat_param_name_to_flat_param_key[unflat_param_name]
                    for unflat_param_name in unflat_param_group["params"]
                }
            )
            flat_param_group["params"] = flat_param_keys
            rekeyed_osd_param_groups.append(flat_param_group)
        return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups}
    else:
        return {"state": rekeyed_osd_state}
|
| 955 |
+
|
| 956 |
+
|
| 957 |
+
def _get_param_id_to_param_from_optim_input(
|
| 958 |
+
model: nn.Module,
|
| 959 |
+
optim_input: Optional[
|
| 960 |
+
Union[
|
| 961 |
+
List[Dict[str, Any]],
|
| 962 |
+
Iterable[nn.Parameter],
|
| 963 |
+
]
|
| 964 |
+
] = None,
|
| 965 |
+
) -> Dict[int, nn.Parameter]:
|
| 966 |
+
"""
|
| 967 |
+
Constructs a mapping from parameter IDs to parameters. This may be used
|
| 968 |
+
both for models with ``FlatParameter`` s and without.
|
| 969 |
+
|
| 970 |
+
NOTE: This method is only preserved for backward compatibility. The method
|
| 971 |
+
:meth:`_get_param_key_to_param` is the preferred code path that does not
|
| 972 |
+
rely on ``optim_input``.
|
| 973 |
+
|
| 974 |
+
NOTE: We critically assume that, whether the optimizer input is a list of
|
| 975 |
+
parameters or a list of parameter groups, :class:`torch.optim.Optimizer`
|
| 976 |
+
enumerates the parameter IDs in order. In other words, for a parameter list
|
| 977 |
+
input, the parameter IDs should be in that list order, and for a parameter
|
| 978 |
+
groups input, the parameter IDs should be in order within each parameter
|
| 979 |
+
group and in order across parameter groups.
|
| 980 |
+
|
| 981 |
+
Args:
|
| 982 |
+
model (nn.Module): Model whose parameters are passed into the
|
| 983 |
+
optimizer.
|
| 984 |
+
optim_input (Optional[Union[List[Dict[str, Any]],
|
| 985 |
+
Iterable[nn.Parameter]]]): Input passed into the optimizer
|
| 986 |
+
representing either a :class:`list` of parameter groups or an
|
| 987 |
+
iterable of parameters; if ``None``, then this method assumes the
|
| 988 |
+
input was ``model.parameters()``. (Default: ``None``)
|
| 989 |
+
|
| 990 |
+
Returns:
|
| 991 |
+
List[nn.Parameter]: Mapping from parameter IDs to parameters,
|
| 992 |
+
where the parameter ID is implicitly the index in the :class:`list`.
|
| 993 |
+
"""
|
| 994 |
+
# Assume the standard case of passing `model.parameters()` to the optimizer
|
| 995 |
+
# if `optim_input` is not specified
|
| 996 |
+
if optim_input is None:
|
| 997 |
+
return dict(enumerate(model.parameters()))
|
| 998 |
+
try:
|
| 999 |
+
params = cast(List[nn.Parameter], list(optim_input))
|
| 1000 |
+
except TypeError as e:
|
| 1001 |
+
raise TypeError(
|
| 1002 |
+
"Optimizer input should be an iterable of Tensors or dicts, "
|
| 1003 |
+
f"but got {optim_input}"
|
| 1004 |
+
) from e
|
| 1005 |
+
if len(params) == 0:
|
| 1006 |
+
raise ValueError("Optimizer input should not be empty")
|
| 1007 |
+
|
| 1008 |
+
# Check if the optimizer input represents tensors or parameter groups
|
| 1009 |
+
all_tensors = True
|
| 1010 |
+
all_dicts = True
|
| 1011 |
+
for param in params:
|
| 1012 |
+
all_tensors &= isinstance(param, torch.Tensor)
|
| 1013 |
+
all_dicts &= isinstance(param, dict)
|
| 1014 |
+
if not all_tensors and not all_dicts:
|
| 1015 |
+
raise TypeError("Optimizer input should be an iterable of Tensors or dicts")
|
| 1016 |
+
if all_tensors:
|
| 1017 |
+
return dict(enumerate(params))
|
| 1018 |
+
assert all_dicts
|
| 1019 |
+
param_id_to_param: List[nn.Parameter] = []
|
| 1020 |
+
for param_group in params:
|
| 1021 |
+
has_params_key = "params" in param_group # type: ignore[operator]
|
| 1022 |
+
assert has_params_key, (
|
| 1023 |
+
'A parameter group should map "params" to a list of the '
|
| 1024 |
+
"parameters in the group"
|
| 1025 |
+
)
|
| 1026 |
+
# Implicitly map `flat_param_id` (current length of the list) to
|
| 1027 |
+
# `param`
|
| 1028 |
+
param_id_to_param.extend(param_group["params"]) # type: ignore[index]
|
| 1029 |
+
return dict(enumerate(param_id_to_param))
|
| 1030 |
+
|
| 1031 |
+
|
| 1032 |
+
def _get_flat_param_to_fqn(model: torch.nn.Module) -> Dict[FlatParameter, str]:
    """
    Constructs a mapping from ``FlatParameter`` to a cleaned (devoid of prefixes
    from wrappers) fully qualified name (FQN). Note that this FQN is "non-canonical"
    because ``FlatParameter`` s do not come from the original module but are
    registered only after FSDP has been applied. This function returns the FSDP-given
    name for the ``FlatParameter`` (usually module._flat_param) as opposed to the
    canonical FQNs returned for ``FlatParameter`` s in ``_common_utils._get_param_to_fqns(...)``).

    Consequently, this function will only return a non-empty mapping if FSDP was
    applied with ``use_orig_params=False`` as, otherwise, the original parameters
    are used within the module and there would be no ``FlatParameter`` s in the module.

    Args:
        model (torch.nn.Module): Root module (possibly FSDP-wrapped) to scan.

    Returns:
        Dict[FlatParameter, str]: Mapping from each ``FlatParameter`` found in
        the module tree to its cleaned FQN.
    """

    # Visitor invoked by `_apply_to_modules` for each module; collects only
    # `FlatParameter` s registered directly on that module (recurse=False).
    def module_fn(module, prefix, tree_level, flat_param_to_fqn):
        for param_name, param in _named_parameters_with_duplicates(
            module, recurse=False
        ):
            if not isinstance(param, FlatParameter):
                continue
            # Strip wrapper prefixes (e.g. FSDP wrapper module names) from the
            # accumulated name.
            fqn = clean_tensor_name(prefix + param_name)
            flat_param_to_fqn[param] = fqn

    # Finalizer for `_apply_to_modules`: the accumulated dict is the result.
    def return_fn(flat_param_to_fqn):
        return flat_param_to_fqn

    flat_param_to_fqn_ret: Dict[FlatParameter, str] = {}
    return _apply_to_modules(
        model,
        module_fn,
        return_fn,
        [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
        flat_param_to_fqn_ret,
    )
|
| 1067 |
+
|
| 1068 |
+
|
| 1069 |
+
def _get_param_key_to_param(
|
| 1070 |
+
optim: torch.optim.Optimizer,
|
| 1071 |
+
model: Optional[nn.Module] = None,
|
| 1072 |
+
is_named_optimizer: bool = False,
|
| 1073 |
+
param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None,
|
| 1074 |
+
flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None,
|
| 1075 |
+
) -> Dict[Union[int, str], nn.Parameter]:
|
| 1076 |
+
"""
|
| 1077 |
+
Constructs a mapping from parameter keys to parameters. For the regular
|
| 1078 |
+
optimizers, the keys are parameter IDs. For NamedOptimizer, the keys
|
| 1079 |
+
are FQNs. This API may be used both for models with ``FlatParameter`` s and
|
| 1080 |
+
without.
|
| 1081 |
+
"""
|
| 1082 |
+
clean_fqn_to_curr_fqn: Dict[str, str] = {}
|
| 1083 |
+
if is_named_optimizer:
|
| 1084 |
+
assert (
|
| 1085 |
+
param_to_fqns is not None and flat_param_to_fqn is not None
|
| 1086 |
+
), "The optimizer is a NamedOptimizer, `param_to_fqns` must not be None."
|
| 1087 |
+
assert model is not None
|
| 1088 |
+
for key, _ in _named_parameters_with_duplicates(model):
|
| 1089 |
+
clean_fqn_to_curr_fqn[clean_tensor_name(key)] = key
|
| 1090 |
+
|
| 1091 |
+
param_key_to_param: Dict[Union[str, int], nn.Parameter] = {}
|
| 1092 |
+
pid = 0
|
| 1093 |
+
for param_group in optim.param_groups:
|
| 1094 |
+
if is_named_optimizer:
|
| 1095 |
+
for param in param_group["params"]:
|
| 1096 |
+
assert flat_param_to_fqn is not None
|
| 1097 |
+
if param in flat_param_to_fqn:
|
| 1098 |
+
# FlatParameter case
|
| 1099 |
+
key = flat_param_to_fqn[param]
|
| 1100 |
+
else:
|
| 1101 |
+
assert param_to_fqns is not None
|
| 1102 |
+
# use_orig_params case
|
| 1103 |
+
assert len(param_to_fqns[param]) == 1
|
| 1104 |
+
key = param_to_fqns[param][0]
|
| 1105 |
+
try:
|
| 1106 |
+
key = clean_fqn_to_curr_fqn[key]
|
| 1107 |
+
except KeyError as e:
|
| 1108 |
+
raise KeyError(
|
| 1109 |
+
f"Can't find {key} from {list(clean_fqn_to_curr_fqn.keys())}."
|
| 1110 |
+
) from e
|
| 1111 |
+
param_key_to_param[key] = param
|
| 1112 |
+
else:
|
| 1113 |
+
for param in param_group["params"]:
|
| 1114 |
+
param_key_to_param[pid] = param
|
| 1115 |
+
pid += 1
|
| 1116 |
+
|
| 1117 |
+
return param_key_to_param
|
| 1118 |
+
|
| 1119 |
+
|
| 1120 |
+
def _get_param_to_param_key(
    optim: torch.optim.Optimizer,
    model: Optional[nn.Module] = None,
    is_named_optimizer: bool = False,
    param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None,
    flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None,
) -> Dict[nn.Parameter, Union[int, str]]:
    """
    Constructs the inverse mapping of :func:`_get_param_key_to_param`. This API
    only supports the case where `optim` is a regular optimizer, not NamedOptimizer.
    So the parameter keys will be parameter ids.
    """
    # Invert the key -> parameter mapping; parameters are unique per key, so
    # no collisions occur.
    inverse: Dict[nn.Parameter, Union[int, str]] = {}
    for key, parameter in _get_param_key_to_param(
        optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
    ).items():
        inverse[parameter] = key
    return inverse
|
| 1136 |
+
|
| 1137 |
+
|
| 1138 |
+
def _get_param_to_param_id_from_optim_input(
    model: nn.Module,
    optim_input: Optional[
        Union[
            List[Dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ] = None,
) -> Dict[nn.Parameter, int]:
    """Constructs the inverse mapping of :func:`_get_param_id_to_param_from_optim_input`."""
    # Flip (id -> param) into (param -> id); IDs are unique by construction.
    forward_mapping = _get_param_id_to_param_from_optim_input(model, optim_input)
    return {parameter: pid for pid, parameter in forward_mapping.items()}
|
| 1150 |
+
|
| 1151 |
+
|
| 1152 |
+
def _check_missing_keys_on_rank(
    r0_optim_state_keys: List[_OptimStateKey],
    optim_state_key_to_param_key: Dict[_OptimStateKey, Union[str, int]],
    param_key_to_param: Dict[Union[str, int], nn.Parameter],
    group: Optional[dist.ProcessGroup],
) -> None:
    """
    Validates that this rank's optimizer has state for every key that rank 0's
    optimizer has, raising a ``RuntimeError`` (on all ranks) otherwise.

    This is a collective: every rank in ``group`` must call it. Missing-key
    counts are all-reduced, and if any rank is missing keys, the per-rank
    missing lists are all-gathered to build the error message.

    Args:
        r0_optim_state_keys (List[_OptimStateKey]): Keys broadcast from rank 0.
        optim_state_key_to_param_key (Dict): This rank's key -> param-key map.
        param_key_to_param (Dict): This rank's param-key -> parameter map,
            used only to sanity-check integer param keys.
        group (Optional[dist.ProcessGroup]): Process group for the collectives.
    """
    # Ensure that all ranks have at least the optimizer states needed by
    # rank 0's optimizer
    missing_keys: List[_OptimStateKey] = []
    for r0_optim_state_key in r0_optim_state_keys:
        if r0_optim_state_key not in optim_state_key_to_param_key:
            # A parameter from rank 0's optimizer does not exist for this
            # rank's optimizer
            missing_keys.append(r0_optim_state_key)
            continue
        param_key = optim_state_key_to_param_key[r0_optim_state_key]
        if isinstance(param_key, int):
            # Integer param keys index implicitly into `param_key_to_param`,
            # so they must fall within [0, len).
            assert param_key >= 0 and param_key < len(
                param_key_to_param
            ), "Check the `param_key_to_param` construction"
    # We cannot use FSDPState.compute_device as this API is a global view.
    device = _get_pg_default_device(group)
    num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device)
    dist.all_reduce(num_missing, group=group)
    if num_missing.item() > 0:
        obj_list = [None for _ in range(dist.get_world_size(group))]
        # Gather every rank's missing-key list to report them all at once.
        dist.all_gather_object(obj_list, missing_keys, group=group)
        error_msg = (
            "FSDP currently requires each rank to have at least the "
            "optimizer states needed by rank 0's optimizer but some ranks "
            "are missing some of those states"
        )
        for rank, keys in enumerate(obj_list):
            keys = cast(List[_OptimStateKey], keys)
            if len(keys) > 0:
                error_msg += (
                    f"\nRank {rank} is missing states for the parameters: "
                    f"{[key.unflat_param_names for key in keys]}"
                )
        raise RuntimeError(error_msg)
|
| 1192 |
+
|
| 1193 |
+
|
| 1194 |
+
def _map_param_key_to_optim_keys(
    optim_state_dict: Dict[str, Any],
    group: Optional[dist.ProcessGroup],
    param_key_to_param: Dict[Union[int, str], nn.Parameter],
    param_to_fqns: Dict[nn.Parameter, List[str]],
    fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
    merge_keys: bool = False,
) -> Tuple[List[_OptimStateKey], Dict[_OptimStateKey, Union[int, str]]]:
    """
    Construct the local mapping between the ``_OptimStateKey`` and parameter keys
    and all the ``_OptimStateKey`` across ranks. If ``merge_keys`` is False, rank0
    must contain all the ``_OptimStateKey``, an exception will be raised otherwise.
    Note that ``merge_keys`` should equal to ``use_orig_params``.

    This is a collective: either an ``all_gather_object`` (``merge_keys=True``)
    or a ``broadcast_object_list`` from rank 0 (``merge_keys=False``) is issued,
    so every rank in ``group`` must call it.

    Returns:
        Tuple of (global list of ``_OptimStateKey`` s, local mapping from
        ``_OptimStateKey`` to this rank's parameter key).
    """
    rank = dist.get_rank(group)
    optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]] = {}  # local
    all_optim_state_keys: List[_OptimStateKey] = []

    for param_key, param in param_key_to_param.items():
        # Do not include parameters without state to avoid empty mappings
        # just like in normal `torch.optim.Optimizer.state_dict()`
        if param_key not in optim_state_dict["state"]:
            continue
        fqns = param_to_fqns[param]
        is_fsdp_managed = isinstance(param, FlatParameter)
        if is_fsdp_managed:
            # A FlatParameter's first FQN must be registered with FSDP.
            assert fqns[0] in fqn_to_fsdp_param_info, (
                fqns[0],
                list(fqn_to_fsdp_param_info.keys()),
            )
        # Recompute from the FQN lookup so that `use_orig_params` parameters
        # (which are not FlatParameters) are also marked FSDP-managed.
        is_fsdp_managed = fqns[0] in fqn_to_fsdp_param_info
        optim_state_key = _OptimStateKey(
            unflat_param_names=tuple(fqns),
            is_fsdp_managed=is_fsdp_managed,
        )
        if rank == 0 or merge_keys:
            all_optim_state_keys.append(optim_state_key)
        optim_state_key_to_param_key[optim_state_key] = param_key

    if merge_keys:
        # Union the keys across all ranks (ranks may hold disjoint subsets
        # when `use_orig_params=True`), then dedupe and sort for determinism.
        all_keys: List[List[_OptimStateKey]] = [
            [] for _ in range(dist.get_world_size(group))
        ]
        dist.all_gather_object(all_keys, all_optim_state_keys, group=group)
        merge_all_optim_state_keys = [
            key for local_keys in all_keys for key in local_keys
        ]
        all_optim_state_keys = sorted(set(merge_all_optim_state_keys))
    else:
        # Rank 0's keys are authoritative; broadcast them and verify this
        # rank is not missing any.
        key_obj_list: List[Optional[List[_OptimStateKey]]] = (
            [all_optim_state_keys] if rank == 0 else [None]
        )
        dist.broadcast_object_list(key_obj_list, src=0, group=group)
        assert key_obj_list[0] is not None
        all_optim_state_keys = key_obj_list[0]
        _check_missing_keys_on_rank(
            all_optim_state_keys,
            optim_state_key_to_param_key,
            param_key_to_param,
            group,
        )

    return all_optim_state_keys, optim_state_key_to_param_key
|
| 1257 |
+
|
| 1258 |
+
|
| 1259 |
+
def _unflatten_param_groups(
|
| 1260 |
+
state_dict: Dict[str, Any],
|
| 1261 |
+
param_key_to_param: Dict[Union[int, str], nn.Parameter],
|
| 1262 |
+
param_to_fqns: Dict[nn.Parameter, List[str]],
|
| 1263 |
+
) -> List[Dict[str, Any]]:
|
| 1264 |
+
param_groups: List[Dict[str, Any]] = []
|
| 1265 |
+
for flat_param_group in state_dict["param_groups"]:
|
| 1266 |
+
unflat_param_group = copy.deepcopy(flat_param_group)
|
| 1267 |
+
param_group_params = [
|
| 1268 |
+
param_key_to_param[flat_param_key]
|
| 1269 |
+
for flat_param_key in flat_param_group["params"]
|
| 1270 |
+
]
|
| 1271 |
+
nested_unflat_param_names = [
|
| 1272 |
+
param_to_fqns[param] for param in param_group_params
|
| 1273 |
+
]
|
| 1274 |
+
unflat_param_group["params"] = [
|
| 1275 |
+
unflat_param_name
|
| 1276 |
+
for unflat_param_names in nested_unflat_param_names
|
| 1277 |
+
for unflat_param_name in unflat_param_names
|
| 1278 |
+
] # flatten the list of lists
|
| 1279 |
+
param_groups.append(unflat_param_group)
|
| 1280 |
+
return param_groups
|
| 1281 |
+
|
| 1282 |
+
|
| 1283 |
+
def _is_named_optimizer(optim_state_dict: Dict[str, Any]) -> bool:
|
| 1284 |
+
"""
|
| 1285 |
+
Returns whether the state_dict is from a NamedOptimizer.
|
| 1286 |
+
This function checks that the keys in the state_dict['state'] are strings
|
| 1287 |
+
(which usually are FQNs) versus integers (which usually refer to param_ids
|
| 1288 |
+
from a vanilla torch.optim.Optimizer).
|
| 1289 |
+
"""
|
| 1290 |
+
state = optim_state_dict.get("state", None)
|
| 1291 |
+
if not state:
|
| 1292 |
+
# If we cannot find a state, assume it is not NamedOptimizer as
|
| 1293 |
+
# NamedOptimizer has eager initialization.
|
| 1294 |
+
return False
|
| 1295 |
+
try:
|
| 1296 |
+
key = next(iter(state.keys()))
|
| 1297 |
+
except Exception as e:
|
| 1298 |
+
raise Exception(optim_state_dict) from e # noqa: TRY002
|
| 1299 |
+
return isinstance(key, str)
|
| 1300 |
+
|
| 1301 |
+
|
| 1302 |
+
@dataclass
class StateInfo:
    """Per-parameter summary of optimizer state, split by value category so it
    can be exchanged via ``all_gather_object`` without sending large tensors."""

    # The key of these dictionaries are the state name, e.g., `exp_avg`.
    # Positive-dimension tensors are represented only by their metadata
    # (shape and dtype via `_PosDimTensorInfo`), not their data.
    tensors: Dict[str, _PosDimTensorInfo]
    # Zero-dimension tensors (e.g. `step`), sent by value (moved to CPU by
    # the caller).
    scalar_tensors: Dict[str, torch.Tensor]
    # Plain Python values (non-tensor state), sent by value.
    non_tensors: Dict[str, Any]
|
| 1308 |
+
|
| 1309 |
+
|
| 1310 |
+
def _allgather_state_info(
    fsdp_state: _FSDPState,
    input_states: Dict[str, Any],
) -> List[Dict[str, StateInfo]]:
    """
    Given the ``input_states``, allgather StateInfo for each state. The function
    uses all_gather_object to gather StateInfo so no GPU tensors are sent.

    This is a collective over ``fsdp_state.process_group``; every rank must
    call it.

    Args:
        fsdp_state (_FSDPState): FSDP state providing ``world_size`` and
            ``process_group``.
        input_states (Dict[str, Any]): Maps FQN -> that parameter's optimizer
            state dict (state name -> value) on this rank.

    Returns:
        List[Dict[str, StateInfo]]: One entry per rank, each mapping FQN to
        that rank's ``StateInfo``.
    """

    processed_state_dict: Dict[str, StateInfo] = {}
    gathered_state_info: List[Dict[str, StateInfo]] = [
        {} for _ in range(fsdp_state.world_size)
    ]

    for fqn, optim_state in input_states.items():
        # Allgather the scalar tensor state, non-tensor states and tensors metadata.
        processed_state = StateInfo({}, {}, {})
        # Iterate in sorted order so all ranks process state names identically.
        for state_name, value in sorted_items(optim_state):
            if torch.is_tensor(value):
                if value.dim() == 0:
                    # Ensure that `step` is on CPU.
                    processed_state.scalar_tensors[state_name] = value.cpu()
                else:
                    # Only metadata is gathered for positive-dim tensors;
                    # the data itself is allgathered later via tensor ops.
                    processed_state.tensors[state_name] = _PosDimTensorInfo(
                        value.shape, value.dtype
                    )
            else:
                processed_state.non_tensors[state_name] = value
        processed_state_dict[fqn] = processed_state
    dist.all_gather_object(
        gathered_state_info,
        processed_state_dict,
        group=fsdp_state.process_group,
    )
    return gathered_state_info
|
| 1345 |
+
|
| 1346 |
+
|
| 1347 |
+
def _convert_all_state_info(
    fsdp_param_info: FSDPParamInfo,
    gathered_state_info: List[Dict[str, StateInfo]],
    input_states: Dict[str, Any],
    output_states: Dict[str, Dict[str, Any]],
) -> Tuple[Optional[torch.dtype], Dict[str, List[Optional[torch.Tensor]]]]:
    """
    Given the ``gathered_state_info`` and ``input_states``, the API converted
    the StateInfo into the original state if the state is not a non-scalar
    tensor. For a multi-dimensional tensor, the local state will be stored in
    ``state_buffer`` in a correct order for later allgather purpose.

    Scalar-tensor and non-tensor states are written directly into
    ``output_states`` (in-place), after asserting that every contributing rank
    reports the same value.

    Returns:
        Tuple of (common dtype of all positive-dim tensor states, or ``None``
        if there are none; mapping from state name to a per-parameter-index
        list of this rank's local tensors, ``None`` where absent).
    """

    state_buffers: Dict[str, List[Optional[torch.Tensor]]] = {}

    for fqn, gathered_state in output_states.items():
        # This rank's view of every rank's StateInfo for this parameter.
        state_info = [s[fqn] for s in gathered_state_info]
        all_tensor_states = sorted(
            {n for state in state_info for n in state.tensors.keys()}
        )
        empty_ranks: Set[int] = set()
        dtype: Optional[torch.dtype] = None
        # First check all the non-scalar states and get the information of
        # states on each rank.
        for state_name in all_tensor_states:
            numels = []
            _empty_ranks: Set[int] = set()
            for rank, object_state in enumerate(state_info):
                numels.append(0)
                info = object_state.tensors.get(state_name, None)
                if info is not None:
                    numels[-1] = info.shape.numel()
                    # All positive-dim tensor states must share one dtype.
                    if not dtype:
                        dtype = info.dtype
                    else:
                        assert dtype == info.dtype
                if numels[-1] == 0:
                    _empty_ranks.add(rank)

            # Every tensor state must agree on which ranks hold no shard.
            assert not empty_ranks or empty_ranks == _empty_ranks
            empty_ranks = _empty_ranks
            if state_name not in state_buffers:
                state_buffers[state_name] = [
                    None for _ in fsdp_param_info.param_indices
                ]
            local_state = input_states[fqn].get(state_name, None)
            # N.B. We need to move the state to compute_device. The reason is
            # not yet clear and we need to figure out why the state may be on a
            # different device.
            if local_state is not None:
                local_state = local_state.to(fsdp_param_info.state.compute_device)
            # Place the local shard at the parameter's index within the flat
            # parameter so a later allgather reads them in flat-param order.
            state_buffers[state_name][fsdp_param_info.param_indices[fqn]] = local_state

        # Restoring the scalar and non-tensor states. If the corresponding
        # non-scalar states do not exist on the rank, we also skip the scalar
        # non-tensor states on that rank.
        for rank, object_state in enumerate(state_info):
            if rank in empty_ranks:
                continue
            for name, non_tensor_value in object_state.non_tensors.items():
                curr_non_tensor_value = gathered_state.get(name, None)
                assert (
                    curr_non_tensor_value is None
                    or curr_non_tensor_value == non_tensor_value
                ), (
                    f"Rank {rank} has different values for {name}: {non_tensor_value}."
                    + f" Other ranks: {curr_non_tensor_value}"
                )
                gathered_state[name] = non_tensor_value

            for name, scalar_tensor_value in object_state.scalar_tensors.items():
                curr_scalar_tensor_value = gathered_state.get(name, None)
                assert curr_scalar_tensor_value is None or torch.equal(
                    scalar_tensor_value, curr_scalar_tensor_value
                ), (
                    f"Rank {rank} has different values for {name}: {scalar_tensor_value}."
                    + f" Other ranks: {curr_scalar_tensor_value}"
                )
                gathered_state[name] = scalar_tensor_value

    # NOTE(review): `dtype` is bound inside the loop; if `output_states` is
    # empty it is undefined here (hence the ignore). Callers presumably
    # guarantee non-empty input -- confirm.
    return dtype, state_buffers  # type: ignore[possibly-undefined]
|
| 1428 |
+
|
| 1429 |
+
|
| 1430 |
+
def _unflatten_orig_param_states(
    fsdp_param_info: FSDPParamInfo,
    output_states: Dict[str, Dict[str, Any]],
    state_name: str,
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> None:
    """
    Given a output state dict, ``output_states``, which the keys are FQNs to the
    original parameters (not FlatParameters nor parmeter ID), and the values
    are gathered states, unflatten the states to the original dimensions.

    This function performs the unflattening process in-place.

    Args:
        fsdp_param_info (FSDPParamInfo): Provides the handle's flat parameter
            (for original shapes via ``_shapes``) and the FSDP state.
        output_states (Dict[str, Dict[str, Any]]): FQN -> gathered state dict;
            the entry for ``state_name`` is replaced in-place.
        state_name (str): Name of the state being unflattened (e.g. `exp_avg`).
        shard_state (bool): If True, re-chunk the unflattened value across
            ranks (as DTensor or sharded tensor per the configured extension).
        to_save (bool): If False, this rank does not save and nothing is done.
        cpu_offload (bool): If True, move the final value to CPU.
    """
    if not to_save:
        return
    flat_param = fsdp_param_info.handle.flat_param
    fsdp_state = fsdp_param_info.state
    for fqn, gathered_state in output_states.items():
        value = gathered_state[state_name]
        param_idx = fsdp_param_info.param_indices[fqn]

        # TODO: This solution is not general and only apply to PTD TP solution.
        if isinstance(value, DTensor):
            placement = value.placements[0]
            # If gathered state is a DTensor and its TP placement is not Replicate(), we need to
            # gather the tensor on its TP dimension before chunking them into DTensor again.
            if placement != Replicate():
                placement_dim = placement.dim  # type: ignore[attr-defined]
                # NOTE(review): `value_local` is assigned but never used; the
                # reshape below operates on the original `value` -- confirm
                # whether the redistributed tensor was meant to be reshaped.
                value_local = value.redistribute(placements=(Replicate(),))
                reshape_size = list(flat_param._shapes[param_idx])
                # The sharded dim grows by the TP mesh size once gathered.
                reshape_size[placement_dim] *= value.device_mesh.size(0)
                reshape_size = torch.Size(reshape_size)
                value = value.reshape(reshape_size)
            # If gathered state is a replicate DTensor, we directly reshape it.
            else:
                value = value.reshape(flat_param._shapes[param_idx])
        else:
            # If gathered state is a tensor, we directly reshape it into unflatten state.
            value = value.reshape(flat_param._shapes[param_idx])

        if shard_state:
            osd_config = fsdp_state._optim_state_dict_config
            if getattr(osd_config, "_use_dtensor", False):
                assert fsdp_state._device_mesh is not None
                value = _ext_chunk_dtensor(
                    value,
                    fsdp_state.rank,
                    fsdp_state._device_mesh,
                    fsdp_state._fsdp_extension,
                )
            else:
                assert fsdp_state.process_group is not None
                value = _ext_chunk_tensor(
                    value,
                    fsdp_state.rank,
                    fsdp_state.world_size,
                    fsdp_state._device_handle.device_count(),
                    fsdp_state.process_group,
                    fsdp_state._fsdp_extension,
                )
        elif not cpu_offload:
            # Clone so the saved state does not alias the gather buffer.
            with SimpleProfiler.profile("clone"):
                value = value.detach().clone()

        if cpu_offload:
            with SimpleProfiler.profile(SimpleProfiler.Type.D2H):
                value = value.cpu()
        gathered_state[state_name] = value
|
| 1500 |
+
|
| 1501 |
+
|
| 1502 |
+
def _allgather_orig_param_states(
    fsdp_param_info: FSDPParamInfo,
    gathered_state_info: List[Dict[str, StateInfo]],
    input_states: Dict[str, Any],
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> Dict[str, Dict[str, Any]]:
    """
    Given the ``gathered_state_info`` and ``input_states``, the API allgathers
    all tensor states and restores non-tensor states from ``gathered_state_info``.

    Args:
        fsdp_param_info (FSDPParamInfo): FSDP state/handle info for one
            ``FlatParameter`` and the original parameters it owns.
        gathered_state_info (List[Dict[str, StateInfo]]): per-rank state
            metadata previously exchanged via ``_allgather_state_info``.
        input_states (Dict[str, Any]): this rank's local optimizer states,
            keyed by original-parameter FQN.
        shard_state (bool): forwarded to ``_unflatten_orig_param_states``.
        to_save (bool): forwarded to ``_unflatten_orig_param_states``.
        cpu_offload (bool): forwarded to ``_unflatten_orig_param_states``.

    Returns:
        Mapping from each original parameter's FQN to its
        ``{state_name: tensor}`` dict.
    """
    fsdp_state = fsdp_param_info.state
    if fsdp_state.rank == 0 and dist.get_debug_level() == dist.DebugLevel.DETAIL:
        logger.info(
            "Memory Summary before calling to _allgather_orig_param_states %s",
            fsdp_state._device_handle.memory_summary(),
        )

    # One (initially empty) state dict per input FQN; filled in below.
    output_states: Dict[str, Dict[str, Any]] = {fqn: {} for fqn in input_states.keys()}

    dtype, state_buffers = _convert_all_state_info(
        fsdp_param_info, gathered_state_info, input_states, output_states
    )

    # No tensor states to gather -- only non-tensor states were restored.
    if len(state_buffers) == 0:
        return output_states

    # Whether each parameter (in FlatParameter index order) has any state.
    has_state_params: List[bool] = [
        True if fqn in output_states else False
        for fqn, idx in fsdp_param_info.param_indices.items()
    ]

    # Loop through the ``state_buffers`` and construct the flattened, concatenated,
    # sharded states. The size of the constructed state will be the same size as
    # flat_param (also sharded).
    # Then we perform an allgather_into_tensor to get the full flat_param state.
    # The full flat_param state is the result of concatenation of multiple states
    # in the order of flat_param._fqns.
    # The final step is to split the flat_param state into original param states
    # and return the result.
    flat_param = fsdp_param_info.handle.flat_param
    empty_func = functools.partial(
        torch.empty, dtype=dtype, device=fsdp_state.compute_device
    )
    gathered_tensor = empty_func(flat_param._padded_unsharded_size)
    # Synchronize can be slow but this will be easier for us to debug.
    fsdp_state._device_handle.synchronize()
    for state_name, buffers in state_buffers.items():
        local_buffers: List[torch.Tensor] = []
        # [begin, end] is this rank's element range within the padded
        # unsharded FlatParameter.
        begin = fsdp_state.rank * flat_param._sharded_size.numel()
        # End is inclusive.
        end = begin + flat_param._sharded_size.numel() - 1
        # param_idx corresponds to the parameter index in the FlatParameter.
        mem_offset, param_idx = 0, 0
        for numel, is_padding in zip(
            flat_param._numels_with_padding, flat_param._is_padding_mask
        ):
            frozen_and_no_state = not is_padding and (
                not fsdp_param_info.param_requires_grad[param_idx]
                and not has_state_params[param_idx]
            )

            if is_padding or frozen_and_no_state:
                # This memory range is a padding or the param is frozen and does
                # not require gradient. For the latter case, we treat it as a
                # padding and add empty values to the local_buffers.

                padding_begin, padding_end = mem_offset, mem_offset + numel - 1
                if padding_begin <= begin <= padding_end:
                    # The range is an align padding before the first parameter in
                    # the shard. The shard includes parts of this align padding.
                    padding_len = (
                        padding_end - begin + 1
                        if end >= padding_end
                        else end - begin + 1
                    )
                elif padding_begin <= end <= padding_end:
                    # The range is an align padding after the last parameter in
                    # the shard. The shard includes parts of this align padding.
                    padding_len = (
                        end - padding_begin + 1
                        if begin <= padding_begin
                        else end - begin + 1
                    )
                elif begin < padding_begin <= padding_end < end:
                    # The range is an align padding that is completely in the
                    # shard.
                    padding_len = numel
                else:
                    padding_len = 0
                if padding_len:
                    local_buffers.append(empty_func(padding_len))

            if not is_padding:
                # This memory range is a parameter in FlatParameter. So there
                # should be a corresponding state in the optimizer unless the
                # parameter is frozen, which we treat as a padding above.

                # We need to check if this rank owns the buffer. If this is None:
                # 1.) the rank does not own any part of the original parameter.
                #     As a result, there is no corresponding optimizer state on
                #     the rank as well.
                # 2.) the parameter is frozen AND no optimizer state for the
                #     parameter. If a parameter is frozen, there can still be
                #     optimizer state if the parameter is not frozen in the
                #     previous steps.
                if buffers[param_idx] is not None:
                    local_buffers.append(cast(torch.Tensor, buffers[param_idx]))
                param_idx += 1

            mem_offset += numel

        shard_numel_padded = flat_param._sharded_size.numel() - (
            sum(t.numel() for t in local_buffers)
        )

        assert flat_param._shard_numel_padded == shard_numel_padded, (
            "Manually calculated _sharded_numel_padded is incorrect. "
            f"_shard_numel_padded={flat_param._shard_numel_padded}, "
            f"shard_numel_padded={shard_numel_padded}, "
            f"_sharded_size.numel={flat_param._sharded_size.numel()}, "
            f"_numels_with_padding={flat_param._numels_with_padding}, "
            f"begin={begin}, end={end},"
        )
        if shard_numel_padded > 0:
            # Add right-handed padding.
            local_buffers.append(empty_func(shard_numel_padded))
        local_shard = torch.cat(local_buffers)
        assert local_shard.numel() * fsdp_state.world_size == gathered_tensor.numel(), (
            "The size of local shard times the world size should equal to the "
            "gathered tensor size. The inconsistency may be from a bug of "
            "FlatParameter's metadata or the reconstruction logic in optimizer "
            "state dict."
        )
        fsdp_state._device_handle.synchronize()
        with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER):
            dist.all_gather_into_tensor(
                gathered_tensor, local_shard, group=fsdp_state.process_group
            )
        # Synchronize can be slow but this will be easier for us to debug.
        fsdp_state._device_handle.synchronize()

        # Strip the trailing alignment padding, then split the flat state back
        # into per-original-parameter views.
        unpadded_tensor = gathered_tensor[: flat_param._unpadded_unsharded_size.numel()]
        flat_param_handle = fsdp_param_info.handle
        orig_states = flat_param_handle._get_unflat_views_aligned(unpadded_tensor)
        assert len(orig_states) == len(fsdp_param_info.param_indices), (
            "The number of parameters from FlatParameter is not consistent to "
            "the number of states used by optimizer state dict reconstruction "
            "logic."
        )
        for fqn, idx in fsdp_param_info.param_indices.items():
            if fsdp_param_info.param_requires_grad[idx] or fqn in output_states:
                output_states[fqn][state_name] = orig_states[idx]

        _unflatten_orig_param_states(
            fsdp_param_info,
            output_states,
            state_name,
            shard_state,
            to_save,
            cpu_offload,
        )

    # Free the large gather buffer before returning to reduce peak memory.
    del gathered_tensor
    return output_states
|
| 1668 |
+
|
| 1669 |
+
|
| 1670 |
+
def _gather_all_orig_param_state(
    fsdp_param_info: FSDPParamInfo,
    input_states: Dict[str, Any],
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> Dict[str, Any]:
    """
    Given an optimizer state dict, ``input_states``, whose keys are FQNs of the
    original parameters (not FlatParameters nor parameter IDs), gather all the
    states and unflatten them to the original dimensions. Note that all the
    params referred to by ``input_states`` must be managed by FSDP.
    """
    fsdp_state = fsdp_param_info.state
    # With a single rank or NO_SHARD, nothing needs to be gathered.
    unsharded = (
        fsdp_state.world_size == 1
        or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
    )
    if unsharded:
        return input_states if to_save else {}

    with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
        with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ):
            gathered_state_info = _allgather_state_info(fsdp_state, input_states)
        output_states = _allgather_orig_param_states(
            fsdp_param_info,
            gathered_state_info,
            input_states,
            shard_state,
            to_save,
            cpu_offload,
        )

    if not to_save:
        return {}

    # Sanity check: every trainable parameter managed by this FSDPParamInfo
    # must have a gathered state in the output.
    for key, idx in fsdp_param_info.param_indices.items():
        if key in output_states or not fsdp_param_info.param_requires_grad[idx]:
            continue
        raise RuntimeError(
            f"{key} is not in the output state. "
            "The FSDPParamInfo has the param keys "
            f"{sorted(fsdp_param_info.param_indices.keys())} while "
            "the output_states has the param keys "
            f"{sorted(output_states.keys())}."
        )
    return output_states
|
| 1718 |
+
|
| 1719 |
+
|
| 1720 |
+
def _convert_state_with_orig_params(
    all_optim_state_keys: List[_OptimStateKey],
    optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]],
    fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
    optim_state_dict: Dict[Union[str, int], Any],
    to_save: bool,
    shard_state: bool,
    cpu_offload: bool = True,
) -> Dict[str, Any]:
    """
    Convert the optimizer state when FSDP uses ``use_orig_params=True``.

    Groups the per-parameter states by their owning ``FSDPParamInfo`` and then
    gathers each group at once via ``_gather_all_orig_param_state``. States for
    parameters not managed by FSDP are copied through directly (optionally
    moved to CPU).
    """
    fsdp_osd_state: Dict[str, Any] = {}
    # This variable is used to deduplicate the FSDPParamInfo as one FSDPParamInfo
    # usually corresponds to multiple parameters. We could not use FSDPParamInfo
    # as the key because FSDPParamInfo is not hashable. As a result, we fall back
    # to `id(FSDPParamInfo)`, which the type is an integer.
    all_states: Dict[int, Dict[str, Any]] = {}
    # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
    # across ranks
    for optim_state_key in all_optim_state_keys:
        param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
            optim_state_key, None
        )

        # A key can be missing locally (sharding may leave a rank with an
        # empty parameter) only when it is FSDP-managed; otherwise skip it.
        if param_key is None and not optim_state_key.is_fsdp_managed:
            continue

        if optim_state_key.is_fsdp_managed:
            fqn = optim_state_key.unflat_param_names[0]
            fsdp_param_info = fqn_to_fsdp_param_info.get(fqn, None)
            if fsdp_param_info is None:
                # This can happen if not all FSDP instances have all the
                # parameters. This can happen with FSDP + some MPMD style
                # parallelism.

                # TODO: it is unclear if we need to do the same check with
                # non-FSDP managed keys.
                continue
            # An absent local state (param_key is None) becomes an empty dict;
            # other ranks' states are merged in during the gather below.
            state = {} if param_key is None else optim_state_dict[param_key]
            if id(fsdp_param_info) not in all_states:
                all_states[id(fsdp_param_info)] = {}
            all_states[id(fsdp_param_info)][fqn] = state

        elif to_save:
            # Non-FSDP-managed parameter: copy its state through unchanged.
            assert len(optim_state_key.unflat_param_names) == 1
            unflat_param_name = optim_state_key.unflat_param_names[0]
            with SimpleProfiler.profile("none_fsdp_managed_copy"):
                param_key = cast(Union[str, int], param_key)
                fsdp_osd_state[unflat_param_name] = copy.copy(
                    optim_state_dict[param_key]
                )
                if cpu_offload:
                    for state_name, value in sorted_items(
                        fsdp_osd_state[unflat_param_name]
                    ):
                        if not torch.is_tensor(value):
                            continue
                        fsdp_osd_state[unflat_param_name][state_name] = value.cpu()

    # Instead of gathering the state of each parameter individually, we perform
    # the gathering all at once to speed up the process.
    for _all_states in all_states.values():
        fqn = next(iter(_all_states.keys()))
        fsdp_param_info = fqn_to_fsdp_param_info[fqn]
        assert len(fsdp_param_info.param_requires_grad) > 0, (
            "With use_orig_params, FSDPParamInfo should have requires_grad "
            "information. However, the length is zero."
        )
        # Every trainable parameter of this FlatParameter must have a state
        # entry; a missing one indicates a metadata/optimizer mismatch.
        for key, idx in fsdp_param_info.param_indices.items():
            if key in _all_states:
                continue
            if not fsdp_param_info.param_requires_grad[idx]:
                continue
            raise RuntimeError(
                f"{key} is not in the optimizer state. "
                "The FSDPParamInfo has the param keys "
                f"{sorted(fsdp_param_info.param_indices.keys())} while "
                "the optimizer has the param keys "
                f"{sorted(_all_states.keys())}."
            )
        fsdp_osd_state.update(
            _gather_all_orig_param_state(
                fsdp_param_info,
                _all_states,
                shard_state,
                to_save,
                cpu_offload,
            )
        )

    return fsdp_osd_state
|
| 1809 |
+
|
| 1810 |
+
|
| 1811 |
+
def _convert_state_with_flat_params(
    all_optim_state_keys: List[_OptimStateKey],
    optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]],
    fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
    optim_state_dict: Dict[Union[str, int], Any],
    to_save: bool,
    shard_state: bool,
    cpu_offload: bool = True,
) -> Dict[str, Any]:
    """
    Convert the optimizer state when FSDP uses ``use_orig_params=False``.

    Each FSDP-managed key maps to one FlatParameter state, which is unflattened
    into per-original-parameter states; non-FSDP-managed keys are copied
    through directly (optionally moved to CPU).
    """
    fsdp_osd_state: Dict[str, Any] = {}
    # Walk keys in rank 0's flat parameter ID order so all ranks issue their
    # all-gathers in the same order.
    for optim_state_key in all_optim_state_keys:
        param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
            optim_state_key, None
        )
        assert param_key is not None, (
            "If use_orig_params is False, we must be able to find the "
            f"corresponding param id. {optim_state_key} {param_key}"
        )

        if optim_state_key.is_fsdp_managed:
            # All unflat_param_names of one FlatParameter share the same
            # FSDPParamInfo, so the first name suffices for the lookup.
            fsdp_param_info = fqn_to_fsdp_param_info[
                optim_state_key.unflat_param_names[0]
            ]
            unflat_state = _unflatten_optim_state(
                fsdp_param_info,
                optim_state_dict[param_key],
                to_save,
                shard_state,
                cpu_offload,
            )
            if to_save:
                assert len(unflat_state) == len(optim_state_key.unflat_param_names)
                fsdp_osd_state.update(
                    zip(optim_state_key.unflat_param_names, unflat_state)
                )
        elif to_save:
            assert len(optim_state_key.unflat_param_names) == 1
            unflat_param_name = optim_state_key.unflat_param_names[0]
            copied_state = copy.copy(optim_state_dict[param_key])
            if cpu_offload:
                for state_name, value in sorted_items(copied_state):
                    if torch.is_tensor(value):
                        copied_state[state_name] = value.cpu()
            fsdp_osd_state[unflat_param_name] = copied_state

    return fsdp_osd_state
|
| 1866 |
+
|
| 1867 |
+
|
| 1868 |
+
@torch.no_grad()
def _optim_state_dict(
    model: nn.Module,
    optim: torch.optim.Optimizer,
    optim_state_dict: Dict[str, Any],
    optim_input: Optional[
        Union[
            List[Dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ],
    rank0_only: bool,
    shard_state: bool,
    group: Optional[dist.ProcessGroup],
    using_optim_input: bool,
    use_orig_params: bool = False,
    cpu_offload: bool = True,
) -> Dict[str, Any]:
    """
    Consolidates the optimizer state and returns it as a :class:`dict`
    following the convention of :meth:`torch.optim.Optimizer.state_dict`,
    i.e. with keys ``"state"`` and ``"param_groups"``.
    The flat parameters in ``FSDP`` modules contained in ``model`` are mapped
    back to their unflattened parameters.

    Parameter keys are not well-defined. For a regular optimizer, the optimizer
    state_dict contains a mapping from parameter IDs to parameter states.
    Parameter IDs are the order of parameters in ``optim.param_groups()`` across
    all the groups. This API also allows user to pass ``optim_input`` for the
    mapping between parameters and parameter IDs. Using ``optim_input`` is being
    deprecated.

    If the optimizer is a ``NamedOptimizer``, the optimizer state_dict does not
    contain parameter IDs mapping but a mapping from parameter FQNs to parameter
    states. This API finds the mapping from FQNs to parameters if the optimizer
    is a ``NamedOptimizer``.

    If ``use_orig_params`` is True, each rank will have all FSDP-managed
    parameters but some of these parameters may be empty due to the sharding.
    For a regular optim.Optimizer, states for those empty parameters will
    not be initialized. So, when aggregating the FQNs across ranks, no assert
    will be raised on a rank even if it does not have all the states -- it is
    valid and FSDP knows how to aggregate them. However, FSDP has to ignore
    handling those parameters that are not managed by FSDP and do not exist on
    the local rank -- those are managed by other parallelisms and FSDP does not
    know how to handle/aggregate them.

    Args:
        model (nn.Module): Root module (which may or may not be a
            :class:`FullyShardedDataParallel` instance) whose parameters
            were passed into the optimizer ``optim``.
        optim (torch.optim.Optimizer): Optimizer for ``model`` 's
            parameters.
        rank0_only (bool): If ``True``, saves the populated :class:`dict`
            only on rank 0; if ``False``, saves it on all ranks. (Default:
            ``True``)
        shard_state (bool): If ``True``, shard and distribute all
            non-zero-dimension states.

    Returns:
        Dict[str, Any]: A :class:`dict` containing the optimizer state for
        ``model`` 's original unflattened parameters and including keys
        "state" and "param_groups" following the convention of
        :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=False``,
        then nonzero ranks return an empty :class:`dict`.
    """
    SimpleProfiler.reset()
    # ExitStack lets us close the "ALL" profiling scope manually before the
    # final dump instead of indenting the whole body in a `with` block.
    cm = ExitStack()
    cm.enter_context(SimpleProfiler.profile(SimpleProfiler.Type.ALL))
    _reset_flat_param_grad_info_if_needed(traversal_utils._get_fsdp_handles(model))
    # Whether this rank keeps a populated result (all ranks participate in
    # the communication below regardless).
    to_save = not rank0_only or dist.get_rank(group) == 0 or shard_state

    with SimpleProfiler.profile("preprocessing"):
        param_to_fqns = _get_param_to_fqns(model)
        flat_param_to_fqn = _get_flat_param_to_fqn(model)
        is_named_optimizer = _is_named_optimizer(optim_state_dict)

        # Map optimizer param keys (IDs or FQNs) to the actual parameters,
        # using the deprecated `optim_input` path only when requested.
        param_key_to_param = cast(
            Dict[Union[int, str], nn.Parameter],
            (
                _get_param_id_to_param_from_optim_input(model, optim_input)
                if using_optim_input
                else _get_param_key_to_param(
                    optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
                )
            ),
        )
        fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)

    with SimpleProfiler.profile("preprocessing_with_comm"):
        (
            all_optim_state_keys,
            optim_state_key_to_param_key,
        ) = _map_param_key_to_optim_keys(
            optim_state_dict,
            group,
            param_key_to_param,
            param_to_fqns,
            fqn_to_fsdp_param_info,
            merge_keys=use_orig_params,
        )

    with SimpleProfiler.profile("state_converting"):
        convert_fn = (
            _convert_state_with_orig_params
            if use_orig_params
            else _convert_state_with_flat_params
        )
        fsdp_osd_state = convert_fn(
            all_optim_state_keys,
            optim_state_key_to_param_key,
            fqn_to_fsdp_param_info,
            optim_state_dict["state"],
            to_save,
            shard_state,
            cpu_offload,
        )

    # At this point, communication is complete and ranks can return early if nothing
    # will be saved on that rank.
    if not to_save:
        return {}

    fsdp_osd: Dict[str, Any] = {"state": fsdp_osd_state}

    # Copy through any state entries FSDP does not recognize (user-defined or
    # externally managed), with a warning.
    flat_param_fqns = set(flat_param_to_fqn.values())
    for key, value in optim_state_dict["state"].items():
        if key in fsdp_osd_state:
            continue
        if key in flat_param_fqns:
            continue
        if key in param_key_to_param:
            continue
        # This key is not recognized by FSDP. It may be a user-defined state
        # or some parameters state that FSDP is unable to map from
        # ``optim.param_groups``.
        warnings.warn(
            f"Found a optim state, {key}, that FSDP cannot process. FSDP "
            "will directly copy everything to the returned state_dict. In "
            "most cases, this is a user-defined state that is not "
            "associated with any particular parameter. Another possible "
            "case is this state is managed by TorchRec. Otherwise, there may "
            " be a mismatched assumption of optim_state_dict of this mode."
        )
        fsdp_osd_state[key] = value

    if "param_groups" in optim_state_dict:
        fsdp_osd["param_groups"] = _unflatten_param_groups(
            optim_state_dict, param_key_to_param, param_to_fqns
        )

    cm.close()
    SimpleProfiler.dump_and_reset("FSDP _optim_state_dict() profiling: ")

    return fsdp_osd
|
| 2023 |
+
|
| 2024 |
+
|
| 2025 |
+
def _get_fqn_to_fsdp_param_info(model: nn.Module) -> Dict[str, FSDPParamInfo]:
    """
    Construct the mapping from a param's fqn to its corresponding ``FSDPParamInfo``
    if the param is managed by FSDP. Shared parameters, or original parameters that
    are shared across multiple nn.Modules, are required to belong to one and only
    one FSDP instance and thus correspond to one ``FlatParameter``. Within the one
    ``FlatParameter``, ``FlatParameter._fqns`` only stores the first FQN of a shared
    parameter. Thus, the keys in the mapping are guaranteed to map to unique parameters.
    """

    # Visitor invoked by ``_apply_to_modules`` for every submodule; populates
    # ``fqn_to_param_info`` for the FSDP-managed modules it encounters.
    def module_fn(module, prefix, tree_level, fqn_to_param_info):
        fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
        if fsdp_state is None:
            # Not an FSDP-managed module; nothing to record.
            return
        _lazy_init(fsdp_state, module)
        handle = _module_handle(fsdp_state, module)
        if not handle:
            return
        flat_param = handle.flat_param
        fsdp_param_info = FSDPParamInfo(fsdp_state, handle, {}, [])
        # NOTE: `idx` indexes into the data structures *without* padding
        # elements
        for idx, local_fqn in enumerate(flat_param._fqns):
            fqn = clean_tensor_name(prefix + local_fqn)
            if fqn in fqn_to_param_info:
                # The same FQN must always resolve to the same FlatParameter.
                assert fqn_to_param_info[fqn].handle.flat_param is flat_param, fqn
            fqn_to_param_info[fqn] = fsdp_param_info
            fsdp_param_info.param_indices[fqn] = idx
            # ``_params`` is populated only with ``use_orig_params=True``;
            # only then can per-param requires_grad be recorded.
            if flat_param._params is not None:
                fsdp_param_info.param_requires_grad.append(
                    flat_param._params[idx].requires_grad
                )

    def return_fn(fqn_to_param_info):
        return fqn_to_param_info

    fqn_to_param_info: Dict[str, FSDPParamInfo] = {}
    # FlatParameter._fqns stores the local fqn, starting from the root of the
    # FSDP. Using _apply_to_modules() with model (may not be the FSDP root
    # module) allows us to construct the global fqn.
    return _apply_to_modules(
        model,
        module_fn,
        return_fn,
        [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
        fqn_to_param_info,
    )
|
| 2072 |
+
|
| 2073 |
+
|
| 2074 |
+
@no_type_check
def _set_optim_use_dtensor(
    fsdp_state: _FSDPState,
    state_dict_settings: StateDictSettings,
) -> None:
    """
    Enable ``_use_dtensor`` on the optimizer state dict config when FSDP was
    initialized with a ``DeviceMesh``.

    Args:
        fsdp_state (_FSDPState): FSDP state; only its ``_device_mesh``
            attribute is inspected.
        state_dict_settings (StateDictSettings): settings whose
            ``optim_state_dict_config._use_dtensor`` flag is set.

    Raises:
        RuntimeError: if ``state_dict_type`` is ``LOCAL_STATE_DICT``, which is
            incompatible with ``DeviceMesh``.
    """
    # If device_mesh is passed in when initializing FSDP, we automatically turn the
    # _use_dtensor flag to be true for ShardedOptimStateDictConfig() if state_dict_type
    # has to be set to SHARDED_STATE_DICT.
    if getattr(fsdp_state, "_device_mesh", None):
        state_dict_type = state_dict_settings.state_dict_type
        if state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # Fix: pass a single message string. The previous code passed three
            # comma-separated strings, so the raised error rendered as a tuple
            # of strings instead of one readable message.
            raise RuntimeError(
                "Found state_dict_type LOCAL_STATE_DICT. "
                "DeviceMesh is not compatible with LOCAL_STATE_DICT. "
                "Please set state_dict_type to SHARDED_STATE_DICT to get "
                "DTensor state_dict."
            )
        else:
            state_dict_settings.optim_state_dict_config._use_dtensor = True
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NOTE: This file must be imported like
|
| 3 |
+
``import torch.distributed.fsdp._traversal_utils`` and not like
|
| 4 |
+
``from torch.distributed.fsdp._traversal_utils import ...`` to avoid circular
|
| 5 |
+
imports. For brevity, we may import the file as ``traversal_utils``.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import collections
|
| 9 |
+
from typing import Deque, List, Set, Tuple
|
| 10 |
+
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
from torch.distributed._composable.contract import _get_registry
|
| 13 |
+
from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
"""
|
| 17 |
+
[Note: FSDP State Traversal]
|
| 18 |
+
For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel``
|
| 19 |
+
module wrapping a fully sharded module, and for the non-wrapper code path,
|
| 20 |
+
``_FSDPState`` is an object that gets embedded on a fully sharded module.
|
| 21 |
+
See [Note: Fully Sharded Module] for the definition.
|
| 22 |
+
|
| 23 |
+
There are three common traversal idioms: Given a root module,
|
| 24 |
+
- ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree.
|
| 25 |
+
- ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the
|
| 26 |
+
tree (i.e. those with ``_is_root == True``).
|
| 27 |
+
- ``_get_fsdp_handles()`` returns all ``FlatParamHandle`` s in the tree.
|
| 28 |
+
|
| 29 |
+
All of these methods must take in the root module (i.e. an ``nn.Module``) and
|
| 30 |
+
not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph
|
| 31 |
+
traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _composable(module: nn.Module) -> bool:
    """Return whether ``module`` can compose with ``fully_shard``."""
    # TODO: Add any other composable APIs that are mutually exclusive.
    applied_apis = _get_registry(module)
    # A module with no registry has had no composable API applied, so it is
    # trivially compatible; otherwise it must not already be replicated.
    return applied_apis is None or "replicate" not in applied_apis
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# TODO (awgu): We may be able to remove this function if we retired the
|
| 47 |
+
# `use_orig_params=False` code path since so far we only need the module for
|
| 48 |
+
# `FlatParameter` registration, which is not needed for `use_orig_params=True`.
|
| 49 |
+
def _get_fsdp_states_with_modules(
|
| 50 |
+
module: nn.Module,
|
| 51 |
+
) -> Tuple[List[_FSDPState], List[nn.Module]]:
|
| 52 |
+
"""
|
| 53 |
+
Returns a tuple containing:
|
| 54 |
+
1. A list of the ``_FSDPState`` instances in the module tree rooted at
|
| 55 |
+
``module`` without any duplicates and following the ``module.modules()``
|
| 56 |
+
traversal order (which is assumed to be depth-first).
|
| 57 |
+
2. A corresponding list of the modules owning the states in the first list.
|
| 58 |
+
|
| 59 |
+
For the wrapper code path, both returned lists are the same, each
|
| 60 |
+
containing all ``FullyShardedDataParallel`` instances. For the composable
|
| 61 |
+
code path, this returns a list of all composable state instances and a list
|
| 62 |
+
of the corresponding fully sharded modules. See [Note: Fully Sharded
|
| 63 |
+
Module].
|
| 64 |
+
|
| 65 |
+
NOTE: The traversal does not proceed into any module annotated by an
|
| 66 |
+
incompatible API (e.g. ``replicate``).
|
| 67 |
+
"""
|
| 68 |
+
fsdp_states: List[_FSDPState] = []
|
| 69 |
+
fsdp_modules: List[nn.Module] = []
|
| 70 |
+
# Track the visited FSDP states since multiple modules may share the same
|
| 71 |
+
# one and we want to return a de-duplicated list
|
| 72 |
+
visited_fsdp_states: Set[_FSDPState] = set()
|
| 73 |
+
# Track the visited modules in case of shared modules, which implies the
|
| 74 |
+
# module graph is no longer a tree
|
| 75 |
+
visited_modules: Set[nn.Module] = set()
|
| 76 |
+
|
| 77 |
+
# Perform depth-first search from `module` to ensure that we do not
|
| 78 |
+
# traverse into an incompatible API's subtree (use DFS instead of BFS to
|
| 79 |
+
# match `.modules()` order)
|
| 80 |
+
deque: Deque[nn.Module] = collections.deque([module])
|
| 81 |
+
while deque:
|
| 82 |
+
submodule = deque.popleft()
|
| 83 |
+
visited_modules.add(submodule)
|
| 84 |
+
if not _composable(submodule):
|
| 85 |
+
continue
|
| 86 |
+
for child_module in reversed(list(submodule.children())):
|
| 87 |
+
if child_module not in visited_modules:
|
| 88 |
+
deque.appendleft(child_module)
|
| 89 |
+
optional_state = _get_module_fsdp_state(submodule)
|
| 90 |
+
if optional_state is not None and optional_state not in visited_fsdp_states:
|
| 91 |
+
visited_fsdp_states.add(optional_state)
|
| 92 |
+
fsdp_states.append(optional_state)
|
| 93 |
+
fsdp_modules.append(submodule)
|
| 94 |
+
return fsdp_states, fsdp_modules
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _get_fsdp_states(module: nn.Module) -> List[_FSDPState]:
|
| 98 |
+
"""See :func:`_get_fsdp_states_with_modules`."""
|
| 99 |
+
fsdp_states, _ = _get_fsdp_states_with_modules(module)
|
| 100 |
+
return fsdp_states
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _get_fsdp_handles(module: nn.Module) -> List:
|
| 104 |
+
"""
|
| 105 |
+
Returns all ``FlatParamHandle`` s in the module tree rooted at ``module``
|
| 106 |
+
following the rules in :func:`_get_fsdp_state`.
|
| 107 |
+
"""
|
| 108 |
+
handles = [
|
| 109 |
+
fsdp_state._handle
|
| 110 |
+
for fsdp_state in _get_fsdp_states(module)
|
| 111 |
+
if fsdp_state._handle is not None
|
| 112 |
+
]
|
| 113 |
+
return handles
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py
ADDED
|
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import contextlib
|
| 3 |
+
import warnings
|
| 4 |
+
from typing import cast, Generator
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.distributed.fsdp._traversal_utils as traversal_utils
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch.distributed.fsdp._common_utils import (
|
| 10 |
+
_FSDPState,
|
| 11 |
+
_get_module_fsdp_state,
|
| 12 |
+
_has_fsdp_params,
|
| 13 |
+
_module_handle,
|
| 14 |
+
HandleTrainingState,
|
| 15 |
+
TrainingState,
|
| 16 |
+
)
|
| 17 |
+
from torch.distributed.fsdp._runtime_utils import (
|
| 18 |
+
_lazy_init,
|
| 19 |
+
_reset_flat_param_grad_info_if_needed,
|
| 20 |
+
_reshard,
|
| 21 |
+
_reshard_grads,
|
| 22 |
+
_unshard,
|
| 23 |
+
_unshard_grads,
|
| 24 |
+
)
|
| 25 |
+
from torch.distributed.utils import _p_assert
|
| 26 |
+
|
| 27 |
+
from ._flat_param import FlatParamHandle
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
FLAT_PARAM = "_flat_param"
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@torch.no_grad()
def _writeback_to_local_shard(
    handle: FlatParamHandle,
    writeback_grad: bool,
):
    """
    For the handle, writes back this rank's shard of the unsharded
    flattened parameter to the sharded flattened parameter. If
    ``writeback_grad=True``, then writes back to the sharded gradient as
    well.

    Precondition: The handle's ``FlatParameter`` 's data points to the
    padded unsharded flattened parameter.
    """

    def _get_shard(flat_param_or_grad: torch.Tensor) -> torch.Tensor:
        # Extract this rank's slice of the given unsharded flat tensor.
        if handle.uses_sharded_strategy:
            # For sharded strategies, get the *unpadded* shard instead of
            # the *padded* shard to persist user changes to the padding
            # (though FSDP does not explicitly support this)
            shard, _ = FlatParamHandle._get_unpadded_shard(
                flat_param_or_grad,
                handle.rank,
                handle.world_size,
            )
            return shard
        # For `NO_SHARD`, the `flat_param` or its gradient may be modified,
        # so we write it back directly
        return flat_param_or_grad

    # Copy only the unpadded prefix; any trailing padding in `_local_shard`
    # is left untouched.
    param_shard = _get_shard(handle.flat_param)
    handle.flat_param._local_shard[: param_shard.numel()].copy_(param_shard)  # type: ignore[attr-defined]
    if writeback_grad:
        existing_grad = handle.sharded_grad
        if existing_grad is not None:
            # Only write back if a sharded gradient already exists; the
            # unsharded `.grad` must then exist as its source.
            assert handle.flat_param.grad is not None
            grad_shard = _get_shard(handle.flat_param.grad)
            existing_grad[: grad_shard.numel()].copy_(grad_shard)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _deregister_flat_param(state: _FSDPState, module: nn.Module) -> None:
    """
    De-registers the flattened parameter from the wrapped module, hiding it
    from ``nn.Module`` methods.

    We do not use ``del`` because we want ``FLAT_PARAM`` to always be an
    attribute but dynamically change whether it is visible to ``nn.Module``
    methods.
    """
    if not _has_fsdp_params(state, module):
        return
    # TODO: figure out the case for the composable APIs.
    wrapped_module = cast(nn.Module, module.module)
    wrapped_module._parameters.pop(FLAT_PARAM, None)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _register_flat_param(state: _FSDPState, module: nn.Module) -> None:
    """
    Registers the flattened parameter to the wrapped module, making it
    visible to ``nn.Module`` methods.

    We do not use :meth:`nn.Module.register_parameter` because we want
    ``FLAT_PARAM`` to always be an attribute but dynamically change whether
    it is visible to ``nn.Module`` methods.
    """
    handle = _module_handle(state, module)
    if not _has_fsdp_params(state, module):
        return
    # TODO: figure out the case for the composable APIs.
    wrapped_module = cast(nn.Module, module.module)
    wrapped_module._parameters[FLAT_PARAM] = handle.flat_param
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@contextlib.contextmanager
def _unflatten_as_params(state: _FSDPState, module: nn.Module) -> Generator:
    """
    Assumes that the flattened parameter is unsharded. When in the context,
    de-registers the flattened parameter and unflattens the original
    parameters as ``nn.Parameter`` views into the flattened parameter.
    After the context, re-registers the flattened parameter and restores
    the original parameters as ``Tensor`` views into the flattened
    parameter.
    """
    handle = _module_handle(state, module)
    if not handle:
        # No handle -> nothing to unflatten; behave as a no-op context.
        yield
    else:
        _deregister_flat_param(state, module)
        try:
            with handle.unflatten_as_params():
                yield
        finally:
            # NOTE(review): the flat parameter is re-registered only for
            # `use_orig_params=False`; for `use_orig_params=True` the
            # original parameters stay registered instead.
            if not handle._use_orig_params:
                _register_flat_param(state, module)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _validate_unshard_params_args(
|
| 126 |
+
state: _FSDPState,
|
| 127 |
+
writeback: bool,
|
| 128 |
+
rank0_only: bool,
|
| 129 |
+
offload_to_cpu: bool,
|
| 130 |
+
with_grads: bool,
|
| 131 |
+
) -> None:
|
| 132 |
+
if with_grads and (offload_to_cpu or not state._use_orig_params):
|
| 133 |
+
raise NotImplementedError(
|
| 134 |
+
f"with_grads={with_grads}, "
|
| 135 |
+
f"use_orig_params={state._use_orig_params}, "
|
| 136 |
+
f"offload_to_cpu={offload_to_cpu} "
|
| 137 |
+
f"is not supported yet"
|
| 138 |
+
)
|
| 139 |
+
if offload_to_cpu and state._handle and (not state._handle.uses_sharded_strategy):
|
| 140 |
+
raise NotImplementedError(
|
| 141 |
+
"offload_to_cpu=True and NO_SHARD is not supported yet"
|
| 142 |
+
)
|
| 143 |
+
if writeback and rank0_only:
|
| 144 |
+
# TODO: Rank 0 can broadcast the `FlatParameter` to allow all ranks to
|
| 145 |
+
# persist the changes.
|
| 146 |
+
raise NotImplementedError(
|
| 147 |
+
"writeback=True and rank0_only=True is not supported yet"
|
| 148 |
+
)
|
| 149 |
+
if offload_to_cpu and not rank0_only:
|
| 150 |
+
warnings.warn(
|
| 151 |
+
"offload_to_cpu=True and rank0_only=False may result in the"
|
| 152 |
+
"unsharded parameters being redundantly copied to CPU memory for "
|
| 153 |
+
"GPUs sharing the same CPU memory, which risks CPU OOM. We "
|
| 154 |
+
"recommend using offload_to_cpu=True with rank0_only=True."
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@contextlib.contextmanager
def _unshard_fsdp_state_params(
    module: nn.Module,
    state: _FSDPState,
    writeback: bool,
    rank0_only: bool,
    offload_to_cpu: bool,
    with_grads: bool,
):
    """
    This unshards the parameters for a single FSDP state ``state`` that
    corresponds to ``module``.

    While inside the context, the handle's training state is
    ``SUMMON_FULL_PARAMS``; on exit the parameters (and optionally
    gradients) are resharded and the handle returns to ``IDLE``.
    """
    _validate_unshard_params_args(
        state, writeback, rank0_only, offload_to_cpu, with_grads
    )
    state._device_handle.synchronize()
    # If handles are shared by other module(s), the handle may be already unsharded.
    maybe_handle = _module_handle(state, module)
    handle = None
    if (
        maybe_handle
        and maybe_handle._training_state != HandleTrainingState.SUMMON_FULL_PARAMS
    ):
        handle = maybe_handle
    if not handle:
        # Nothing to unshard (no handle, or already summoned elsewhere).
        yield
        return

    assert (
        handle._training_state == HandleTrainingState.IDLE
    ), f"Expects the handle training to be IDLE but got {handle._training_state}"

    handle._training_state = HandleTrainingState.SUMMON_FULL_PARAMS

    _reset_flat_param_grad_info_if_needed(handle)
    # Record whether resharding should free the unsharded flat parameter
    # *before* unsharding changes the handle's state.
    free_unsharded_flat_param = handle.needs_unshard()
    # No need to call `wait_stream()` since we unshard in the computation
    # stream directly
    computation_stream = state._device_handle.current_stream()
    _unshard(state, handle, computation_stream, computation_stream)
    if with_grads:
        _unshard_grads(handle)

    if rank0_only and state.rank != 0:
        # Free the unsharded flattened parameter early
        _reshard(state, handle, free_unsharded_flat_param)
        if with_grads:
            _reshard_grads(handle)
        try:
            yield
        finally:
            handle._training_state = HandleTrainingState.IDLE
    else:
        # Unflatten the unsharded flattened parameters
        with contextlib.ExitStack() as stack:
            # Invariant: rank == 0 or !rank0_only
            if offload_to_cpu and handle.uses_sharded_strategy:
                stack.enter_context(handle.to_cpu())
                # NOTE: Since PyTorch enforces that a parameter and its
                # gradients need to match metadata (e.g. device), we must
                # move gradients to CPU *after* we move parameters.
            # NOTE: This assumes 1 `FlatParameter`
            if not state._use_orig_params:
                stack.enter_context(_unflatten_as_params(state, module))
            try:
                yield
            finally:
                # Close the stack first so views are restored before any
                # writeback and resharding.
                stack.close()
                if writeback:
                    _writeback_to_local_shard(handle, with_grads)
                _reshard(state, handle, free_unsharded_flat_param)
                if with_grads:
                    _reshard_grads(handle)
                handle._training_state = HandleTrainingState.IDLE
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
@contextlib.contextmanager
def _unshard_params_for_summon(
    module: nn.Module,
    state: _FSDPState,
    writeback: bool,
    rank0_only: bool,
    offload_to_cpu: bool,
    with_grads: bool,
):
    """
    Unshards the parameters for one FSDP ``state`` for a manual "summon full
    params" call, guarding against reentrancy and use during forward/backward.
    While inside the context, ``state.training_state`` is
    ``SUMMON_FULL_PARAMS``; it is restored to ``IDLE`` on exit.

    Raises:
        AssertionError: If called during forward/backward or while already
            summoning full parameters.
    """
    _validate_unshard_params_args(
        state, writeback, rank0_only, offload_to_cpu, with_grads
    )
    # Lazy initialization may not have run yet if no forward pass happened.
    _lazy_init(state, module)
    if state.training_state == TrainingState.FORWARD_BACKWARD:
        raise AssertionError(
            "Cannot manually unshard parameters during forward/backward"
        )
    elif state.training_state == TrainingState.SUMMON_FULL_PARAMS:
        raise AssertionError(
            "Cannot manually unshard parameters when already unsharding parameters"
        )
    with _unshard_fsdp_state_params(
        module=module,
        state=state,
        writeback=writeback,
        rank0_only=rank0_only,
        offload_to_cpu=offload_to_cpu,
        with_grads=with_grads,
    ):
        try:
            state.training_state = TrainingState.SUMMON_FULL_PARAMS
            yield
        finally:
            state.training_state = TrainingState.IDLE
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
@contextlib.contextmanager
def _unshard_params(
    module: nn.Module,
    recurse: bool,
    writeback: bool,
    rank0_only: bool,
    offload_to_cpu: bool,
    with_grads: bool,
):
    """
    This unshards FSDP-managed parameters for all modules with FSDP applied in
    the module tree rooted at ``module``.

    With ``recurse=False``, only ``module``'s own FSDP state (if any) is
    unsharded; otherwise all states found by the traversal are unsharded
    together via a single ``ExitStack``.
    """
    if not recurse:
        optional_state = _get_module_fsdp_state(module)
        if optional_state is None:
            # No FSDP state on this module: act as a no-op context.
            with contextlib.nullcontext():
                yield
            return
        states_and_modules = ([optional_state], [module])
    else:
        states_and_modules = traversal_utils._get_fsdp_states_with_modules(module)
    with contextlib.ExitStack() as stack:
        # NOTE: `module` is intentionally rebound here to each owning module.
        for state, module in zip(*states_and_modules):
            stack.enter_context(
                _unshard_params_for_summon(
                    module=module,
                    state=state,
                    writeback=writeback,
                    rank0_only=rank0_only,
                    offload_to_cpu=offload_to_cpu,
                    with_grads=with_grads,
                )
            )
        yield
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def _deregister_orig_params(state: _FSDPState, module: nn.Module) -> None:
    """
    Deregisters the original parameters; registers the ``FlatParameter``.
    """
    handle = _module_handle(state, module)
    if handle:
        _p_assert(
            handle._use_orig_params,
            f"Inconsistent `_use_orig_params` -- FSDP: {state._use_orig_params} "
            f"handle: {handle._use_orig_params}",
        )
        handle._deregister_orig_params()
        _register_flat_param(state, module)
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def _register_orig_params(state: _FSDPState, module: nn.Module) -> None:
    """
    Deregisters the ``FlatParameter``; registers the original parameters.
    """
    handle = _module_handle(state, module)
    if not handle:
        return
    _deregister_flat_param(state, module)
    flat_param_is_sharded = handle.is_sharded(handle.flat_param)
    if flat_param_is_sharded:
        handle._use_sharded_views()
        handle._use_sharded_grad_views()
    else:
        handle._use_unsharded_views(as_params=True)
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import collections
|
| 3 |
+
import functools
|
| 4 |
+
import inspect
|
| 5 |
+
import warnings
|
| 6 |
+
from functools import partial
|
| 7 |
+
from typing import Any, Callable, Dict, List, Set, Tuple, Type, Union
|
| 8 |
+
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
from torch.distributed.fsdp._common_utils import (
|
| 11 |
+
_get_module_fsdp_state,
|
| 12 |
+
_override_module_mixed_precision,
|
| 13 |
+
)
|
| 14 |
+
from torch.distributed.fsdp.wrap import (
|
| 15 |
+
_construct_wrap_fn,
|
| 16 |
+
_or_policy,
|
| 17 |
+
_Policy,
|
| 18 |
+
_post_order_apply,
|
| 19 |
+
_recursive_wrap,
|
| 20 |
+
_run_mixed_precision_override_policy,
|
| 21 |
+
_wrap_module_cls_individually,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _auto_wrap(
    root_module: nn.Module,
    policy: Union[Callable, _Policy],
    ignored_modules: Set[nn.Module],
    ignored_params: Set[nn.Parameter],
    root_kwargs: Dict[str, Any],
    fsdp_fn: Callable,  # e.g. `FullyShardedDataParallel` or `fully_shard`
):
    """
    Auto wraps modules in ``root_module`` 's tree according to ``policy``
    following a post-order traversal.

    Precondition: ``root_kwargs`` should contain all arguments except
    ``module``. This function accepts the kwargs dict directly since it gets
    forwarded into the post-order traversal function.

    Two code paths exist: ``_Policy`` objects run the policy up front and
    apply the wrap function in post order; plain callables fall through to
    the legacy ``_recursive_wrap`` path.
    """
    mixed_precision = root_kwargs["mixed_precision"]
    # Wrapper path (class) vs. composable path (function) changes the kwarg
    # name used for the policy below.
    is_wrapper = inspect.isclass(fsdp_fn)
    # TODO: We may relax this no-nested-wrapping constraint to support manual
    # wrapping followed by auto wrapping.
    _check_nested_wrapping(root_module)

    if isinstance(policy, _Policy):
        # Null out the policy kwarg so the wrapped instances do not re-run
        # auto wrapping themselves.
        root_kwargs["auto_wrap_policy" if is_wrapper else "policy"] = None
        target_module_to_kwargs = policy._run_policy(
            root_module, ignored_modules, root_kwargs
        )
        if mixed_precision is not None:
            target_module_to_kwargs = _run_mixed_precision_override_policy(
                root_module,
                mixed_precision._module_classes_to_ignore,
                ignored_modules,
                root_kwargs,
                target_module_to_kwargs,
            )
            overridden_module_classes = _override_module_mixed_precision(
                root_module, mixed_precision._module_classes_to_ignore
            )
            _warn_on_overridden_mixed_precision(overridden_module_classes)
        use_orig_params = root_kwargs.get("use_orig_params", False)
        # Check frozen/non-frozen uniformity per would-be-wrapped module
        # (hard error when `use_orig_params=False`, warning otherwise).
        _validate_frozen_params(
            root_module,
            set(target_module_to_kwargs.keys()),
            ignored_params,
            use_orig_params,
        )
        wrap_fn = _construct_wrap_fn(root_module, target_module_to_kwargs, fsdp_fn)
        _post_order_apply(root_module, wrap_fn)
        return

    # Legacy callable-policy path
    recursive_wrap_kwargs = {
        "module": root_module,
        "auto_wrap_policy": policy,
        "wrapper_cls": fsdp_fn,
        "ignored_modules": ignored_modules,
        "ignored_params": ignored_params,
        "only_wrap_children": True,
    }
    if mixed_precision is not None:
        # Wrap modules of the ignored types separately and register forward
        # hooks to cast to fp32 and back to the original dtype, respectively
        overridden_module_classes = _override_module_mixed_precision(
            root_module, mixed_precision._module_classes_to_ignore
        )
        policy = functools.partial(
            _or_policy,
            policies=[
                policy,
                partial(
                    _wrap_module_cls_individually,
                    module_classes=mixed_precision._module_classes_to_ignore,
                ),
            ],
        )
        recursive_wrap_kwargs["auto_wrap_policy"] = policy
        _warn_on_overridden_mixed_precision(overridden_module_classes)
    _recursive_wrap(**recursive_wrap_kwargs, **root_kwargs)  # type: ignore[arg-type]
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _check_nested_wrapping(root_module: nn.Module):
|
| 105 |
+
for module_name, module in root_module.named_modules():
|
| 106 |
+
if _get_module_fsdp_state(module) is not None:
|
| 107 |
+
raise ValueError(
|
| 108 |
+
"FSDP auto wrapping requires modules to not already have "
|
| 109 |
+
f"FSDP applied but found {module_name} in\n{root_module}"
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _warn_on_overridden_mixed_precision(
|
| 114 |
+
overridden_module_classes: Set[Type[nn.Module]],
|
| 115 |
+
):
|
| 116 |
+
if len(overridden_module_classes) == 0:
|
| 117 |
+
return
|
| 118 |
+
warnings.warn(
|
| 119 |
+
"Both mixed precision and an auto_wrap_policy were specified to FSDP, "
|
| 120 |
+
f"where the wrapped module has submodules of type:\n{overridden_module_classes}\n"
|
| 121 |
+
"These modules will be wrapped as separate FSDP instacnes with mixed "
|
| 122 |
+
"precision disabled."
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def _validate_frozen_params(
    root_module: nn.Module,
    modules_to_wrap: Set[nn.Module],
    ignored_params: Set[nn.Parameter],
    use_orig_params: bool,
):
    """
    This checks that, given ``modules_to_wrap``, each module would manage
    parameters that are uniformly frozen or non-frozen. This uniformity
    requirement is strict for ``use_orig_params=False`` (hard error) and highly
    recommended for ``use_orig_params=True`` (user warning).
    """
    # Post order mirrors wrapping order, so each module only sees the
    # parameters it would actually manage (children already claimed theirs
    # via `visited_modules`).
    post_order_named_modules = _get_post_order_named_modules(root_module)
    visited_modules: Set[nn.Module] = set()
    for module_name, module in post_order_named_modules:
        if module in modules_to_wrap:
            param_to_fqn = _get_managed_param_to_fqn(
                module, ignored_params, visited_modules, module_name
            )
            # Partition managed params by frozen-ness and tally numel for the
            # memory-overhead message below.
            frozen_param_fqns: List[str] = []
            frozen_param_numel = 0
            nonfrozen_param_fqns: List[str] = []
            nonfrozen_param_numel = 0
            for param, fqn in param_to_fqn.items():
                if param.requires_grad:
                    nonfrozen_param_fqns.append(fqn)
                    nonfrozen_param_numel += param.numel()
                else:
                    frozen_param_fqns.append(fqn)
                    frozen_param_numel += param.numel()
            if len(frozen_param_fqns) > 0 and len(nonfrozen_param_fqns) > 0:
                msg = f"{module_name} has both parameters with requires_grad=True and False."
                if use_orig_params:
                    total_param_numel = frozen_param_numel + nonfrozen_param_numel
                    msg += (
                        " We do not recommend wrapping such modules since "
                        "the gradient memory usage will be higher than expected "
                        f"({total_param_numel} numel instead of {nonfrozen_param_numel} numel "
                        "before sharding via reduce-scatter). "
                    )
                else:
                    msg += " FSDP does not support wrapping such modules when use_orig_params=False. "
                msg += "If possible, wrap the frozen parameters with FSDP separately.\n"
                msg += (
                    f"The following parameters have requires_grad=True:\n{nonfrozen_param_fqns}\n"
                    f"The following parameters have requires_grad=False:\n{frozen_param_fqns}"
                )
                # Warn (soft) vs. raise (hard) depending on `use_orig_params`.
                if use_orig_params:
                    warnings.warn(msg)
                else:
                    raise ValueError(msg)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def _get_post_order_named_modules(
|
| 180 |
+
root_module: nn.Module,
|
| 181 |
+
) -> List[Tuple[str, nn.Module]]:
|
| 182 |
+
"""
|
| 183 |
+
This returns the named modules following a post-order traversal, which is a
|
| 184 |
+
valid reverse topological sort. We achieve this using the reverse of a
|
| 185 |
+
stack-based DFS order instead of reversing ``root_module.named_modules()``
|
| 186 |
+
since the former gives the modules in registration order at each level in
|
| 187 |
+
the module tree (as opposed to the reverse), which allows us to error/warn
|
| 188 |
+
on the first registered module that violates the condition.
|
| 189 |
+
|
| 190 |
+
For example, consider the following module structure:
|
| 191 |
+
M(
|
| 192 |
+
S1(),
|
| 193 |
+
S2(
|
| 194 |
+
SS1(),
|
| 195 |
+
SS2(),
|
| 196 |
+
),
|
| 197 |
+
S3(),
|
| 198 |
+
)
|
| 199 |
+
The reverse DFS order is [S1, SS1, SS2, S2, S3, M], while the reverse
|
| 200 |
+
``named_modules()`` order is [S3, SS2, SS1, S2, S1, M].
|
| 201 |
+
"""
|
| 202 |
+
visited_modules = {root_module}
|
| 203 |
+
stack = [("", root_module)]
|
| 204 |
+
# Append and reverse at the end for linear-time algorithm
|
| 205 |
+
reverse_post_order_named_modules: List[Tuple[str, nn.Module]] = []
|
| 206 |
+
while stack:
|
| 207 |
+
module_name, module = stack.pop()
|
| 208 |
+
reverse_post_order_named_modules.append((module_name, module))
|
| 209 |
+
for child_module_name, child_module in module.named_children():
|
| 210 |
+
if child_module is None: # only for overrides of `named_children()`
|
| 211 |
+
continue
|
| 212 |
+
if child_module not in visited_modules:
|
| 213 |
+
visited_modules.add(child_module)
|
| 214 |
+
if module_name != "":
|
| 215 |
+
child_module_name = module_name + "." + child_module_name
|
| 216 |
+
stack.append((child_module_name, child_module))
|
| 217 |
+
post_order_named_modules = list(reversed(reverse_post_order_named_modules))
|
| 218 |
+
return post_order_named_modules
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def _get_managed_param_to_fqn(
|
| 222 |
+
module_to_wrap: nn.Module,
|
| 223 |
+
ignored_params: Set[nn.Parameter],
|
| 224 |
+
visited_modules: Set[nn.Module],
|
| 225 |
+
root_prefix: str,
|
| 226 |
+
) -> Dict[nn.Parameter, str]:
|
| 227 |
+
"""
|
| 228 |
+
This returns a dict that maps managed parameter to its FQN for the given
|
| 229 |
+
``module_to_wrap``. The dict's keys are exactly the parameters that would
|
| 230 |
+
be managed by the module, where this is achieved by calling this function
|
| 231 |
+
on the modules to wrap in reverse topological order, destructively updating
|
| 232 |
+
``visited_modules``, and not traversing into those modules. The FQNs are
|
| 233 |
+
prefixed from the root (via ``root_prefix``) to be more informative.
|
| 234 |
+
|
| 235 |
+
NOTE: This function is meant to be called pre-wrapping and iteratively in
|
| 236 |
+
reverse topological order to cover the full module tree. This differs from
|
| 237 |
+
the ``_get_param_to_fqn()`` function meant to be called post-wrapping and
|
| 238 |
+
on the full module tree in one shot. Given those differences, we do not try
|
| 239 |
+
to unify the two.
|
| 240 |
+
"""
|
| 241 |
+
param_to_fqn: Dict[nn.Parameter, str] = {}
|
| 242 |
+
# Run BFS (or any tree traversal works)
|
| 243 |
+
queue = collections.deque([(module_to_wrap, root_prefix)])
|
| 244 |
+
visited_modules.add(module_to_wrap)
|
| 245 |
+
while queue:
|
| 246 |
+
module, prefix = queue.popleft()
|
| 247 |
+
for param_name, param in module.named_parameters(recurse=False):
|
| 248 |
+
if param not in ignored_params:
|
| 249 |
+
fqn = param_name if prefix == "" else prefix + "." + param_name
|
| 250 |
+
param_to_fqn[param] = fqn
|
| 251 |
+
for child_module_name, child_module in module.named_children():
|
| 252 |
+
if child_module is None: # only for overrides of `named_children()`
|
| 253 |
+
continue
|
| 254 |
+
if child_module not in visited_modules:
|
| 255 |
+
visited_modules.add(child_module)
|
| 256 |
+
child_prefix = (
|
| 257 |
+
child_module_name
|
| 258 |
+
if prefix == ""
|
| 259 |
+
else prefix + "." + child_module_name
|
| 260 |
+
)
|
| 261 |
+
queue.append((child_module, child_prefix))
|
| 262 |
+
return param_to_fqn
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
from collections import abc, defaultdict
|
| 4 |
+
from typing import Any, Dict, Iterable, List, Optional, overload, Sequence, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.distributed as dist
|
| 8 |
+
from torch.amp.grad_scaler import _MultiDeviceReplicator, GradScaler, OptState
|
| 9 |
+
from torch.distributed.distributed_c10d import ProcessGroup
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _refresh_per_optimizer_state() -> Dict[str, Any]:
|
| 16 |
+
return {"stage": OptState.READY, "found_inf_per_device": {}}
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _is_supported_device(tensor: torch.Tensor) -> bool:
|
| 20 |
+
return tensor.is_cuda or tensor.device.type in (
|
| 21 |
+
"xla",
|
| 22 |
+
"cpu",
|
| 23 |
+
"hpu",
|
| 24 |
+
"mtia",
|
| 25 |
+
torch._C._get_privateuse1_backend_name(),
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class _GeneralMultiDeviceReplicator(_MultiDeviceReplicator):
|
| 30 |
+
"""
|
| 31 |
+
Lazily serves tensor to request device. This class extends
|
| 32 |
+
_MultiDeviceReplicator to allow support for "cpu" as a device.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def __init__(self, master_tensor: torch.Tensor) -> None:
|
| 36 |
+
assert _is_supported_device(master_tensor)
|
| 37 |
+
self.master = master_tensor
|
| 38 |
+
self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class ShardedGradScaler(GradScaler):
|
| 42 |
+
"""
|
| 43 |
+
ShardedGradScaler helps perform gradient scaling in a shard aware manner. It extends
|
| 44 |
+
functionality from GradScaler:
|
| 45 |
+
* Supports Pytorch DDP and FSDP implementations
|
| 46 |
+
* Support CPU offloaded tensors (as used in fully sharded data parallel[FSDP])
|
| 47 |
+
* Supports the custom Mixed Precision loss dtype (fp16, bf16) that FSDP returns
|
| 48 |
+
* Sync inf/nan for scaled gradient tensors on any torch.device (where tensors are placed) across
|
| 49 |
+
nodes
|
| 50 |
+
|
| 51 |
+
Example::
|
| 52 |
+
|
| 53 |
+
# Creates a ShardedGradScaler once at the beginning of training.
|
| 54 |
+
scaler = ShardedGradScaler()
|
| 55 |
+
|
| 56 |
+
for epoch in epochs:
|
| 57 |
+
for input, target in data:
|
| 58 |
+
optimizer.zero_grad()
|
| 59 |
+
output = model(input)
|
| 60 |
+
loss = loss_fn(output, target)
|
| 61 |
+
|
| 62 |
+
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
|
| 63 |
+
scaler.scale(loss).backward()
|
| 64 |
+
|
| 65 |
+
# scaler.step() first unscales gradients of the optimizer's params.
|
| 66 |
+
# If gradients don't contain infs/NaNs, optimizer.step() is then called,
|
| 67 |
+
# otherwise, optimizer.step() is skipped.
|
| 68 |
+
scaler.step(optimizer)
|
| 69 |
+
|
| 70 |
+
# Updates the scale for next iteration.
|
| 71 |
+
scaler.update()
|
| 72 |
+
|
| 73 |
+
See :class:`GradScaler` for explanation of scaling/unscaling and more use cases.
|
| 74 |
+
|
| 75 |
+
Args:
|
| 76 |
+
init_scale (float, optional, default=2.**16): Initial scale factor.
|
| 77 |
+
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
|
| 78 |
+
:meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
|
| 79 |
+
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
|
| 80 |
+
:meth:`update` if inf/NaN gradients occur in an iteration.
|
| 81 |
+
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
|
| 82 |
+
that must occur for the scale to be multiplied by ``growth_factor``.
|
| 83 |
+
enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply
|
| 84 |
+
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
|
| 85 |
+
Default: ``True``
|
| 86 |
+
process_group (ProcessGroup, optional, default=torch.distributed.group.WORLD):
|
| 87 |
+
process group for sharding
|
| 88 |
+
"""
|
| 89 |
+
|
| 90 |
+
def __init__(
|
| 91 |
+
self,
|
| 92 |
+
device: str = "cuda",
|
| 93 |
+
init_scale: float = 2.0**16,
|
| 94 |
+
backoff_factor: float = 0.5,
|
| 95 |
+
growth_factor: float = 2.0,
|
| 96 |
+
growth_interval: int = 2000,
|
| 97 |
+
enabled: bool = True,
|
| 98 |
+
process_group: Optional[ProcessGroup] = dist.group.WORLD,
|
| 99 |
+
) -> None:
|
| 100 |
+
super().__init__(
|
| 101 |
+
device,
|
| 102 |
+
init_scale=init_scale,
|
| 103 |
+
backoff_factor=backoff_factor,
|
| 104 |
+
growth_factor=growth_factor,
|
| 105 |
+
growth_interval=growth_interval,
|
| 106 |
+
enabled=enabled,
|
| 107 |
+
)
|
| 108 |
+
if self._enabled:
|
| 109 |
+
self.process_group = process_group
|
| 110 |
+
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
|
| 111 |
+
|
| 112 |
+
@overload
|
| 113 |
+
def scale(self, outputs: torch.Tensor) -> torch.Tensor:
|
| 114 |
+
...
|
| 115 |
+
|
| 116 |
+
@overload
|
| 117 |
+
def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]:
|
| 118 |
+
...
|
| 119 |
+
|
| 120 |
+
@overload
|
| 121 |
+
def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
|
| 122 |
+
...
|
| 123 |
+
|
| 124 |
+
@overload
|
| 125 |
+
def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]:
|
| 126 |
+
...
|
| 127 |
+
|
| 128 |
+
def scale(
|
| 129 |
+
self, outputs: Union[torch.Tensor, Iterable[torch.Tensor]]
|
| 130 |
+
) -> Union[torch.Tensor, Iterable[torch.Tensor]]:
|
| 131 |
+
if not self._enabled:
|
| 132 |
+
return outputs
|
| 133 |
+
|
| 134 |
+
if isinstance(outputs, torch.Tensor):
|
| 135 |
+
assert _is_supported_device(outputs)
|
| 136 |
+
if self._scale is None:
|
| 137 |
+
self._lazy_init_scale_growth_tracker(outputs.device)
|
| 138 |
+
assert self._scale is not None
|
| 139 |
+
scaled_output = outputs * self._scale.to(
|
| 140 |
+
device=outputs.device, non_blocking=True
|
| 141 |
+
)
|
| 142 |
+
# Here we ensure the return dtype is the same as the outputs dtype.
|
| 143 |
+
# For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
|
| 144 |
+
# format (fp16, bf16) and so the scaled loss should be of the same dtype.
|
| 145 |
+
return scaled_output.type(outputs.dtype)
|
| 146 |
+
|
| 147 |
+
stash: List[_GeneralMultiDeviceReplicator] = []
|
| 148 |
+
|
| 149 |
+
def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]):
|
| 150 |
+
if isinstance(val, torch.Tensor):
|
| 151 |
+
assert _is_supported_device(val)
|
| 152 |
+
if len(stash) == 0:
|
| 153 |
+
if self._scale is None:
|
| 154 |
+
self._lazy_init_scale_growth_tracker(val.device)
|
| 155 |
+
assert self._scale is not None
|
| 156 |
+
stash.append(_GeneralMultiDeviceReplicator(self._scale))
|
| 157 |
+
scaled_val = val * stash[0].get(val.device)
|
| 158 |
+
# Here we ensure the return dtype is the same as the outputs dtype.
|
| 159 |
+
# For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
|
| 160 |
+
# format (fp16, bf16) and so the scaled loss should be of the same dtype.
|
| 161 |
+
return scaled_val.type(val.dtype)
|
| 162 |
+
if isinstance(val, abc.Iterable):
|
| 163 |
+
iterator = map(apply_scale, val)
|
| 164 |
+
if isinstance(val, (list, tuple)):
|
| 165 |
+
return type(val)(iterator)
|
| 166 |
+
return iterator
|
| 167 |
+
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
|
| 168 |
+
|
| 169 |
+
return apply_scale(outputs)
|
| 170 |
+
|
| 171 |
+
def _foreach_non_finite_check_and_unscale_cpu_(
|
| 172 |
+
self,
|
| 173 |
+
grads: Sequence[torch.Tensor],
|
| 174 |
+
found_inf: torch.Tensor,
|
| 175 |
+
inv_scale: torch.Tensor,
|
| 176 |
+
) -> None:
|
| 177 |
+
if len(grads) == 0:
|
| 178 |
+
return
|
| 179 |
+
assert inv_scale.numel() == 1, "inv_scale must be a 1-element tensor."
|
| 180 |
+
assert found_inf.numel() == 1, "found_inf must be a 1-element tensor."
|
| 181 |
+
|
| 182 |
+
for grad in grads:
|
| 183 |
+
if grad.device.type != "cpu":
|
| 184 |
+
logger.error(
|
| 185 |
+
"tensor device is %s but was expected to be ``cpu``",
|
| 186 |
+
grad.device,
|
| 187 |
+
)
|
| 188 |
+
raise ValueError(
|
| 189 |
+
"Gradients were found on a non-CPU device when"
|
| 190 |
+
" expected to be on CPU."
|
| 191 |
+
)
|
| 192 |
+
if (
|
| 193 |
+
torch.isinf(grad).any().item() is True
|
| 194 |
+
or torch.isnan(grad).any().item() is True
|
| 195 |
+
):
|
| 196 |
+
found_inf.data = torch.tensor([1.0])
|
| 197 |
+
break
|
| 198 |
+
else:
|
| 199 |
+
grad.data *= inv_scale.item()
|
| 200 |
+
|
| 201 |
+
def _unscale_grads_(
|
| 202 |
+
self,
|
| 203 |
+
optimizer: torch.optim.Optimizer,
|
| 204 |
+
inv_scale: torch.Tensor,
|
| 205 |
+
found_inf: torch.Tensor,
|
| 206 |
+
allow_fp16: bool = True,
|
| 207 |
+
) -> Dict[torch.device, torch.Tensor]:
|
| 208 |
+
per_device_inv_scale = _GeneralMultiDeviceReplicator(inv_scale)
|
| 209 |
+
per_device_found_inf = _GeneralMultiDeviceReplicator(found_inf)
|
| 210 |
+
|
| 211 |
+
# To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
|
| 212 |
+
# There could be thousands of grads, so we'd like to iterate through them just once.
|
| 213 |
+
# However, we don't know their devices or dtypes in advance.
|
| 214 |
+
|
| 215 |
+
# https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
|
| 216 |
+
# Google says mypy struggles with defaultdicts type annotations.
|
| 217 |
+
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
|
| 218 |
+
with torch.no_grad():
|
| 219 |
+
for group in optimizer.param_groups:
|
| 220 |
+
for param in group["params"]:
|
| 221 |
+
if param.grad is None:
|
| 222 |
+
continue
|
| 223 |
+
if (not allow_fp16) and param.grad.dtype == torch.float16:
|
| 224 |
+
raise ValueError("Attempting to unscale FP16 gradients.")
|
| 225 |
+
if param.grad.is_sparse:
|
| 226 |
+
# is_coalesced() == False means the sparse grad has values with duplicate indices.
|
| 227 |
+
# coalesce() deduplicates indices and adds all values that have the same index.
|
| 228 |
+
# For scaled fp16 values, there's a good chance coalescing will cause overflow,
|
| 229 |
+
# so we should check the coalesced _values().
|
| 230 |
+
if param.grad.dtype is torch.float16:
|
| 231 |
+
# coalesce is not supported in torch.float16
|
| 232 |
+
param_grad_fp32 = param.grad.type(torch.float32).coalesce()
|
| 233 |
+
param.grad = param_grad_fp32.type(torch.float16)
|
| 234 |
+
to_unscale = param.grad._values()
|
| 235 |
+
else:
|
| 236 |
+
to_unscale = param.grad
|
| 237 |
+
|
| 238 |
+
per_device_and_dtype_grads[to_unscale.device][
|
| 239 |
+
to_unscale.dtype
|
| 240 |
+
].append(to_unscale)
|
| 241 |
+
|
| 242 |
+
for device, per_dtype_grads in per_device_and_dtype_grads.items():
|
| 243 |
+
for grads in per_dtype_grads.values():
|
| 244 |
+
if grads[0].device.type == "cpu":
|
| 245 |
+
self._foreach_non_finite_check_and_unscale_cpu_(
|
| 246 |
+
grads,
|
| 247 |
+
per_device_found_inf.get(device),
|
| 248 |
+
per_device_inv_scale.get(device),
|
| 249 |
+
)
|
| 250 |
+
else:
|
| 251 |
+
torch._amp_foreach_non_finite_check_and_unscale_(
|
| 252 |
+
grads,
|
| 253 |
+
per_device_found_inf.get(device),
|
| 254 |
+
per_device_inv_scale.get(device),
|
| 255 |
+
)
|
| 256 |
+
# There exist contexts (e.g. w/ `use_orig_params=True`) wherein some
|
| 257 |
+
# ranks may have no (non-zero sized) parameter shards, necessitating the
|
| 258 |
+
# initialization of `per_device_found_inf._per_device_tensors` here
|
| 259 |
+
if not per_device_found_inf._per_device_tensors:
|
| 260 |
+
assert self._scale is not None
|
| 261 |
+
per_device_found_inf.get(self._scale.device)
|
| 262 |
+
return per_device_found_inf._per_device_tensors
|
| 263 |
+
|
| 264 |
+
def unscale_(self, optimizer: torch.optim.Optimizer) -> None:
|
| 265 |
+
if not self._enabled:
|
| 266 |
+
return
|
| 267 |
+
|
| 268 |
+
self._check_scale_growth_tracker("unscale_")
|
| 269 |
+
|
| 270 |
+
optimizer_state = self._per_optimizer_states[id(optimizer)]
|
| 271 |
+
|
| 272 |
+
if optimizer_state["stage"] is OptState.UNSCALED:
|
| 273 |
+
raise RuntimeError(
|
| 274 |
+
"unscale_() has already been called on this optimizer since the last update()."
|
| 275 |
+
)
|
| 276 |
+
elif optimizer_state["stage"] is OptState.STEPPED:
|
| 277 |
+
raise RuntimeError("unscale_() is being called after step().")
|
| 278 |
+
|
| 279 |
+
# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
|
| 280 |
+
assert self._scale is not None
|
| 281 |
+
inv_scale = self._scale.double().reciprocal().float()
|
| 282 |
+
found_inf = torch.full(
|
| 283 |
+
(1,), 0.0, dtype=torch.float32, device=self._scale.device
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
optimizer_state["found_inf_per_device"] = self._unscale_grads_(
|
| 287 |
+
optimizer, inv_scale, found_inf, True
|
| 288 |
+
)
|
| 289 |
+
optimizer_state["stage"] = OptState.UNSCALED
|
| 290 |
+
|
| 291 |
+
# Synchronize the detected inf across the ranks
|
| 292 |
+
optimizer_state = self._per_optimizer_states[id(optimizer)]
|
| 293 |
+
works = []
|
| 294 |
+
found_inf_on_cpus = []
|
| 295 |
+
found_inf_on_devices = []
|
| 296 |
+
|
| 297 |
+
for found_inf in optimizer_state["found_inf_per_device"].values():
|
| 298 |
+
if self._device != "cpu" and found_inf.device.type == "cpu":
|
| 299 |
+
found_inf_on_cpus.append(found_inf)
|
| 300 |
+
found_inf_on_device = found_inf.to(self._device)
|
| 301 |
+
found_inf_on_devices.append(found_inf_on_device)
|
| 302 |
+
works.append(
|
| 303 |
+
dist.all_reduce(
|
| 304 |
+
found_inf_on_device, async_op=True, group=self.process_group
|
| 305 |
+
)
|
| 306 |
+
)
|
| 307 |
+
else:
|
| 308 |
+
works.append(
|
| 309 |
+
dist.all_reduce(found_inf, async_op=True, group=self.process_group)
|
| 310 |
+
)
|
| 311 |
+
for work in works:
|
| 312 |
+
work.wait()
|
| 313 |
+
if found_inf_on_cpus:
|
| 314 |
+
torch._foreach_copy_(found_inf_on_cpus, found_inf_on_devices)
|
| 315 |
+
|
| 316 |
+
def _amp_update_scale_cpu_(self, found_inf: torch.Tensor) -> None:
|
| 317 |
+
"""
|
| 318 |
+
If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero.
|
| 319 |
+
Otherwise, scale is multiplied by the growth factor when the growth interval is reached.
|
| 320 |
+
"""
|
| 321 |
+
assert self._scale is not None and self._growth_tracker is not None
|
| 322 |
+
|
| 323 |
+
if found_inf.item() >= 1.0:
|
| 324 |
+
self._scale *= self._backoff_factor
|
| 325 |
+
self._growth_tracker.fill_(0)
|
| 326 |
+
else:
|
| 327 |
+
successful = self._growth_tracker + 1
|
| 328 |
+
if successful == self._growth_interval:
|
| 329 |
+
self._scale *= self._growth_factor
|
| 330 |
+
self._growth_tracker.fill_(0)
|
| 331 |
+
else:
|
| 332 |
+
self._growth_tracker = successful
|
| 333 |
+
|
| 334 |
+
def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None:
|
| 335 |
+
"""
|
| 336 |
+
Updates the scale factor.
|
| 337 |
+
If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
|
| 338 |
+
to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
|
| 339 |
+
the scale is multiplied by ``growth_factor`` to increase it.
|
| 340 |
+
Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
|
| 341 |
+
used directly, it's used to fill GradScaler's internal scale tensor. So if
|
| 342 |
+
``new_scale`` was a tensor, later in-place changes to that tensor will not further
|
| 343 |
+
affect the scale GradScaler uses internally.)
|
| 344 |
+
Args:
|
| 345 |
+
new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor.
|
| 346 |
+
.. warning::
|
| 347 |
+
:meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
|
| 348 |
+
been invoked for all optimizers used this iteration.
|
| 349 |
+
"""
|
| 350 |
+
|
| 351 |
+
if not self._enabled:
|
| 352 |
+
return
|
| 353 |
+
|
| 354 |
+
_scale, _growth_tracker = self._check_scale_growth_tracker("update") # type: ignore[var-annotated]
|
| 355 |
+
|
| 356 |
+
if new_scale is not None:
|
| 357 |
+
# Accept a new user-defined scale.
|
| 358 |
+
if isinstance(new_scale, float):
|
| 359 |
+
self._scale.fill_(new_scale) # type: ignore[union-attr]
|
| 360 |
+
else:
|
| 361 |
+
reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor or \
|
| 362 |
+
torch.FloatTensor with requires_grad=False."
|
| 363 |
+
assert new_scale.device.type == self._device, reason
|
| 364 |
+
assert new_scale.numel() == 1, reason
|
| 365 |
+
assert new_scale.requires_grad is False, reason
|
| 366 |
+
self._scale.copy_(new_scale) # type: ignore[union-attr]
|
| 367 |
+
else:
|
| 368 |
+
# Consume shared inf/nan data collected from optimizers to update the scale.
|
| 369 |
+
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
|
| 370 |
+
found_infs = [
|
| 371 |
+
found_inf.to(device=_scale.device, non_blocking=True)
|
| 372 |
+
for state in self._per_optimizer_states.values()
|
| 373 |
+
for found_inf in state["found_inf_per_device"].values()
|
| 374 |
+
]
|
| 375 |
+
|
| 376 |
+
assert len(found_infs) > 0, "No inf checks were recorded prior to update."
|
| 377 |
+
|
| 378 |
+
found_inf_combined = found_infs[0]
|
| 379 |
+
if len(found_infs) > 1:
|
| 380 |
+
for i in range(1, len(found_infs)):
|
| 381 |
+
found_inf_combined += found_infs[i]
|
| 382 |
+
|
| 383 |
+
if _scale.device.type == "cpu":
|
| 384 |
+
self._amp_update_scale_cpu_(found_inf_combined)
|
| 385 |
+
else:
|
| 386 |
+
torch._amp_update_scale_(
|
| 387 |
+
self._scale, # type: ignore[arg-type]
|
| 388 |
+
self._growth_tracker, # type: ignore[arg-type]
|
| 389 |
+
found_inf_combined,
|
| 390 |
+
self._growth_factor, # type: ignore[arg-type]
|
| 391 |
+
self._backoff_factor, # type: ignore[arg-type]
|
| 392 |
+
self._growth_interval, # type: ignore[arg-type]
|
| 393 |
+
)
|
| 394 |
+
|
| 395 |
+
# To prepare for next iteration, clear the data collected from optimizers this iteration.
|
| 396 |
+
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import threading
|
| 5 |
+
import warnings
|
| 6 |
+
from datetime import timedelta
|
| 7 |
+
from typing import Generator, Tuple
|
| 8 |
+
from urllib.parse import urlparse
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.distributed as dist
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
__all__ = ["is_available"]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
_init_counter = 0
|
| 21 |
+
_init_counter_lock = threading.Lock()
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def is_available() -> bool:
|
| 25 |
+
return hasattr(torch._C, "_rpc_init")
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
if is_available() and not torch._C._rpc_init():
|
| 29 |
+
raise RuntimeError("Failed to initialize torch.distributed.rpc")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
if is_available():
|
| 33 |
+
import numbers
|
| 34 |
+
|
| 35 |
+
import torch.distributed.autograd as dist_autograd
|
| 36 |
+
from torch._C._distributed_c10d import Store
|
| 37 |
+
from torch._C._distributed_rpc import ( # noqa: F401
|
| 38 |
+
_cleanup_python_rpc_handler,
|
| 39 |
+
_DEFAULT_INIT_METHOD,
|
| 40 |
+
_DEFAULT_NUM_WORKER_THREADS,
|
| 41 |
+
_DEFAULT_RPC_TIMEOUT_SEC,
|
| 42 |
+
_delete_all_user_and_unforked_owner_rrefs,
|
| 43 |
+
_destroy_rref_context,
|
| 44 |
+
_disable_jit_rref_pickle,
|
| 45 |
+
_disable_server_process_global_profiler,
|
| 46 |
+
_enable_jit_rref_pickle,
|
| 47 |
+
_enable_server_process_global_profiler,
|
| 48 |
+
_get_current_rpc_agent,
|
| 49 |
+
_invoke_remote_builtin,
|
| 50 |
+
_invoke_remote_python_udf,
|
| 51 |
+
_invoke_remote_torchscript,
|
| 52 |
+
_invoke_rpc_builtin,
|
| 53 |
+
_invoke_rpc_python_udf,
|
| 54 |
+
_invoke_rpc_torchscript,
|
| 55 |
+
_is_current_rpc_agent_set,
|
| 56 |
+
_reset_current_rpc_agent,
|
| 57 |
+
_rref_context_get_debug_info,
|
| 58 |
+
_set_and_start_rpc_agent,
|
| 59 |
+
_set_profiler_node_id,
|
| 60 |
+
_set_rpc_timeout,
|
| 61 |
+
_TensorPipeRpcBackendOptionsBase,
|
| 62 |
+
_UNSET_RPC_TIMEOUT,
|
| 63 |
+
enable_gil_profiling,
|
| 64 |
+
get_rpc_timeout,
|
| 65 |
+
PyRRef,
|
| 66 |
+
RemoteProfilerManager,
|
| 67 |
+
RpcAgent,
|
| 68 |
+
RpcBackendOptions,
|
| 69 |
+
TensorPipeAgent,
|
| 70 |
+
WorkerInfo,
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
from . import api, backend_registry, functions
|
| 74 |
+
from .api import * # noqa: F401,F403
|
| 75 |
+
from .backend_registry import BackendType
|
| 76 |
+
from .options import TensorPipeRpcBackendOptions # noqa: F401
|
| 77 |
+
from .server_process_global_profiler import _server_process_global_profile
|
| 78 |
+
|
| 79 |
+
rendezvous_iterator: Generator[Tuple[Store, int, int], None, None]
|
| 80 |
+
|
| 81 |
+
__all__ += ["init_rpc", "BackendType", "TensorPipeRpcBackendOptions"]
|
| 82 |
+
__all__ = __all__ + api.__all__ + backend_registry.__all__ # noqa: PLE0605
|
| 83 |
+
|
| 84 |
+
def init_rpc(
|
| 85 |
+
name,
|
| 86 |
+
backend=None,
|
| 87 |
+
rank=-1,
|
| 88 |
+
world_size=None,
|
| 89 |
+
rpc_backend_options=None,
|
| 90 |
+
):
|
| 91 |
+
r"""
|
| 92 |
+
Initializes RPC primitives such as the local RPC agent
|
| 93 |
+
and distributed autograd, which immediately makes the current
|
| 94 |
+
process ready to send and receive RPCs.
|
| 95 |
+
|
| 96 |
+
Args:
|
| 97 |
+
name (str): a globally unique name of this node. (e.g.,
|
| 98 |
+
``Trainer3``, ``ParameterServer2``, ``Master``, ``Worker1``)
|
| 99 |
+
Name can only contain number, alphabet, underscore, colon,
|
| 100 |
+
and/or dash, and must be shorter than 128 characters.
|
| 101 |
+
backend (BackendType, optional): The type of RPC backend
|
| 102 |
+
implementation. Supported values is
|
| 103 |
+
``BackendType.TENSORPIPE`` (the default).
|
| 104 |
+
See :ref:`rpc-backends` for more information.
|
| 105 |
+
rank (int): a globally unique id/rank of this node.
|
| 106 |
+
world_size (int): The number of workers in the group.
|
| 107 |
+
rpc_backend_options (RpcBackendOptions, optional): The options
|
| 108 |
+
passed to the RpcAgent constructor. It must be an agent-specific
|
| 109 |
+
subclass of :class:`~torch.distributed.rpc.RpcBackendOptions`
|
| 110 |
+
and contains agent-specific initialization configurations. By
|
| 111 |
+
default, for all agents, it sets the default timeout to 60
|
| 112 |
+
seconds and performs the rendezvous with an underlying process
|
| 113 |
+
group initialized using ``init_method = "env://"``,
|
| 114 |
+
meaning that environment variables ``MASTER_ADDR`` and
|
| 115 |
+
``MASTER_PORT`` need to be set properly. See
|
| 116 |
+
:ref:`rpc-backends` for more information and find which options
|
| 117 |
+
are available.
|
| 118 |
+
"""
|
| 119 |
+
torch._C._log_api_usage_once("torch.distributed.init_rpc")
|
| 120 |
+
if backend is not None and not isinstance(
|
| 121 |
+
backend, backend_registry.BackendType
|
| 122 |
+
):
|
| 123 |
+
raise TypeError("Argument backend must be a member of BackendType")
|
| 124 |
+
|
| 125 |
+
if rpc_backend_options is not None and not isinstance(
|
| 126 |
+
rpc_backend_options, RpcBackendOptions
|
| 127 |
+
):
|
| 128 |
+
raise TypeError(
|
| 129 |
+
"Argument rpc_backend_options must be an instance of RpcBackendOptions"
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
# Try to detect the backend from the options
|
| 133 |
+
if backend is None and rpc_backend_options is not None:
|
| 134 |
+
for candidate_backend in BackendType:
|
| 135 |
+
if isinstance(
|
| 136 |
+
rpc_backend_options,
|
| 137 |
+
type(
|
| 138 |
+
backend_registry.construct_rpc_backend_options(
|
| 139 |
+
candidate_backend
|
| 140 |
+
)
|
| 141 |
+
),
|
| 142 |
+
):
|
| 143 |
+
backend = candidate_backend
|
| 144 |
+
break
|
| 145 |
+
else:
|
| 146 |
+
raise TypeError(
|
| 147 |
+
f"Could not infer backend for options {rpc_backend_options}"
|
| 148 |
+
)
|
| 149 |
+
# Ignore type error because mypy doesn't handle dynamically generated type objects (#4865)
|
| 150 |
+
if backend != BackendType.TENSORPIPE: # type: ignore[attr-defined]
|
| 151 |
+
logger.warning(
|
| 152 |
+
"RPC was initialized with no explicit backend but with options " # type: ignore[attr-defined]
|
| 153 |
+
"corresponding to %(backend)s, hence that backend will be used "
|
| 154 |
+
"instead of the default BackendType.TENSORPIPE. To silence this "
|
| 155 |
+
"warning pass `backend=%(backend)s` explicitly.",
|
| 156 |
+
{"backend": backend},
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
if backend is None:
|
| 160 |
+
backend = BackendType.TENSORPIPE # type: ignore[attr-defined]
|
| 161 |
+
|
| 162 |
+
if rpc_backend_options is None:
|
| 163 |
+
# default construct a set of RPC backend options.
|
| 164 |
+
rpc_backend_options = backend_registry.construct_rpc_backend_options(
|
| 165 |
+
backend
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
# Create store, performs rendezvous for static RPC group.
|
| 169 |
+
if not world_size:
|
| 170 |
+
# If world_size is not set in construction and also not set in environment variables
|
| 171 |
+
# The store will be created for the dynamic group setting
|
| 172 |
+
store = dist._create_store_from_options(rpc_backend_options, rank)
|
| 173 |
+
else:
|
| 174 |
+
# This rendezvous state sometimes is destroyed before all processes
|
| 175 |
+
# finishing handshaking. To avoid that issue, we make it global to
|
| 176 |
+
# keep it alive.
|
| 177 |
+
global rendezvous_iterator
|
| 178 |
+
rendezvous_iterator = dist.rendezvous(
|
| 179 |
+
rpc_backend_options.init_method, rank=rank, world_size=world_size
|
| 180 |
+
)
|
| 181 |
+
store, _, _ = next(rendezvous_iterator)
|
| 182 |
+
# Use same timeout as RPC.
|
| 183 |
+
store.set_timeout(timedelta(seconds=rpc_backend_options.rpc_timeout))
|
| 184 |
+
|
| 185 |
+
# Use a PrefixStore to distinguish multiple invocations.
|
| 186 |
+
with _init_counter_lock:
|
| 187 |
+
global _init_counter
|
| 188 |
+
store = dist.PrefixStore(str(f"rpc_prefix_{_init_counter}"), store)
|
| 189 |
+
_init_counter += 1
|
| 190 |
+
|
| 191 |
+
# Initialize autograd before RPC since _init_rpc_backend guarantees all
|
| 192 |
+
# processes sync via the store. If we initialize autograd after RPC,
|
| 193 |
+
# there could be a race where some nodes might have initialized autograd
|
| 194 |
+
# and others might not have. As a result, a node calling
|
| 195 |
+
# torch.distributed.autograd.backward() would run into errors since
|
| 196 |
+
# other nodes might not have been initialized.
|
| 197 |
+
dist_autograd._init(rank)
|
| 198 |
+
|
| 199 |
+
_set_profiler_node_id(rank)
|
| 200 |
+
# Initialize RPC.
|
| 201 |
+
_init_rpc_backend(backend, store, name, rank, world_size, rpc_backend_options)
|
| 202 |
+
|
| 203 |
+
def _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options):
|
| 204 |
+
type_mapping = {
|
| 205 |
+
backend: backend_registry.BackendType,
|
| 206 |
+
store: dist.Store,
|
| 207 |
+
name: str,
|
| 208 |
+
rank: numbers.Integral,
|
| 209 |
+
# world_size can be None for a dynamic group
|
| 210 |
+
world_size: (numbers.Integral, type(None)),
|
| 211 |
+
rpc_backend_options: RpcBackendOptions,
|
| 212 |
+
}
|
| 213 |
+
for arg, arg_type in type_mapping.items():
|
| 214 |
+
if not isinstance(arg, arg_type): # type: ignore[arg-type]
|
| 215 |
+
raise RuntimeError(
|
| 216 |
+
f"Argument {arg} must be of type {arg_type} but got type {type(arg)}"
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
def _init_rpc_backend(
    backend=BackendType.TENSORPIPE,  # type: ignore[attr-defined]
    store=None,
    name=None,
    rank=-1,
    world_size=None,
    rpc_backend_options=None,
):
    """Validate the arguments, construct the RPC agent for ``backend``, and
    register it as the current agent.

    Raises:
        RuntimeError: if the arguments fail validation or RPC has already
            been initialized.
    """
    _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options)

    # Refuse to initialize twice: an agent is already registered.
    if _is_current_rpc_agent_set():
        raise RuntimeError("RPC is already initialized")

    # Build the backend-specific agent, then install it as the process-wide
    # RPC state.
    agent = backend_registry.init_backend(
        backend,
        store=store,
        name=name,
        rank=rank,
        world_size=world_size,
        rpc_backend_options=rpc_backend_options,
    )
    api._init_rpc_states(agent)
|
| 243 |
+
|
| 244 |
+
@api._require_initialized
def _get_debug_info():
    """Collect debug info from the RRef context, the current RPC agent, and
    distributed autograd into a single dict.

    Later updates win on key collisions, so agent info overrides RRef-context
    info and autograd info overrides both (same precedence as the original
    merge order).
    """
    debug_info = _rref_context_get_debug_info()
    agent_info = api._get_current_rpc_agent().get_debug_info()
    autograd_info = dist_autograd._get_debug_info()
    debug_info.update(agent_info)
    debug_info.update(autograd_info)
    return debug_info
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (6.63 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (1.2 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (28.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/backend_registry.cpython-310.pyc
ADDED
|
Binary file (9.94 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/constants.cpython-310.pyc
ADDED
|
Binary file (781 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/functions.cpython-310.pyc
ADDED
|
Binary file (7.46 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/internal.cpython-310.pyc
ADDED
|
Binary file (8.35 kB). View file
|
|
|
janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc
ADDED
|
Binary file (7.15 kB). View file
|
|
|