diff --git a/.gitattributes b/.gitattributes index 27ac1f84407d45beffe556466148d234b4d0d97f..2255039109caf79bea0015785796db29cf1b58fe 100644 --- a/.gitattributes +++ b/.gitattributes @@ -454,3 +454,4 @@ janus/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester janus/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text janus/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text janus/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +janus/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6ccf5dbab5e0d9290ebf713474b8d9d927ace4e Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61313cc5ff11aa0891ccf01ad19e0bacf48a8b84 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..98f772407445200a252131dbe39cc08056dfd7ae Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1f5c9047993d6deb70ac157e04436f7def0c9f2 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11d0bc8132e1846eb6e7484d6295066cfb720ad7 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py b/janus/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py new file mode 100644 index 0000000000000000000000000000000000000000..88253abb4b9cbb6f31e5eb9cc25003d654d82ee1 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py @@ -0,0 +1,126 @@ +# mypy: allow-untyped-decorators +# mypy: allow-untyped-defs +from contextlib import contextmanager, nullcontext +from typing import Any, ContextManager, Dict, Optional, Tuple + +import torch +import torch.nn as nn +from torch.utils.checkpoint import ( + _checkpoint_without_reentrant_generator, + _DEFAULT_DETERMINISM_MODE, +) + +from .contract import contract + + +@contextmanager 
+def _no_hook(module: nn.Module, user_ctx: Optional[ContextManager] = None): + r""" + Disable hooks installed by checkpoint to avoid unintentional recursion + during backward recomputation. + """ + + with user_ctx if user_ctx else nullcontext(): + orig_enable_hook = checkpoint.state(module).enable_hook + checkpoint.state(module).enable_hook = False + try: + yield + finally: + checkpoint.state(module).enable_hook = orig_enable_hook + + +@contract() +def checkpoint(module: nn.Module, **kwargs) -> nn.Module: + r""" + This is a composable activation checkpointing API. Unlike functional + activation checkpointing APIs, this one does not require changing model + source code. Unlike ``nn.Module`` wrapper activation checkpointing APIs, + this one does not modify model structure or fully-qualified names either. + Under the hood, it registers activation checkpointing logic as pre- and + post-forward hooks. Hence, this API can be easily applied to any model or + sub-modules in the model. + + Args: + module (nn.Module): the target model or sub-module to apply activation + checkpointing. + + Example:: + >>> # xdoctest: +SKIP + >>> import torch.nn as nn + >>> + >>> class MyModel(nn.Module): + >>> def __init__(self) -> None: + >>> super().__init__() + >>> self.l1 = nn.Linear(10, 10) + >>> self.l2 = nn.Linear(10, 10) + >>> + >>> def forward(self, x): + >>> return self.l2(self.l1(x)) + >>> + >>> model = MyModel() + >>> checkpoint(model.l1) # apply activation checkpointing only to l1 + >>> model(torch.zeros(2, 10)).sum().backward() + + """ + torch._C._log_api_usage_once("torch.distributed.checkpoint") + + use_reentrant = kwargs.pop("use_reentrant", False) + if use_reentrant: + raise NotImplementedError( + "use_reentrant=True is not supported in composable checkpoint. " + "Please use torch.utils.checkpoint.checkpoint instead." 
+ ) + preserve_rng_state = kwargs.pop("preserve_rng_state", True) + user_context_fns = kwargs.pop("context_fn", None) + determinism_check = kwargs.pop("determinism_check", _DEFAULT_DETERMINISM_MODE) + debug = kwargs.pop("debug", False) + + if kwargs: + raise ValueError( + "Unexpected keyword arguments: " + ",".join(arg for arg in kwargs) + ) + + def forward_pre_hook( + module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> None: + if checkpoint.state(module).enable_hook: + + def context_fns(): + if user_context_fns is not None: + ctx1, ctx2 = user_context_fns() + return ctx1, _no_hook(module, ctx2) + else: + return nullcontext(), _no_hook(module) + + checkpoint.state( + module + )._ac_generator = _checkpoint_without_reentrant_generator( + module, + preserve_rng_state, + context_fns, + determinism_check, + debug, + *args, + **kwargs, + ) + next(checkpoint.state(module)._ac_generator) + + def forward_hook(module: nn.Module, inputs: Tuple[Any, ...], output: Any) -> Any: + if checkpoint.state(module).enable_hook: + try: + next(checkpoint.state(module)._ac_generator) + except StopIteration: + pass + else: + raise RuntimeError( + "Expected non-reentrant activation checkpoint generator to be exhausted, but it was not!" + ) + + # Ensure that we no longer hold on to the generator. always_call=True helps ensure we + # clear this even in the case of exception in fwd pass. 
+ checkpoint.state(module)._ac_generator = None + + checkpoint.state(module).enable_hook = True + module.register_forward_pre_hook(forward_pre_hook, with_kwargs=True) + module.register_forward_hook(forward_hook, prepend=True, always_call=True) + return module diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/contract.py b/janus/lib/python3.10/site-packages/torch/distributed/_composable/contract.py new file mode 100644 index 0000000000000000000000000000000000000000..e7cd1713fae4c2eef3677242180ca719566ffe7f --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/_composable/contract.py @@ -0,0 +1,224 @@ +# mypy: allow-untyped-defs +import uuid +from collections import OrderedDict +from functools import wraps +from typing import Callable, Dict, List, Optional, Sequence, Type, Union + +import torch +import torch.nn as nn +from torch.distributed._composable_state import _State +from torch.distributed.utils import _get_root_modules + + +def generate_state_key(string="__composable_api_state_key"): + return f"{string}_{str(uuid.uuid4())}" + + +STATE_KEY = generate_state_key() +REGISTRY_KEY = generate_state_key() + + +# TODO: we can add additional info to RegistryItem to share across APIs. E.g., +# we can add args and kwargs here, and then we can detect whether fully_shard +# is combined with reentrant activation checkpointing and error out with a clear +# message. +class RegistryItem: + pass + + +def contract(state_cls: Type[_State] = _State): + r""" + Decorate a function as a composable distributed API, where the first + argument of the function must be an :class:`nn.Module` instance or sequence + of :class:`nn.Module` instances. + + The decorator verifies that the decorated function does not modify + fully-qualified names (FQNs) for parameters, buffers, or modules. The + decorated function can return different module instances than the input + modules; the FQN invariant will be enforced following the input order. 
+ + When a function ``func`` is decorated by ``@contract()``, a + ``.state(module: nn.Module)`` method will be installed to the decorated + function. Then you can retrieve and modify the state on a module by calling + ``func.state(module)``. + + Example:: + >>> # xdoctest: +SKIP + >>> import torch.nn as nn + >>> + >>> class MyModel(nn.Module): + >>> def __init__(self) -> None: + >>> super().__init__() + >>> self.l1 = nn.Linear(10, 10) + >>> self.l2 = nn.Linear(10, 10) + >>> + >>> def forward(self, x): + >>> return self.l2(self.l1(x)) + >>> + >>> @contract() + >>> def my_feature(module: nn.Module) -> nn.Module: + >>> my_feature.state(module).some_state = "any value" + >>> return module + >>> + >>> model = MyModel() + >>> my_feature(model.l1) + >>> assert my_feature.state(model.l1).some_state == "any value" + >>> my_feature(model.l2) + >>> model(torch.randn(2, 10)).sum().backward() + """ + + # wraps will make functions decorated with contract() pickleable - needed for integration with torch.package + @wraps(state_cls) + def inner(func): + @wraps(func) + def wrapper( + module: Union[nn.Module, Sequence[nn.Module]], *args, **kwargs + ) -> Optional[nn.Module]: + inp_module = module + if isinstance(module, nn.Module): + modules = [module] + else: + # If the user passes a sequence of modules, then we assume that + # we only need to insert the state object on the root modules + # (i.e. those without a parent) among the passed-in modules. 
+ modules = _get_root_modules(list(module)) + state = state_cls() # shared across all modules + registry_item = RegistryItem() # shared across all modules + + # `func` is allowed to return different module instances than the + # input modules as long as FQNs are preserved following the input + # module order + all_orig_named_params: List[Dict[str, nn.Parameter]] = [] + all_orig_named_buffers: List[Dict[str, torch.Tensor]] = [] + all_orig_named_modules: List[Dict[str, nn.Module]] = [] + + for module in modules: + default_all_state: Dict[Callable, _State] = OrderedDict() + default_registry: Dict[str, RegistryItem] = OrderedDict() + all_state: Dict[Callable, _State] = module.__dict__.setdefault( # type: ignore[call-overload] + STATE_KEY, default_all_state + ) + if not isinstance(all_state, dict): + raise AssertionError( + f"Distributed composable API states corrupted: {all_state}" + ) + registry: Dict[str, RegistryItem] = module.__dict__.setdefault( # type: ignore[call-overload] + REGISTRY_KEY, default_registry + ) + if not isinstance(registry, dict): + raise AssertionError( + f"Distributed composable API registry corrupted: {registry}" + ) + if func in all_state or func.__name__ in registry: + raise AssertionError( + "Each distinct composable distributed API can only be applied to a " + f"module once. 
{func.__name__} has already been applied to the " + f"following module:\n{module}" + ) + all_state.setdefault(func, state) + registry.setdefault(func.__name__, registry_item) + + all_orig_named_params.append(OrderedDict(module.named_parameters())) + all_orig_named_buffers.append(OrderedDict(module.named_buffers())) + all_orig_named_modules.append(OrderedDict(module.named_modules())) + + updated = func(inp_module, *args, **kwargs) + if updated is None: + updated = inp_module + if isinstance(updated, nn.Module): + updated_modules = [updated] + else: + updated_modules = _get_root_modules(list(inp_module)) + + all_new_named_params: List[Dict[str, nn.Parameter]] = [] + all_new_named_buffers: List[Dict[str, torch.Tensor]] = [] + all_new_named_modules: List[Dict[str, nn.Module]] = [] + for module in updated_modules: + all_new_named_params.append(OrderedDict(module.named_parameters())) + all_new_named_buffers.append(OrderedDict(module.named_buffers())) + all_new_named_modules.append(OrderedDict(module.named_modules())) + + num_orig_modules = len(all_orig_named_modules) + num_new_modules = len(all_new_named_modules) + if num_orig_modules != num_new_modules: + raise AssertionError( + f"{func.__name__} should return the same number of modules as input modules" + f"Inputs: {num_orig_modules} modules\n" + f"Outputs: {num_new_modules} modules" + ) + + def check_fqn(orig_fqns: List[str], new_fqns: List[str], check_key: str): + if orig_fqns == new_fqns: + return + + orig_fqn_set, new_fqn_set = set(orig_fqns), set(new_fqns) + orig_only = orig_fqn_set - new_fqn_set + new_only = new_fqn_set - orig_fqn_set + if len(orig_only) or len(new_only): + raise RuntimeError( + f"{check_key}" + "Composable distributed API implementations cannot modify FQNs.\n" + f"FQNs only in original: {orig_only}\n" + f"FQNs only in new: {new_only}" + ) + else: + raise RuntimeError( + f"{check_key}" + "Composable distributed API implementations cannot modify " + "the order of FQNs.\n" + f"Original FQNs: 
{orig_only}\n" + f"New FQNs: {new_only}" + ) + + for orig_named_params, new_named_params in zip( + all_orig_named_params, all_new_named_params + ): + check_fqn( + list(orig_named_params.keys()), + list(new_named_params.keys()), + "Checking parameters: ", + ) + for orig_named_buffers, new_named_buffers in zip( + all_orig_named_buffers, all_new_named_buffers + ): + check_fqn( + list(orig_named_buffers.keys()), + list(new_named_buffers.keys()), + "Checking buffers: ", + ) + for orig_named_modules, new_named_modules in zip( + all_orig_named_modules, all_new_named_modules + ): + check_fqn( + list(orig_named_modules.keys()), + list(new_named_modules.keys()), + "Checking modules: ", + ) + + # TODO: verify that installed distributed paradigms are compatible with + # each other. + + return updated + + def get_state(module: nn.Module) -> Optional[_State]: + return module.__dict__.setdefault( # type: ignore[call-overload] + STATE_KEY, + {}, # TODO(@yhcharles): this is a temporary fix, need a better way + ).get( + func + ) # type: ignore[call-overload] + + wrapper.state = get_state # type: ignore[attr-defined] + + return wrapper + + return inner + + +def _get_registry(module: nn.Module) -> Optional[Dict[str, RegistryItem]]: + r""" + Get an ``OrderedDict`` of composable APIs that have been applied to the + ``module``, indexed by the API name. If no API has been applied, then this + returns ``None``. 
+ """ + return getattr(module, REGISTRY_KEY, None) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..191a836632085e2f3b22d9330b84fc46f0475a09 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py @@ -0,0 +1,2 @@ +from ._fsdp_api import CPUOffloadPolicy, MixedPrecisionPolicy, OffloadPolicy +from .fully_shard import FSDPModule, fully_shard, register_fsdp_forward_method diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd5e29d4c435620d0765c8159529fd2b938af116 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ec1f8e0196f0ca2b8bf9776544d10b5b2901bcf Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03a00f3000c0e9f874fbca60ed9d68fb04532a91 Binary files /dev/null and 
b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ade973925ea10aad4e76fcb9e9e5720e62fe5c9 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49e10866257411aafa7bdbd242c0011670bab0d9 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e29050806394e28afee0b3f68e5edc77cb7acb6f Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py new file mode 100644 index 0000000000000000000000000000000000000000..31b74079aaa8be31ef27ddcccf6ead55a56ac2f2 --- /dev/null +++ 
b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py @@ -0,0 +1,152 @@ +# mypy: allow-untyped-defs +import math +import traceback +from dataclasses import dataclass +from enum import auto, Enum +from typing import Any, cast, List, Optional + +import torch +import torch._dynamo.compiled_autograd as ca +import torch.distributed as dist +import torch.nn as nn +from torch.distributed._composable.contract import _get_registry +from torch.distributed.tensor import DeviceMesh, DTensor +from torch.distributed.tensor._dtensor_spec import DTensorSpec + + +@dataclass +class DataParallelMeshInfo: + mesh: DeviceMesh + shard_mesh_dim: Optional[int] = None + replicate_mesh_dim: Optional[int] = None + + def __post_init__(self): + if self.shard_mesh_dim is None and self.replicate_mesh_dim is None: + raise AssertionError( + "At least one of shard_mesh_dim and replicate_mesh_dim must not be None" + ) + + +@dataclass +class FSDPMeshInfo(DataParallelMeshInfo): + def __post_init__(self): + super().__post_init__() + if self.shard_mesh_dim is None: + raise AssertionError("Expects non-None shard_mesh_dim") + self.shard_mesh_size: int = self.mesh.size(self.shard_mesh_dim) + self.shard_process_group = self.mesh.get_group(self.shard_mesh_dim) + self.shard_mesh_rank: int = self.shard_process_group.rank() + + +@dataclass +class DDPMeshInfo(DataParallelMeshInfo): + def __post_init__(self): + super().__post_init__() + if self.replicate_mesh_dim is None: + raise AssertionError("Expects non-None replicate_mesh_dim") + self.replicate_mesh_size: int = self.mesh.size(self.replicate_mesh_dim) + self.replicate_process_group = self.mesh.get_group(self.replicate_mesh_dim) + self.replicate_mesh_rank: int = self.replicate_process_group.rank() + + +@dataclass +class HSDPMeshInfo(FSDPMeshInfo, DDPMeshInfo): + def __post_init__(self): + # Calls `FSDPMeshInfo` -> `DDPMeshInfo` -> `DataParallelMeshInfo` + super().__post_init__() + + +class TrainingState(Enum): + """Describes 
the training state of one FSDP state / parameter group.""" + + # Transition to forward starting pre-forward until post-forward + FORWARD = auto() + # Transition to pre-backward when unsharding in backward + PRE_BACKWARD = auto() + # Transition to post-backward when resharding and reducing gradients + POST_BACKWARD = auto() + # Idle before/after forward or before pre-backward/after post-backward + IDLE = auto() + + +def _raise_assert_with_print(*args: Any, **kwargs: Any): + print(f"[Rank {dist.get_rank()}] ", end="") + print(*args, **kwargs) + traceback.print_stack() + raise AssertionError(*args, **kwargs) + + +def _is_composable_with_fsdp(module: nn.Module) -> bool: + registry = _get_registry(module) + if registry is None: + return True + # Registry keys by function name + return "replicate" not in registry + + +def _get_dim0_padded_size(tensor_size: torch.Size, dim0_factor: int) -> torch.Size: + padded_dim0 = math.ceil(tensor_size[0] / dim0_factor) * dim0_factor + return cast(torch.Size, torch.Size([padded_dim0]) + tensor_size[1:]) + + +def _chunk_with_empty( + tensor: torch.Tensor, num_chunks: int, dim: int +) -> List[torch.Tensor]: + chunks = list(torch.chunk(tensor, num_chunks, dim=dim)) + while len(chunks) < num_chunks: + chunks.append(chunks[0].new_empty(0)) + return chunks + + +def _get_dim0_chunked_size( + chunk: torch.Tensor, unchunked_size: torch.Size +) -> torch.Size: + if chunk.numel() > 0: + return chunk.size() + # For 0 numel, we need to preserve trailing dims for DTensor APIs + return cast(torch.Size, torch.Size([0]) + unchunked_size[1:]) + + +def _from_local_no_grad( + local_tensor: torch.Tensor, + sharding_spec: DTensorSpec, +) -> DTensor: + """ + This method is similar to ``DTensor.from_local()`` except that in eager mode + it avoids some CPU overhead by avoiding default args and not being differentiable. 
+ """ + + if not ca.compiled_autograd_enabled: + return DTensor( + # Use the local tensor directly instead of constructing a new tensor + # variable, e.g. with `view_as()`, since this is not differentiable + local_tensor, + sharding_spec, + requires_grad=local_tensor.requires_grad, + ) + else: + return DTensor.from_local( + local_tensor, + sharding_spec.mesh, + sharding_spec.placements, + shape=sharding_spec.shape, + stride=sharding_spec.stride, + ) + + +def _to_dtype_if_needed( + tensor: torch.Tensor, dtype: Optional[torch.dtype] +) -> torch.Tensor: + if dtype is not None and tensor.dtype != dtype: + return tensor.to(dtype) + return tensor + + +def _cast_fp_tensor(dtype: torch.dtype, x: torch.Tensor) -> torch.Tensor: + if ( + not isinstance(x, torch.Tensor) + or not torch.is_floating_point(x) + or x.dtype == dtype + ): + return x + return x.to(dtype) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py new file mode 100644 index 0000000000000000000000000000000000000000..f6fc631ec8ac46f5494c93c471d93ef5f2a58722 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py @@ -0,0 +1,754 @@ +# mypy: allow-untyped-defs +import itertools +from dataclasses import dataclass, field +from enum import auto, Enum +from typing import Any, cast, List, Optional, Sequence, Tuple + +import torch +import torch._dynamo.compiled_autograd as ca +import torch.nn as nn +from torch._prims_common import make_contiguous_strides_for +from torch.distributed._functional_collectives import AsyncCollectiveTensor +from torch.distributed.tensor import DTensor, Replicate, Shard +from torch.distributed.tensor._dtensor_spec import DTensorSpec, TensorMeta +from torch.distributed.tensor.device_mesh import _mesh_resources +from torch.distributed.tensor.placement_types import _StridedShard, Placement + +from ._fsdp_api import 
CPUOffloadPolicy, MixedPrecisionPolicy, OffloadPolicy +from ._fsdp_common import ( + _chunk_with_empty, + _from_local_no_grad, + _get_dim0_chunked_size, + _raise_assert_with_print, + _to_dtype_if_needed, + FSDPMeshInfo, + HSDPMeshInfo, +) + + +""" +[Note: FSDP tensors] +FSDP considers the following tensors: +- Original parameter: parameter passed to :class:`FSDPParam`, i.e. the one + on the module when applying FSDP +- Sharded parameter: sharding the original parameter on dim-0 as a DTensor + over the main mesh +- All-gather inputs: the ``torch.Tensor`` or ``Tensor`` s passed to all-gather, + derived from the sharded parameter +- All-gather output: the ``torch.Tensor`` or ``Tensor`` s resulting from + all-gathering the all-gather inputs +- Unsharded parameter: parameter used for forward/backward computation, derived + from the all-gather output; autograd leaf + +We define these tensors to describe the general framework that can accomodate +extensions, where: +- all-gather-inputs = pre-all-gather-transform(sharded-parameter) +- unsharded-parameter = post-all-gather-transform(all-gather-outputs) + +For the default ``torch.Tensor`` case, there is only one all-gather input, and +it shares the same underlying tensor data as the sharded parameter, meaning +that they can be thought of as the same tensors. The same applies for the +all-gather output and unsharded parameter. For non-``torch.Tensor`` extensions, +these equivalences may no longer hold due to the pre/post-all-gather +transforms, and some may have multiple all-gather inputs/outputs (e.g. +quantized data and scales). + +[Note: FSDP and autograd] +FSDP dynamically frees and allocates the unsharded parameter. Since autograd +can pack a reference to it or a view to save for backward, we use storage +resizing to implement the freeing/allocation since that preserves the aliasing. +This implies that we construct the unsharded parameter object once and write to +it in-place thereafter. 
For the default ``torch.Tensor` original parameter +case, the all-gather output and unsharded parameter share the same +data, so we use storage resizing on the all-gather output. +""" + +lib = torch.library.Library("fsdp", "FRAGMENT") # noqa: TOR901 + +lib.define("set_(Tensor(a!) tensor, Tensor data) -> ()") + + +@torch.library.impl(lib, "set_", "Meta") +@torch.library.impl(lib, "set_", "CUDA") +@torch.library.impl(lib, "set_", "CPU") +def set_(tensor, data): + tensor.set_(data) + + +""" +[Note: Avoiding functionalization for fsdp.set_ and inductor.resize_storage_bytes_(0)] + +Currently we don't functionalize `fsdp.set_` op or `inductor.resize_storage_bytes_(0)` op +(i.e. they show up as a mutation op in the middle of the AOT joint graph). + +Reason: +Traceable FSDP2 compiled autograd BWD graph have the following traits: +(1) Two inputs of the graph were aliased to each other (one from hook closed-over tensors, one from FWD saved tensors). +(2) One of them is mutated (set_ and resize_(0) to handle the all-gathered param). +(3) They are both subclasses. +The combination of these traits is not supported by AOTAutograd (it's difficult to reason about subclass aliasing). +So this doesn't work at all for Traceable FSDP2. + +The compromise we use is to avoid functionalization for the FSDP2 set_ and resize_(0) ops. +This avoids the problem above, because from AOTAutograd point-of-view there are no mutations +that functionalization needs to handle. (Although we need to be careful not to DCE those mutable ops.) + +We can avoid this functionalization because: +(1) The nn.Parameter is never used before its .set_() is called in eager code (i.e. no alias of it is created), +so it's safe to call .set_() in the middle of the graph to swap out its storage and start using the nn.Parameter downstream. +(2) We always re-allocate the buffer for nn.Parameter to store the AllGather output and to be used in downstream user ops. 
+So calling resize-to-0 in the middle of the graph to free nn.Parameter memory after use should always be okay +(since we always allocate anew next time we need it, we strictly don't need to keep the old tensor storage around anymore). + +Q: But doesn't the torch.compile stack have the "functional graph" assumption in many places? +A: Yes - this is WIP but we will try to get back to functional graph as early as possible in the lowering process. +Specifically, we believe we can move both .set_ and .resize_(0) ops to end of graph in AOT joint graph before partitioner +(i.e. effectively "re-functionalizing" those ops). Put it in another way, we avoid functionalization for those two ops just to +make AOTAutograd alias analysis happy, and as soon as we are past that point, we "re-functionalize" the graph. +This requires a custom FX pass but we believe it's not hard to write and maintain. + +Q: What's the importance of partitioner not saving views of nn.Parameter as FWD saved tensors? +A: This is critical: we do want to save FWD nn.Parameter graph input (instead of its view) for BWD use, +so that downstream ops in BWD graph uses the post-`.set_` nn.Parameter instead of any of its saved views as input. +This is because .set_ will not update any of the nn.Parameter's views, so BWD downstream ops must use the original +nn.Parameter in order to see the result of .set_. +""" + + +@torch.library.impl(lib, "set_", "Functionalize") +def set__functionalize(tensor, data): + torch._sync(tensor) + torch._sync(data) + # AOTDispatcher needs to know if any inputs had their storages mutated. + # (Why? 
It sometimes detaches inputs before sending them into the graph, + # when it sees that they do not need to have any gradients computed) + torch._functionalize_set_storage_changed(tensor) + tensor_inner = torch._from_functional_tensor(tensor) + data_inner = torch._from_functional_tensor(data) + with torch._C._ExcludeDispatchKeyGuard( + torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize) + ): + torch.ops.fsdp.set_.default(tensor_inner, data_inner) + + +torch.fx.node.has_side_effect(torch.ops.fsdp.set_.default) + + +class ShardedState(Enum): + """ + - ``SHARDED``: The sharded parameter is registered to the module. It is the + only contributor to parameter memory. + - ``SHARDED_POST_FORWARD``: The unsharded parameter is resharded to a + smaller world size. Since this data should not be used for computation, + we do not register it to the module. Users should reshard the module + before any in-place modifications. Both it and the sharded parameter + contribute to parameter memory. + - ``UNSHARDED``: The unsharded parameter is registered to the module. Both + it and the sharded parameter contribute to parameter memory. + """ + + SHARDED = auto() + SHARDED_POST_FORWARD = auto() + UNSHARDED = auto() + + +@dataclass +class ParamModuleInfo: + """ + For a parameter, this stores the module and the parameter name to be able + to do a parameter swap via ``setattr(module, param_name, ...)`` or to get + the parameter via ``getattr(module, param_name)``. We additionally save + shared modules and shared parameter names to update them accordingly. + """ + + # Parameter names are unprefixed, e.g. 
"weight", not "lin.weight" + module: nn.Module + param_name: str + shared_modules: List[nn.Module] = field(default_factory=list) + shared_param_names: List[str] = field(default_factory=list) + + +@dataclass +class ExtensionsData: + # User-defined metadata passed from pre to post-all-gather + all_gather_metadata: Optional[Any] = None + # Save the all-gather input sizes to unflatten the all-gather outputs to ND + all_gather_input_sizes: Sequence[torch.Size] = () # ND + + def clear(self): + self.all_gather_metadata = None + self.all_gather_input_sizes = () + + +class FSDPParam: + """ + This class manages a parameter with FSDP or FSDP variants applied, + implementing dim-0 per-parameter sharding. + """ + + orig_dtype: torch.dtype + param_dtype: Optional[torch.dtype] + reduce_dtype: Optional[torch.dtype] + _orig_size: torch.Size # ND + sharded_size: torch.Size # ND + contiguous_sharded_stride: Tuple[int, ...] + padded_sharded_param_size: torch.Size # ND + sharded_post_forward_size: torch.Size # ND + contiguous_sharded_post_forward_stride: Tuple[int, ...] 
+ _sharded_param_data: torch.Tensor # 1D + sharded_param: nn.Parameter # ND + _sharded_post_forward_param_data: Optional[torch.Tensor] # 1D + _sharded_post_forward_param: Optional[nn.Parameter] # ND + _unsharded_param: nn.Parameter # ND + unsharded_accumulated_grad: Optional[torch.Tensor] # ND + _sharding_spec: DTensorSpec + # DTensor attributes (only defined for DTensor `param`): + _tp_spec: DTensorSpec + all_gather_outputs: List[torch.Tensor] # 1D + # All-gather extension attributes + _extensions_data: ExtensionsData + _unsharded_inner_tensors: List[torch.Tensor] + + def __init__( + self, + param: nn.Parameter, + module_info: ParamModuleInfo, + mesh_info: FSDPMeshInfo, + post_forward_mesh_info: Optional[FSDPMeshInfo], + device: torch.device, + mp_policy: MixedPrecisionPolicy, + offload_policy: OffloadPolicy, + ): + self._module_info: ParamModuleInfo = module_info + self.mesh_info = mesh_info + self.post_forward_mesh_info = post_forward_mesh_info + self.device = device + self.offload_to_cpu: bool = isinstance(offload_policy, CPUOffloadPolicy) + self.pin_memory = ( + self.offload_to_cpu and cast(CPUOffloadPolicy, offload_policy).pin_memory + ) + self.grad_offload_event: Optional[torch.cuda.Event] = None + self._init_sharded_param(param, device) + if self.post_forward_mesh_info: + self._init_sharded_post_forward_param_metadata(param) + self._init_extensions() + self.all_gather_outputs: List[torch.Tensor] = [] + self.unsharded_accumulated_grad = None + self._param_fqn: Optional[str] = None # prefixed from root module + # TODO: Remove this padding logic once DTensor pads the local tensor: + # https://github.com/pytorch/pytorch/issues/113045 + self._post_load_hook_handle = ( + module_info.module.register_load_state_dict_post_hook( + lambda *args, **kwargs: self.reset_sharded_param() + ) + ) + + @torch.no_grad() + def _init_sharded_param(self, param: nn.Parameter, device: torch.device): + if param.device != device and param.device.type != "meta": + raise AssertionError( 
+ f"Expects the parameter to already be moved to device {device} but got {param.device}" + ) + # TODO: Replace the sharded DTensor parameter construction logic with + # `distribute_tensor` after https://github.com/pytorch/pytorch/issues/116101 + # TODO: Simplify the following sharded parameter padding logic after + # https://github.com/pytorch/pytorch/issues/113045 + self.is_dtensor = isinstance(param, DTensor) + if self.is_dtensor: + self._tp_spec = cast(DTensor, param)._spec + dp_mesh, tp_mesh = (self.mesh_info.mesh, self._tp_spec.mesh) + dp_global_mesh = _mesh_resources.get_root_mesh(dp_mesh) + tp_global_mesh = _mesh_resources.get_root_mesh(tp_mesh) + if dp_global_mesh != tp_global_mesh or ( + dp_global_mesh is None or tp_global_mesh is None + ): + raise AssertionError( + "FSDP requires the DP and TP mesh to have the same parent mesh but got: \n" + f"DP's global mesh: {dp_global_mesh}\nTP's global mesh: {tp_global_mesh}" + ) + + name_dims_error = "FSDP requires named DeviceMesh dims for ND parallelism" + assert dp_mesh.mesh_dim_names is not None, name_dims_error + assert tp_mesh.mesh_dim_names is not None, name_dims_error + submesh_names = dp_mesh.mesh_dim_names + tp_mesh.mesh_dim_names + self._spmd_mesh = dp_global_mesh[submesh_names] + if len(self._tp_spec.placements) != 1: + raise NotImplementedError( + f"FSDP only supports 1D TP, not {self._tp_spec.placements}" + ) + split_factor = self._tp_spec.num_shards_map[0] + assert ( + 2 <= self._spmd_mesh.ndim <= 3 + ), f"_spmd_mesh.ndim can only be 2 or 3 but got {self._spmd_mesh.ndim}." + self._spmd_placements: Tuple[Placement, ...] 
+ dp_shard_tp_placement = ( + ( + _StridedShard(0, split_factor=split_factor) + if split_factor > 1 + else Shard(0) + ), + self._tp_spec.placements[0], + ) + if self._spmd_mesh.ndim == 2: + self._spmd_placements = dp_shard_tp_placement + else: + assert self.mesh_info.replicate_mesh_dim == 0 + self._spmd_placements = (Replicate(),) + dp_shard_tp_placement + self._sharding_spec = DTensorSpec( + self._spmd_mesh, + self._spmd_placements, + tensor_meta=self._tp_spec.tensor_meta, + ) + # NOTE: FSDP+TP does not support uneven sharding for now + # TODO: enable uneven sharding for FSDP+TP + if split_factor > 1: # FSDP has strided sharding on tensor dim 0 + num_shards = self._sharding_spec.num_shards_map[0] + tensor_size_dim_0 = self._sharding_spec.shape[0] + if tensor_size_dim_0 % num_shards != 0: + raise NotImplementedError( + "FSDP+TP sharding does not support uneven sharding for now: " + f"tensor dim 0 has size {tensor_size_dim_0} which cannot be " + f"evenly sharded into {num_shards} shards." 
+ ) + + param_data = cast(DTensor, param)._local_tensor + else: + self._spmd_mesh = self.mesh_info.mesh + if isinstance(self.mesh_info, HSDPMeshInfo): + self._spmd_placements = (Replicate(), Shard(0)) + else: + self._spmd_placements = (Shard(0),) + self._sharding_spec = DTensorSpec( + self._spmd_mesh, + self._spmd_placements, + tensor_meta=TensorMeta( + param.size(), + param.stride(), + param.dtype, + ), + ) + param_data = param + self._orig_size = param_data.size() + self._contiguous_orig_stride = make_contiguous_strides_for(self._orig_size) + shard_rank = self.mesh_info.shard_mesh_rank + shard_world_size = self.mesh_info.shard_mesh_size + chunks = _chunk_with_empty(param_data, shard_world_size, dim=0) + sharded_param = chunks[shard_rank] + self.sharded_size = _get_dim0_chunked_size(sharded_param, param_data.size()) + self.contiguous_sharded_stride = make_contiguous_strides_for(self.sharded_size) + padded_sharded_size = chunks[0].size() # 0th always padded + padded_sharded_param = param_data.new_zeros(padded_sharded_size) + self.padded_sharded_param_size = padded_sharded_param.size() + if sharded_param.numel() > 0: + padded_sharded_param[: sharded_param.size(0)].copy_(sharded_param) + if self.offload_to_cpu and not padded_sharded_param.is_meta: + padded_sharded_param = padded_sharded_param.cpu() + if self.pin_memory: + padded_sharded_param = padded_sharded_param.pin_memory() + self._sharded_param_data = padded_sharded_param.view(-1) + self.sharded_param = nn.Parameter( + self.to_sharded_dtensor(padded_sharded_param[: sharded_param.size(0)]) + ) + self.sharded_param.requires_grad_(param.requires_grad) + # Let `param_data` be freed normally when its ref count reaches 0 when + # the `fully_shard` call returns to allow provided parameters to alias + self._setattr_on_modules(self.sharded_param) + self.sharded_state = ShardedState.SHARDED + + def _init_sharded_post_forward_param_metadata(self, param: torch.Tensor) -> None: + mesh_info = self.post_forward_mesh_info + 
assert mesh_info is not None # mypy + param_data = param._local_tensor if isinstance(param, DTensor) else param + chunks = _chunk_with_empty(param_data, mesh_info.shard_mesh_size, dim=0) + self.sharded_post_forward_size = _get_dim0_chunked_size( + chunks[mesh_info.shard_mesh_rank], param_data.size() + ) + self.contiguous_sharded_post_forward_stride = make_contiguous_strides_for( + self.sharded_post_forward_size + ) + + def init_dtype_attrs(self, mp_policy: MixedPrecisionPolicy): + param_dtype, reduce_dtype = (mp_policy.param_dtype, mp_policy.reduce_dtype) + self.orig_dtype = self.sharded_param.dtype + # Clamp `param_dtype` to `None` if no casting is required + if param_dtype == self.orig_dtype: + param_dtype = None + self.param_dtype = param_dtype + self.reduce_dtype = reduce_dtype + # None indicates that the mixed precision is not enabled + + def _init_extensions(self) -> None: + inner_tensor = self._sharded_local_tensor + has_fsdp_pre_all_gather = hasattr(inner_tensor, "fsdp_pre_all_gather") + has_fsdp_post_all_gather = hasattr(inner_tensor, "fsdp_post_all_gather") + if has_fsdp_pre_all_gather != has_fsdp_post_all_gather: + raise AssertionError( + "Both fsdp_pre_all_gather and fsdp_post_all_gather should be defined " + f"if using all-gather extensions: {inner_tensor}" + ) + if has_fsdp_pre_all_gather: + if self.padded_sharded_param_size != self._sharded_local_tensor.size(): + raise NotImplementedError( + "FSDP all-gather extensions require even sharding on dim-0.\n" + f"{self._orig_size} is not divisible by FSDP world size {self.mesh_info.mesh.size()}." 
+ ) + self._extensions_data = ExtensionsData() + self._unsharded_inner_tensors: List[torch.Tensor] = [] + + def init_all_gather_outputs( + self, + all_gather_input_numels: List[int], + all_gather_input_dtypes: List[torch.dtype], + world_size: int, + device: torch.device, + force_recreate: bool = False, + ): + if not force_recreate and len(self.all_gather_outputs) > 0: + return # already initialized + self.all_gather_outputs = [ + torch.empty(torch.Size([numel * world_size]), dtype=dtype, device=device) + for numel, dtype in zip(all_gather_input_numels, all_gather_input_dtypes) + ] + + def init_unsharded_param(self): + """ + [Note: Invariants for torch.compile Traceable FSDP2] + 1. Under compile, we always re-populate the content of `self._unsharded_param` + per AllGather using the slow path. + 2. Under compile, we always recreate `self.all_gather_outputs` per AllGather. + This is to ensure the buffer creation is internal to the graph and + avoid `self.all_gather_outputs` being captured as a graph input. + 3. Under compile, at the end of `free_unsharded_param()`, we always clean up + `self.all_gather_outputs` and `self._unsharded_inner_tensors`, + to avoid them being captured as graph output. 
+ + With these invariants, only these tensors will be inputs to the graph: + - Sharded parameters + - Placeholders for the `self._unsharded_param` nn.Parameter + """ + if not ca.compiled_autograd_enabled and hasattr( + self, "_unsharded_param" + ): # after the 1st all-gather + inner_tensor = self._sharded_local_tensor + if not hasattr(inner_tensor, "fsdp_post_all_gather"): + return # already initialized + for tensor in self._unsharded_inner_tensors: + alloc_storage(tensor) + all_gather_outputs = self._unflatten_all_gather_outputs() + inner_tensor.fsdp_post_all_gather( + all_gather_outputs, + self._extensions_data.all_gather_metadata, + self.param_dtype or self.orig_dtype, + out=self._unsharded_param, + ) + self._extensions_data.clear() + return + inner_tensor = self._sharded_local_tensor + if not ca.compiled_autograd_enabled and hasattr( + inner_tensor, "fsdp_post_all_gather" + ): + all_gather_outputs = self._unflatten_all_gather_outputs() + ( + unsharded_tensor, + self._unsharded_inner_tensors, + ) = inner_tensor.fsdp_post_all_gather( + all_gather_outputs, + self._extensions_data.all_gather_metadata, + self.param_dtype or self.orig_dtype, + ) + self._extensions_data.clear() + else: + # For the default path (no post-all-gather), the all-gather output + # gives the unsharded parameter data directly + assert len(self.all_gather_outputs) == 1, f"{len(self.all_gather_outputs)}" + unsharded_tensor = self.all_gather_outputs[0] + unsharded_param = torch.as_strided( + unsharded_tensor, + self._orig_size, + self._contiguous_orig_stride, + storage_offset=0, + ) + if self.is_dtensor: + unsharded_param = _from_local_no_grad(unsharded_param, self._tp_spec) + if hasattr(self, "_unsharded_param"): + assert ca.compiled_autograd_enabled + with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter( + self._unsharded_param + ): + torch.ops.fsdp.set_.default(self._unsharded_param, unsharded_param) + else: + self._unsharded_param = nn.Parameter( + unsharded_param, 
requires_grad=self.sharded_param.requires_grad + ) + + def _unflatten_all_gather_outputs(self) -> Tuple[torch.Tensor, ...]: + return tuple( + t.view(-1, *s[1:]) + for t, s in zip( + self.all_gather_outputs, self._extensions_data.all_gather_input_sizes + ) + ) + + def to_sharded(self) -> None: + self._setattr_on_modules(self.sharded_param) + self.free_unsharded_param() + self.sharded_state = ShardedState.SHARDED + + def to_sharded_post_forward(self) -> None: + if self.is_dtensor: + raise NotImplementedError( + "Resharding to smaller mesh with TP is not supported yet" + ) + self._assert_in_states(ShardedState.UNSHARDED) + assert self.post_forward_mesh_info is not None # mypy + assert len(self.all_gather_outputs) == 1 + shard_world_size = self.post_forward_mesh_info.shard_mesh_size + if (numel := self.all_gather_outputs[0].numel()) % shard_world_size != 0: + _raise_assert_with_print( + f"All-gather output size ({numel}) must be divisible by the shard " + f"world size ({shard_world_size})" + ) + shard_rank = self.post_forward_mesh_info.shard_mesh_rank + sharded_numel = numel // shard_world_size + self._sharded_post_forward_param_data = ( + self.all_gather_outputs[0].narrow( + 0, sharded_numel * shard_rank, sharded_numel + ) + ).clone() # clone to be able to free all-gather output + sharded_post_forward_tensor = torch.as_strided( + self._sharded_post_forward_param_data, + size=self.sharded_post_forward_size, + stride=self.contiguous_sharded_post_forward_stride, + storage_offset=0, + ) + self._sharded_post_forward_param = nn.Parameter( + self.to_sharded_post_forward_dtensor(sharded_post_forward_tensor) + ) + self._setattr_on_modules(self._sharded_post_forward_param) + self.free_unsharded_param() + self.sharded_state = ShardedState.SHARDED_POST_FORWARD + + def to_unsharded(self) -> None: + # Assume that the data has been allocated and all-gathered + set_requires_grad_if_needed(self.sharded_param, self._unsharded_param) + self._setattr_on_modules(self._unsharded_param) + 
if self.sharded_state == ShardedState.SHARDED_POST_FORWARD: + # The data is allocated in the default stream via the post-forward + # reshard and must be kept alive for the next all-gather copy-in. + # Since we call this method after the copy-out, the data's lifetime + # is ensured without further synchronization. + self._sharded_post_forward_param = None + self._sharded_post_forward_param_data = None # free + self.sharded_state = ShardedState.UNSHARDED + + def _setattr_on_modules(self, param: nn.Parameter) -> None: + unsafe_setattr_param( + self._module_info.module, self._module_info.param_name, param + ) + for shared_module, shared_param_name in zip( + self._module_info.shared_modules, self._module_info.shared_param_names + ): + unsafe_setattr_param(shared_module, shared_param_name, param) + + def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor: + """ + Converts a local tensor representing either the sharded parameter or + sharded gradient to DTensor. + """ + if tensor.shape != self.sharded_size: + _raise_assert_with_print( + f"Expects size {self.sharded_size} but got {tensor.shape}" + ) + return _from_local_no_grad( + tensor, + self._sharding_spec, + ) + + def to_sharded_post_forward_dtensor(self, tensor: torch.Tensor) -> DTensor: + if tensor.shape != self.sharded_post_forward_size: + _raise_assert_with_print( + f"Expects size {self.sharded_post_forward_size} but got {tensor.shape}" + ) + assert isinstance(self.post_forward_mesh_info, HSDPMeshInfo) + # TODO: Prefer this DTensor to be read-only and generalize the + # placement once we support TP. 
+ post_forward_sharding_spec = DTensorSpec( + self.post_forward_mesh_info.mesh, + (Replicate(), Shard(0)), + tensor_meta=self._sharding_spec.tensor_meta, + ) + return _from_local_no_grad(tensor, post_forward_sharding_spec) + + def to_accumulated_grad_if_needed(self) -> None: + # Access `_unsharded_param` to bypass the sharded state check since we + # prefer to reshard before upcasting the gradient to save memory + if ( + self.reduce_dtype is None + or self._unsharded_param.grad is None + or self._unsharded_param.grad.dtype == self.reduce_dtype + ): + return + unsharded_grad = self._unsharded_param.grad + self._unsharded_param.grad = None + self.unsharded_accumulated_grad = unsharded_grad.to(self.reduce_dtype) + + def accumulate_unsharded_grad_if_needed(self) -> None: + if ( + self.unsharded_accumulated_grad is not None + and self.unsharded_param.grad is not None + ): + self.unsharded_accumulated_grad += self.unsharded_param.grad + self.unsharded_param.grad = None + + def alloc_all_gather_outputs(self) -> None: + for tensor in self.all_gather_outputs: + alloc_storage(tensor) + + def free_unsharded_param(self) -> None: + for tensor in itertools.chain( + self.all_gather_outputs, self._unsharded_inner_tensors + ): + free_storage(tensor) + if ca.compiled_autograd_enabled: + self.all_gather_outputs = [] + self._unsharded_inner_tensors = [] + + @property + def all_gather_inputs(self) -> List[torch.Tensor]: # 1D + self._assert_in_states(ShardedState.SHARDED, ShardedState.SHARDED_POST_FORWARD) + if self.sharded_state == ShardedState.SHARDED: + if not ca.compiled_autograd_enabled and hasattr( + self._sharded_local_tensor, "fsdp_pre_all_gather" + ): + sharded_local_tensor = self._sharded_local_tensor + if self.offload_to_cpu: + sharded_local_tensor = sharded_local_tensor.to( + self.device, non_blocking=True + ) + ( + all_gather_inputs, + self._extensions_data.all_gather_metadata, + ) = sharded_local_tensor.fsdp_pre_all_gather(self.mesh_info.mesh) + 
self._extensions_data.all_gather_input_sizes = [ + t.size() for t in all_gather_inputs + ] + return [t.view(-1) for t in all_gather_inputs] + sharded_param_data = self._sharded_param_data + if self.offload_to_cpu: + sharded_param_data = sharded_param_data.to( + self.device, non_blocking=True + ) + return [_to_dtype_if_needed(sharded_param_data, self.param_dtype)] + elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD: + if not ca.compiled_autograd_enabled and hasattr( + self._sharded_local_tensor, "fsdp_pre_all_gather" + ): + raise NotImplementedError + all_gather_input = _to_dtype_if_needed( + cast(torch.Tensor, self._sharded_post_forward_param_data), + self.param_dtype, + ) + return [all_gather_input] + return [torch.empty(0)] # mypy + + @property + def unsharded_param(self) -> nn.Parameter: # ND + self._assert_in_states(ShardedState.UNSHARDED) + return self._unsharded_param + + @property + def unsharded_grad_data(self) -> torch.Tensor: + grad = self.unsharded_param.grad + assert grad is not None, "Expects unsharded_param.grad to not be None" + return self._get_grad_inner_tensor(grad) + + @property + def unsharded_accumulated_grad_data(self) -> torch.Tensor: + grad = self.unsharded_accumulated_grad + assert grad is not None, "Expects unsharded_accumulated_grad to not be None" + return self._get_grad_inner_tensor(grad) + + def _get_grad_inner_tensor(self, grad: torch.Tensor) -> torch.Tensor: + if self.is_dtensor: + if isinstance(grad, AsyncCollectiveTensor): + grad = grad.wait() + assert isinstance(grad, DTensor), f"{type(grad)}" + if any(pl.is_partial() for pl in grad.placements): + placements = [ + Replicate() if pl.is_partial() else pl for pl in grad.placements + ] + grad = grad.redistribute(placements=placements) + grad = grad._local_tensor + return grad + + @property + def _sharded_local_tensor(self) -> torch.Tensor: + return cast(DTensor, self.sharded_param)._local_tensor + + def _assert_in_states(self, *states: ShardedState) -> None: + if 
self.sharded_state not in states: + _raise_assert_with_print( + f"Expects to be in one of {states}, not {self.sharded_state}" + ) + + def reset_sharded_param(self): + # For ops like `nn.Module._apply` or `load_state_dict(assign=True)` + # that change the sharded parameter tensor, we may need to re-pad the + # sharded local tensor and re-save the reference. + module_info = self._module_info + new_param = getattr(module_info.module, module_info.param_name) + if new_param is not self.sharded_param: + if torch.__future__.get_swap_module_params_on_conversion(): + raise AssertionError( + f"Expects swap_tensors to preserve object but got {new_param} " + f"instead of {self.sharded_param}" + ) + self.sharded_param = new_param + local_tensor = new_param._local_tensor + if local_tensor.is_meta: + return + padded_sharded_size = self.padded_sharded_param_size + if local_tensor.size() != padded_sharded_size: + padded_local_tensor = local_tensor.new_zeros(padded_sharded_size) + padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor) + local_tensor = padded_local_tensor + if self.pin_memory and not local_tensor.is_pinned(): + local_tensor = local_tensor.cpu().pin_memory() + self._sharded_param_data = local_tensor.view(-1) + assert isinstance(self.sharded_param, DTensor) # mypy + self.sharded_param._local_tensor = local_tensor[: self.sharded_size[0]] + + def __repr__(self): + return f"FSDPParam(fqn={self._param_fqn}, orig_size={self._orig_size})" + + +def alloc_storage(tensor: torch.Tensor) -> None: + size = tensor.numel() * tensor.itemsize + if (storage := tensor.untyped_storage()).size() != size: + storage.resize_(size) + + +def free_storage(tensor: torch.Tensor) -> None: + if (storage := tensor.untyped_storage()).size() != 0: + storage.resize_(0) + + +# NOTE: These bypass `nn.Module.__setattr__` checks, which incur non-trivial +# CPU overhead, if the module did not override it. 
For FSDP, we know we do not +# need those checks when transitioning between sharded/unsharded parameters. +def unsafe_setattr_param( + module: nn.Module, param_name: str, param: nn.Parameter +) -> None: + if getattr(module.__setattr__, "__func__", None) is nn.Module.__setattr__: + module._parameters[param_name] = param + else: # slow path + setattr(module, param_name, param) + + +def set_requires_grad_if_needed( + src_tensor: torch.Tensor, dst_tensor: torch.Tensor +) -> None: + # Only call `requires_grad_` if needed to avoid the Python <> C++ context + # switch overhead + if src_tensor.requires_grad != dst_tensor.requires_grad: + dst_tensor.requires_grad_(src_tensor.requires_grad) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py new file mode 100644 index 0000000000000000000000000000000000000000..4afa0f431075f7499b5808161fb42925494f1185 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py @@ -0,0 +1,131 @@ +# mypy: allow-untyped-decorators +from typing import Callable, Iterable, Optional, Union +from typing_extensions import deprecated + +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed._composable.contract import contract +from torch.distributed._composable_state import _get_module_state, _insert_module_state +from torch.distributed.fsdp._common_utils import _FSDPState +from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo +from torch.distributed.fsdp._init_utils import ( + _init_buffer_state, + _init_core_state, + _init_device_handle, + _init_ignored_module_states, + _init_param_handle_from_module, + _init_prefetching_state, + _init_process_group_state, + _init_runtime_state, + _init_state_dict_state, + HYBRID_SHARDING_STRATEGIES, +) +from torch.distributed.fsdp._runtime_utils import ( + _register_post_forward_hook, + 
_register_pre_forward_hook, + _register_root_pre_forward_hook, +) +from torch.distributed.fsdp._state_dict_utils import _register_all_state_dict_hooks +from torch.distributed.fsdp._wrap_utils import _auto_wrap +from torch.distributed.fsdp.api import ( + BackwardPrefetch, + CPUOffload, + MixedPrecision, + ShardingStrategy, +) +from torch.distributed.fsdp.wrap import _Policy + + +@contract(state_cls=_FSDPState) +@deprecated( + "`torch.distributed._composable.fully_shard` is being deprecated. " + "You can continue to use the wrapper based FSDP. " + "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py. " + "`torch.distributed._composable.fully_shard` will be removed after PyTorch 2.5.", + category=FutureWarning, +) +def fully_shard( + module: nn.Module, + *, + process_group: Optional[dist.ProcessGroup] = None, + policy: Optional[_Policy] = None, + strategy: Optional[ShardingStrategy] = None, + mixed_precision: Optional[MixedPrecision] = None, + cpu_offload: Optional[CPUOffload] = None, + ignored_modules: Optional[Iterable[torch.nn.Module]] = None, + device_id: Optional[Union[int, torch.device]] = None, + param_init_fn: Optional[Callable[[nn.Module], None]] = None, + sync_module_states: bool = False, + forward_prefetch: bool = False, + ignored_states: Union[ + Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]] + ] = None, +) -> nn.Module: + """Applies ``FullyShardedDataParallel`` (FSDP) semantics to ``module``.""" + torch._C._log_api_usage_once("torch.distributed.fully_shard") + # Enforce the new auto wrap policy + if policy is not None and not isinstance(policy, _Policy): + raise ValueError(f"Expects a `_Policy` but got {policy}") + state = fully_shard.state(module) + state = _init_ignored_module_states(state, module, ignored_modules, ignored_states) + state = _init_device_handle(state, module, state._ignored_params, device_id) + _annotate_modules_for_dynamo(module, 
state._ignored_modules, True) + state = _init_process_group_state(state, process_group, strategy, policy) + if policy is not None: + root_kwargs = { + "process_group": process_group, + "strategy": strategy, + "mixed_precision": mixed_precision, + "cpu_offload": cpu_offload, + "ignored_modules": ignored_modules, + "device_id": device_id, + "param_init_fn": param_init_fn, + "sync_module_states": sync_module_states, + "forward_prefetch": forward_prefetch, + "ignored_states": ignored_states, + } + if strategy in HYBRID_SHARDING_STRATEGIES: + root_kwargs["process_group"] = (state.process_group, state._inter_node_pg) + _auto_wrap( + module, + policy, + state._ignored_modules, + state._ignored_params, + root_kwargs, + fully_shard, + ) + state = _init_core_state( + state, + strategy or ShardingStrategy.FULL_SHARD, + mixed_precision, + cpu_offload, + limit_all_gathers=True, + use_orig_params=True, + backward_prefetch_limit=1, + forward_prefetch_limit=1, + ) + state = _init_runtime_state(state) + state = _init_prefetching_state( + state, BackwardPrefetch.BACKWARD_PRE, forward_prefetch=forward_prefetch + ) + state = _init_buffer_state(state, module) + state = _init_param_handle_from_module( + state, module, device_id, param_init_fn, sync_module_states + ) + state = _init_state_dict_state(state) + _register_all_state_dict_hooks(state) + _register_pre_forward_hook(state, module) + _register_post_forward_hook(state, module) + _register_root_pre_forward_hook(state, module) # prepend last + # Always insert the state for the passed-in module even if it has no + # managed parameters, in which case it has no handles and does not appear + # in `_fully_sharded_module_to_handles` + _insert_module_state(module, state) + for submodule in module.modules(): + if ( + submodule in state._fully_sharded_module_to_handle + and _get_module_state(submodule) is None + ): + _insert_module_state(submodule, state) + return module diff --git 
a/janus/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/mod_tracker.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/mod_tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00e7819aadbeb3dbaa0ad92fd33ea67a73874486 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/mod_tracker.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/_tools/runtime_estimator.py b/janus/lib/python3.10/site-packages/torch/distributed/_tools/runtime_estimator.py new file mode 100644 index 0000000000000000000000000000000000000000..87f4d3f36b60e09e47e013018690359e6cc72140 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/_tools/runtime_estimator.py @@ -0,0 +1,527 @@ +# Owner(s): ["module: unknown"] +import math +import os +from collections import defaultdict +from typing import Any, Callable, Dict, List, Set, Tuple +from typing_extensions import Self + +import torch +import torch.utils._pytree as pytree +from torch._guards import active_fake_mode +from torch._inductor.utils import get_device_tflops, get_gpu_dram_gbps +from torch._subclasses.fake_tensor import FakeTensorMode +from torch.distributed._tools.mod_tracker import ModTracker +from torch.utils._mode_utils import no_dispatch +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.flop_counter import flop_registry + + +aten = torch.ops.aten + +# This value is hard-coded here: +# https://github.com/pytorch/pytorch/blob/5fba5d83f0703ff8077ab65448a998e9ad6598fd/c10/cuda/CUDACachingAllocator.cpp#L117 +_PYTORCH_MIN_ALLOCATE = ( + 2**9 if int(os.environ.get("PYTORCH_NO_CUDA_MEMORY_CACHING", 0)) == 0 else 1 +) + +# No fall-back kernel needed/exists for view ops +_VIEW_OPS = { + aten.lift_fresh, + aten.t, + aten.transpose, + aten.view, + aten.detach, + aten._unsafe_view, + aten.split, + aten.adjoint, + 
aten.as_strided, + aten.diagonal, + aten.expand, + aten.expand_as, + aten.movedim, + aten.permute, + aten.select, + aten.squeeze, + aten.mT, + aten.mH, + aten.real, + aten.imag, + aten.view_as, + aten.unflatten, + aten.unfold, + aten.unbind, + aten.unsqueeze, + aten.vsplit, + aten.hsplit, + aten.split_with_sizes, + aten.swapaxes, + aten.swapdims, + aten.chunk, +} +# We can ignore benchmarking tensor create ops +_CREATE_OPS = { + aten.randint, + aten.randn, + aten.rand, + aten.randn_like, + aten.rand_like, + aten.randint_like, + aten.arange, + aten.ones_like, + aten.zeros_like, +} + +_IGNORE_OPS = _VIEW_OPS | _CREATE_OPS + +__all__ = ["RuntimeEstimator"] + + +class RuntimeEstimator(TorchDispatchMode): + """ + Estimates the GPU runtime in milliseconds using various estimation methods under the ``FakeTensorMode``. + + This class provides a ``TorchDispatchMode`` based context manager that can be used to estimate the eager + runtime of PyTorch functions. It supports two estimation modes, benchmarking (`operator-level-benchmark`) and + roofline cost modeling (`operator-level-cost-model`). + For modules executed under this context manager, it agggregates the forward and backward operation runtimes + and also records their execution orders. + + Attributes: + mod_runtimes (Dict[str, Dict[str, float]]): A dictionary of module runtimes. The key to the outer dictionary + is the fully qualified name (FQN) of the module. For each module the forward and backward runtimes of the + operations are aggregated in the inner dictionary keyed by 'fw' and 'bw'. + mod_fw_pre_order (List[str]): List of module FQNs in pre-forward execution order. + mod_bw_pre_order (List[str]): List of module FQNs in pre-backward execution order. + mod_fw_post_order (List[str]): List of module FQNs in post-forward execution order. + mod_bw_post_order (List[str]): List of module FQNs in post-backward execution order. + total_runtime (float): The total estimated runtime in milliseconds. 
+ + Note: + 1) The benchmarking estimate mode will execute kernels on GPU and assumes that every operation can run in + isolation without causing an OOM error. It is also designed to be used only under ``FakeTensorMode``. + 2) Currently wrapper tensor sub-classes such as ``DTensor`` won't produce correct estimates. We plan to support + them in future PRs. + 3) We only estimate the compute time, if your code has communication, it will not be considered. Again, we will + support this in future PRs. + + Example usage: + + .. code-block:: python + + runtime_estimator = RuntimeEstimator() + with FakeTensorMode(): + module = ... + optimizer = ... + inp = ... + with runtime_estimator(estimate_mode_type="operator-level-cost-model"): + loss = module(inp) + loss.backward() + optimizer.step() + optimizer.zero_grad() + runtime_estimator.display_modulewise_stats() + """ + + _float_types: Set[torch.dtype] = { + torch.float16, + torch.bfloat16, + torch.float32, + torch.float64, + } + _no_fallback_kernel: Set[torch._ops._OpNamespace] = set() + fake_mode: FakeTensorMode + + def __init__(self) -> None: + super().__init__() + self._estimate: Callable + self._estimate_mode_type: str + self._mod_tracker = ModTracker() + self.mod_runtimes: Dict[str, Dict[str, float]] = defaultdict( + lambda: defaultdict(lambda: 0.0) + ) + self.mod_fw_pre_order: List[str] = [] + self.mod_bw_pre_order: List[str] = [] + self.mod_fw_post_order: List[str] = [] + self.mod_bw_post_order: List[str] = [] + self.total_runtime: float = 0.0 + + # Adapted from: https://github.com/pytorch/pytorch/blob/9b902b3ee3bd608a19543362b66bf06c373dd374/torch/_subclasses/fake_tensor.py#L1969 # noqa: PGH004,B950 + # NB: returns fake tensors + @classmethod + def _maybe_run_and_benchmark_fallback_kernel( # type: ignore[no-untyped-def] + cls, + func, + args, + kwargs, + orig_not_implemented_exception, + ): + """ + Runs and benchmarks a fallback kernel for a given function. + + Args: + func (Callable): The function to benchmark. 
+ args (Tuple): The arguments to pass to the function. + kwargs (Dict[str, Any]): The keyword arguments to pass to the function. + orig_not_implemented_exception (Exception): The original exception to raise if the fallback kernel + is not implemented. + + Returns: + Tuple[Any, float]: A tuple containing the result of the function and + the mean operation time in milliseconds. + """ + # these should all be supported, just to be safe + # avoid fallback for operators which inplace modify metadata + # because the input fake tensors would be umodified + if torch.Tag.inplace_view in func.tags: # type: ignore[attr-defined] + raise orig_not_implemented_exception + + inp_impls = {} + flat_args, args_spec = pytree.tree_flatten((args, kwargs)) + # Don't use in_kernel_invocation_manager(fake_mode) as we want to do + # REAL compute (not with meta device) + with no_dispatch(): + + def to_real_tensor(e): # type: ignore[no-untyped-def] + if cls.fake_mode.is_our_fake(e): + if e.dtype in cls._float_types: + out = torch.rand_like(e, device=e.fake_device) + else: + out = torch.ones_like(e, device=e.fake_device) + if e.is_sparse: + out._coalesced_(e.is_coalesced()) + inp_impls[id(out)] = e + return out + return e + + flat_args = [to_real_tensor(a) for a in flat_args] + args, kwargs = pytree.tree_unflatten(flat_args, args_spec) + r = func(*args, **kwargs) + warmup_iters, actual_iters = 2, 3 + for _ in range(warmup_iters): + func(*args, **kwargs) + start_event = torch.cuda.Event(enable_timing=True) + end_event = torch.cuda.Event(enable_timing=True) + start_event.record(torch.cuda.current_stream()) + for _ in range(actual_iters): + func(*args, **kwargs) + end_event.record(torch.cuda.current_stream()) + torch.cuda.synchronize() + cuda_time = start_event.elapsed_time(end_event) + mean_op_time = cuda_time / actual_iters + + storages = set() + + for e in flat_args: + if isinstance(e, torch.Tensor): + if not e.is_sparse: + storages.add(e._typed_storage()._cdata) + + # TODO: also check metadata 
change on inputs + # proper aliasing/metadata relationship between outputs and inputs will + # not be set up, bc of conversion to device, unless we can reuse an + # input impl + + def map_out(e): # type: ignore[no-untyped-def] + if id(e) not in inp_impls and ( + isinstance(e, torch.Tensor) + and not e.is_sparse + and e._typed_storage()._cdata in storages + ): + raise orig_not_implemented_exception + + if isinstance(e, torch.Tensor): + if id(e) in inp_impls: + return inp_impls[id(e)] + else: + return cls.fake_mode.fake_tensor_converter.from_real_tensor( + cls.fake_mode, e + ) + else: + return e + + return (pytree.tree_map(map_out, r), mean_op_time) + + @classmethod + def _benchmark_estimate(cls, func, args, kwargs) -> Tuple[Any, float]: # type: ignore[no-untyped-def] + """ + Estimates the runtime of a function using benchmarking. + + Args: + func: The function to estimate. + args: The arguments to pass to the function. + kwargs: The keyword arguments to pass to the function. + res: The result of the function. + + Returns: + Tuple[Any, float]: A tuple containing the result of the function and + the mean operation time in milliseconds. + """ + assert isinstance( + cls.fake_mode, FakeTensorMode + ), "Initialize/Assign FakeTensorMode before using this function" + mean_op_time = 0.0 + if func._overloadpacket not in _VIEW_OPS: + try: + res, mean_op_time = cls._maybe_run_and_benchmark_fallback_kernel( + func, + args, + kwargs, + NotImplementedError, + ) + return (res, mean_op_time) + except NotImplementedError: + cls._no_fallback_kernel.add(func._overloadpacket) + res = func(*args, **kwargs or {}) + return (res, mean_op_time) + + # Adapted from: https://github.com/pytorch/pytorch/blob/9b902b3ee3bd608a19543362b66bf06c373dd374/torch/_inductor/scheduler.py#L589 # noqa: PGH004,B950 + @classmethod + def _roofline_estimate(cls, func, args, kwargs) -> Tuple[Any, float]: # type: ignore[no-untyped-def] + """ + Estimates the runtime of a function using a roofline cost model. 
+ + Args: + func: The function to estimate. + args: The arguments to pass to the function. + kwargs: The keyword arguments to pass to the function. + out: The output of the function. + + Returns: + Tuple[Any, float]: A tuple containing the result of the function and + the mean operation time in milliseconds. + """ + assert ( + torch.cuda.is_available() + ), "Roofline estimation needs to access CUDA capabilities to make estimations" + + def get_num_bytes(t: torch.Tensor) -> int: + """ + Calculates the memory consumption of a tensor. + + Args: + t (torch.Tensor): The input tensor. + + Returns: + int: The memory consumption of the tensor in bytes. + """ + num_bytes = t.untyped_storage().nbytes() + mem_consumed = ( + math.ceil(num_bytes / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE + ) + return mem_consumed + + def get_compute_time(func_packet, args, kwargs, out, out_dtypes) -> float: # type: ignore[no-untyped-def] + """ + Estimates the compute time of an aten operator. + + Args: + func_packet: The operator overload packet. + args: The arguments to the operator. + kwargs: The keyword arguments to the operator. + out: The output of the operator. + out_dtypes: The output data types. + + Returns: + float: The estimated compute time in nanoseconds. 
+ """ + if func_packet in flop_registry: + assert ( + len(out_dtypes) == 1 + ), f"Only support single out dtype got {out_dtypes} for {func_packet}" + dtype = out_dtypes.pop() + # This actually gives peta-FLOPs/s hence multiply by 1e15 to get the FLOPs/s + peak_gpu_flops = get_device_tflops(dtype) * 1e15 + # We can expect to achieve 75% of theoretical peak flops + factor = 0.75 + peak_empirical_flops = factor * peak_gpu_flops + flop_count_func = flop_registry[func_packet] + # We divide by a factor of 2 to get the MACs (multiply and accumulate) + flop_count = flop_count_func(*args, **kwargs, out_val=out) / 2 + # We multiply by 1e9 to get the time in nano seconds + compute_time = (flop_count / peak_empirical_flops) * 1e9 + return compute_time + return 0.0 + + def get_transfer_time(flat_args_kwargs, flat_outs) -> float: # type: ignore[no-untyped-def] + """ + Estimates the memory transfer time of input and output tensors. + + Args: + flat_args_kwargs (List[torch.Tensor]): The flat list of arguments and keyword arguments. + flat_outs (List[torch.Tensor]): The flat list of outputs. + + Returns: + float: The estimated memory transfer time in nanoseconds. + """ + gpu_memory_bandwidth = get_gpu_dram_gbps() + read_bytes = sum( + get_num_bytes(t) + for t in flat_args_kwargs + if isinstance(t, torch.Tensor) + ) + write_bytes = sum( + get_num_bytes(t) for t in flat_outs if isinstance(t, torch.Tensor) + ) + counted_bytes = read_bytes + write_bytes + # The GPU memory bandwidth is in GB/s so the transfer time is in nanoseconds + transfer_time = counted_bytes / gpu_memory_bandwidth + return transfer_time + + # Roofline Cost Model Explanation + + # The roofline cost model estimates the execution time of an operator based on + # the device's empirical maximum FLOPs/sec (pi) and device DRAM bandwidth (beta). 
+ + # Variables: + # - pi: Maximum empirical FLOPs/sec of the device + # - beta: Maximum empirical device DRAM bandwidth (bytes/sec) of the device + # - I: Arithmetic intensity of the operator (FLOPs/bytes) + # - op_flops: FLOPs required by the operator + # - op_bytes: Bytes transferred to and from DRAM for the operator + + # Calculation Steps: + # 1. Calculate arithmetic intensity: I = op_flops / op_bytes + # 2. Calculate estimated FLOPs/sec: est_flops_sec = min(pi, beta * I) + # 3. Calculate estimated operator time: estimated_op_time = op_flops / est_flops_sec + # This simplifies to: estimated_op_time = max(op_flops / pi, op_flops / (beta * I)) + # Further simplifying: estimated_op_time = max(op_flops / pi, op_bytes / beta) + + # Simplified Formulas: + # - compute_time = op_flops / pi + # - transfer_time = op_bytes / beta + # - estimated_op_time = max(compute_time, transfer_time) + + kwargs = kwargs if kwargs else {} + out = func(*args, **kwargs) + op_time = 0.0 + func_packet = func._overloadpacket + if func_packet not in _IGNORE_OPS: + flat_args_kwargs, args_spec = pytree.tree_flatten((args, kwargs)) + flat_outs, out_spec = pytree.tree_flatten(out) + transfer_time = get_transfer_time(flat_args_kwargs, flat_outs) + + out_dtypes = { + t.dtype + for t in flat_outs + if isinstance(t, torch.Tensor) and t.dtype in cls._float_types + } + + args, kwargs = pytree.tree_unflatten(flat_args_kwargs, args_spec) + out = pytree.tree_unflatten(flat_outs, out_spec) + + compute_time = get_compute_time(func_packet, args, kwargs, out, out_dtypes) + # We get the estimated time as the max of the transfer time and + # compute time. We divide by 1e6 to get the time in ms + op_time = max(transfer_time, compute_time) / 1e6 + + return (out, op_time) + + def display_modulewise_stats(self, depth: int = 2) -> None: + """ + Displays module-wise statistics collected by ``RuntimeEstimator``. + + Prints the pre-forward and pre-backward execution orders. 
+ Displays the module-wise forward and backward runtimes in milliseconds. + + Args: + depth (int): The maximum depth of module hierarchy to display (default to 2). + """ + print("Pre-Forward Execution Order: ") + for mod_fqn in self.mod_fw_pre_order: + mod_depth = mod_fqn.count(".") + 1 + if mod_depth > depth: + continue + print(mod_fqn) + print("Pre-Backward Execution Order: ") + for mod_fqn in self.mod_bw_pre_order: + mod_depth = mod_fqn.count(".") + 1 + if mod_depth > depth: + continue + print(mod_fqn) + for mod_fqn, runtimes in self.mod_runtimes.items(): + mod_depth = mod_fqn.count(".") + 1 + if mod_depth > depth: + continue + print( + f"{mod_fqn} fw: {runtimes.get('fw', 0.0):.3f}ms bw: {runtimes.get('bw', 0.0):.3f}ms" + ) + + def __torch_dispatch__(self, func, types, args=..., kwargs=None): # type: ignore[no-untyped-def] + # TODO: @sanketpurandare: Flatten tensors by desugaring the tensor subclasses + # TODO: @sanketpurandare: Add logic for incorporating communication time + res, op_time = self._estimate(func, args, kwargs) + for par in self._mod_tracker.parents: + if self._mod_tracker.is_bw: + self.mod_runtimes[par]["bw"] += op_time + else: + self.mod_runtimes[par]["fw"] += op_time + self.total_runtime += op_time + return res + + def __call__(self, estimate_mode_type: str) -> Self: + """ + Sets the estimate mode type. + + Currently supported modes: + - "operator-level-benchmark": Estimates runtime using operator benchmarking. + - "operator-level-cost-model": Estimates runtime using roofline cost model. + + Args: + estimate_mode_type (str): The type of estimate mode to use. + + Returns: + RuntimeEstimator: The runtime estimator instance. + + Raises: + NotImplementedError: If the estimate mode type is not supported. 
+ """ + if estimate_mode_type == "operator-level-benchmark": + self._estimate = RuntimeEstimator._benchmark_estimate + elif estimate_mode_type == "operator-level-cost-model": + self._estimate = RuntimeEstimator._roofline_estimate + else: + raise NotImplementedError( + f"estimate_mode_type {estimate_mode_type} not supported" + ) + self._estimate_mode_type = estimate_mode_type + return self + + def __enter__(self) -> Self: + fake_mode = active_fake_mode() + assert isinstance( + fake_mode, FakeTensorMode + ), "No FakeTensorMode found, designed to used under FakeTensorMode" + RuntimeEstimator.fake_mode = fake_mode + self.total_runtime = 0.0 + self.mod_runtimes = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.mod_fw_pre_order.clear() + self.mod_bw_pre_order.clear() + self.mod_fw_post_order.clear() + self.mod_bw_post_order.clear() + self._mod_tracker.register_user_hooks( + pre_fw_hook=lambda mod, inp: self.mod_fw_pre_order.append( + self._mod_tracker.get_known_fqn(mod) + ), + pre_bw_hook=lambda mod, g_out: self.mod_bw_pre_order.append( + self._mod_tracker.get_known_fqn(mod) + ), + post_fw_hook=lambda mod, inp, out: self.mod_fw_post_order.append( + self._mod_tracker.get_known_fqn(mod) + ), + post_bw_hook=lambda mod, g_inp: self.mod_bw_post_order.append( + self._mod_tracker.get_known_fqn(mod) + ), + ) + self._mod_tracker.__enter__() + super().__enter__() + return self + + def __exit__(self, *args: Any) -> None: + print( + f"Estimated ({self._estimate_mode_type})" + f"total_time: {self.total_runtime:.3f} ms" + ) + if len(self._no_fallback_kernel) > 0: + print("no_fallback_kernel: ", list(self._no_fallback_kernel)) + super().__exit__(*args) + self._mod_tracker.clear_user_hooks() + self._mod_tracker.__exit__() diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c0771f83bb3a453b72eedba1cad061a41f2f5bd7 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86300bb2ea13d23253eeae4696082789b4b00305 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_common_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b577db5c338f596181d5600c0103242b41f674e4 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_exec_order_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eeb05f7e2f12f0039a1ca4f0ddaa9bed41d85ec4 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_flat_param.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..101df2c56ae49213007f032e4c69d96cd2bcbf3d Binary files /dev/null and 
b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_fsdp_extensions.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42c0f42e290b4a5020dd7289cee35ab4f7156c74 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_init_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f493f00565caf0a8cc0e41a33e00c87f459d13d3 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_limiter_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa07422d13e77011c53a9cffb2cc046dfa88632d Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_state_dict_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d05413effb6a8b39e922d5716155a53fcfba654d Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_trace_utils.cpython-310.pyc differ diff --git 
a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3682d05f654b589e3093ac0cdfe4ee9324ff1be3 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..768ff6a1aa5d67fc5b29c676ad56f87047e1138e Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..396f058c45a1aef3bba5e3c8b8b3f4b6dfbe1bd3 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_common_utils.py @@ -0,0 +1,558 @@ +# mypy: allow-untyped-defs +""" +This file includes private common utilities for FSDP. 
+""" +import logging +import traceback +import warnings +import weakref +from enum import auto, Enum +from functools import partial +from typing import ( + Any, + Callable, + cast, + Dict, + Generator, + Iterable, + List, + no_type_check, + Optional, + Set, + Tuple, + Type, + TYPE_CHECKING, +) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._flat_param as flat_param_file +import torch.nn as nn +from torch.distributed._composable_state import _get_module_state, _State +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + _CHECKPOINT_PREFIX, +) +from torch.distributed.utils import _apply_to_tensors +from torch.utils._mode_utils import no_dispatch + +from .api import ( + FullOptimStateDictConfig, + FullStateDictConfig, + OptimStateDictConfig, + ShardingStrategy, + StateDictConfig, + StateDictType, +) + + +if TYPE_CHECKING: + from torch.distributed.device_mesh import DeviceMesh + from torch.distributed.fsdp._fsdp_extensions import FSDPExtensions + + from ._flat_param import FlatParamHandle + +FSDP_WRAPPED_MODULE = "_fsdp_wrapped_module" +FSDP_PREFIX = FSDP_WRAPPED_MODULE + "." +FSDP_FLATTENED = "_fsdp_flattened" + +# Save a global mapping from module to its input tensor dtype to be populated +# during the forward pre-hook and consumed in the forward post-hook when +# overriding a module's mixed precision +# NOTE: We currently take the last input tensor's dtype in the case of multiple +# floating-point input tensors, which may be incorrect. However, since there is +# not a 1:1 correspondence between input and output tensors, we must use *some* +# heuristic like this to predict the desired output dtype. +_MODULE_TO_INP_DTYPE: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary() + + +class _FSDPDeviceHandle: + """ + This is a simple abstraction for FSDP computing devices, + which enables custom backends that implement CUDA-like + semantics to be integrated with FSDP. 
+ """ + + def __init__(self, device: torch.device, backend: Any = None): + if backend is None: + try: + self.__backend = getattr(torch, device.type) + self.__device = device + except AttributeError as exc: + raise AttributeError( + f"Device '{device}' does not have a corresponding backend registered as 'torch.{device.type}'." + ) from exc + else: + self.__backend = backend + + @classmethod + def from_device(cls, device: torch.device) -> "_FSDPDeviceHandle": + """ + Return a device handle corresponding to the device, and through this handle, + operations with the same semantics as CUDA can be performed on the device. + Just return torch.cuda if the device is cuda to make attribute-access faster. + Custom backend must first register a module with the same name with {device.type} on torch. + """ + if device.type == "cuda": + return cast(_FSDPDeviceHandle, torch.cuda) + elif device.type == "mtia": + return cast(_FSDPDeviceHandle, torch.mtia) + return cls(device) + + def __getattr__(self, __name: str) -> Any: + try: + return getattr(self.__backend, __name) + except AttributeError as exc: + raise AttributeError( + f"Custom backend '{self.__device.type}' not implement 'torch.{self.__device.type}.{__name}'" + ) from exc + + +class _UninitializedDeviceHandle(_FSDPDeviceHandle): + def __init__(self) -> None: + pass + + def __getattribute__(self, __name: str) -> Any: + raise RuntimeError("Trying to use an uninitialized device handle.") + + +class _FSDPState(_State): + def __init__(self) -> None: + # TODO: Move all the attributes to this class to enable typing for + # FSDP/fully_shard. 
+ self._ignored_modules: Set[nn.Module] = set() + self._ignored_params: Set[nn.Parameter] = set() + # Buffer names are cleaned (without wrapper prefixes) + self._ignored_buffer_names: Set[str] = set() + self.process_group: Optional[dist.ProcessGroup] = None + self.rank: int = -1 + self.world_size: int = -1 + self._device_mesh: Optional[DeviceMesh] = None + self.sharding_strategy = ShardingStrategy.FULL_SHARD + self._use_orig_params: bool = False + self.training_state = TrainingState.IDLE + self._unshard_params_ctx: Dict[nn.Module, Generator] = {} + self._state_dict_type: StateDictType = StateDictType.FULL_STATE_DICT + self._state_dict_config: StateDictConfig = FullStateDictConfig() + self._optim_state_dict_config: OptimStateDictConfig = FullOptimStateDictConfig() + self._is_root: Optional[bool] = None + self._handle: Optional[flat_param_file.FlatParamHandle] = None + self._fully_sharded_module_to_handle: Dict[ + nn.Module, Optional[flat_param_file.FlatParamHandle] + ] = {} + self.compute_device: Optional[torch.device] = None + self._gradient_predivide_factor: int = 0 + self._gradient_postdivide_factor: int = 0 + self._comm_hook: Optional[Callable] = None + self._comm_hook_state: Optional[Any] = None + self._unshard_event: Optional[torch.Event] = None + # Abstract device handle for fsdp compute device. 
For now, + # the compute device must implement cuda semantics used by fsdp + self._device_handle: _FSDPDeviceHandle = _UninitializedDeviceHandle() + # All following attributes should only be used for root states: + # Save these static lists to avoid the repeated tree traversals + self._all_fsdp_states: List[_FSDPState] = [] + self._all_handles: List[flat_param_file.FlatParamHandle] = [] + self._fsdp_extension: Optional[FSDPExtensions] = None + + +def _get_module_fsdp_state(module: nn.Module) -> Optional[_FSDPState]: + state = _get_module_state(module) + if state is None or not isinstance(state, _FSDPState): + return None + return state + + +def _get_module_fsdp_state_if_fully_sharded_module( + module: nn.Module, +) -> Optional[_FSDPState]: + state = _get_module_fsdp_state(module) + if state is None: + return None + if state == module: # FullyShardedDataParallel module case. + return state + if module in state._fully_sharded_module_to_handle: # fully_shard case. + return state + return None + + +class TrainingState(Enum): + """ + An enum that indicates the state of a ``FullyShardedDataParallel` instance. + """ + + IDLE = auto() + FORWARD_BACKWARD = auto() + SUMMON_FULL_PARAMS = auto() + + +class HandleTrainingState(Enum): + """ + An enum that indicates the state of a ``FlatParamHandle`. + """ + + IDLE = auto() + FORWARD = auto() + BACKWARD_PRE = auto() + BACKWARD_POST = auto() + SUMMON_FULL_PARAMS = auto() + + +def _is_composable(state: _FSDPState): + # TODO: This is a temporary hack for differentiate between code paths. + return not isinstance(state, nn.Module) + + +@no_type_check +def _module_handle(state: _FSDPState, module: nn.Module) -> Optional["FlatParamHandle"]: + """ + Returns the ``FlatParamHandle`` s corresponding to ``module``. This is + the handle that contains some parameter in ``module``. 
+ """ + if _is_composable(state): + # A valid FSDP state may have no managed parameters and hence no + # handles, meaning no entry in `_fully_sharded_module_to_handles` + if state._handle is None: + return None + assert ( + module in state._fully_sharded_module_to_handle + ), f"Expects a fully sharded module but got {module} on rank {state.rank}" + return state._fully_sharded_module_to_handle[module] + else: + # NOTE: This assumes `module` is a `FullyShardedDataParallel` instance. + return module._handle + + +@no_type_check +def _has_fsdp_params(state: _FSDPState, module: nn.Module) -> bool: + """Returns if ``module`` has parameters managed by FSDP.""" + return _module_handle(state, module) is not None + + +def _get_sharding_strategy(handle): + """ + Returns the sharding strategy of the handle. + """ + return handle._sharding_strategy if handle else None + + +def clean_tensor_name(tensor_name: str) -> str: + """ + Cleans the parameter or buffer name by removing any module wrapper + prefixes. + """ + tensor_name = tensor_name.replace(FSDP_PREFIX, "") + # TODO: Explicitly replacing the checkpoint wrapper prefix is not ideal as + # it couples `CheckpointWrapper` and FSDP and also does not scale for more + # module wrappers. + tensor_name = tensor_name.replace(_CHECKPOINT_PREFIX, "") + return tensor_name + + +def _set_fsdp_flattened(tensor: torch.Tensor) -> None: + """ + Sets an attribute on ``tensor`` to mark it as flattened by FSDP. This is to + avoid re-flattening it during nested construction. + """ + setattr(tensor, FSDP_FLATTENED, True) + + +def _is_fsdp_flattened(tensor: torch.Tensor) -> bool: + """Returns if ``tensor`` has been marked as flattened by FSDP.""" + return getattr(tensor, FSDP_FLATTENED, False) + + +def _named_parameters_with_duplicates( + module: nn.Module, **kwargs: Any +) -> List[Tuple[str, nn.Parameter]]: + """ + This API is required as some modules overwrite `named_parameters()` but do not support + `remove_duplicate`. 
+ """ + assert ( + "remove_duplicate" not in kwargs + ), "_named_parameters_with_duplicates cannot be used with `remove_duplicate` argument." + kwargs["remove_duplicate"] = False + try: + ret = list(module.named_parameters(**kwargs)) + except AssertionError as e: + kwargs.pop("remove_duplicate") + ret = list(module.named_parameters(**kwargs)) + return ret + + +def _get_param_to_fqns( + model: torch.nn.Module, + dedup_shared_params: bool = True, +) -> Dict[nn.Parameter, List[str]]: + """ + Constructs a mapping from parameter to a list of its \"canonical\" FQNs. Here, + we use canonical to mean the fully-qualified name assigned to the parameter + based on its position in the original nn.Module hierarchy before any wrapper + or parallelism has been applied to it. This is in contrast to FQNs that may be + generated after parallelisms or wrappers have been applied to the model. + + Each normal parameter maps to a singleton list containing its FQN, while each + ``FlatParameter`` maps to a list of its original parameter FQNs, which may + have length greater than one. All FQNs are prefixed starting from ``model``. + + In the case where FSDP was applied with ``use_orig_params=True``, there should be no + ``FlatParameter`` s registered to the model's modules and this mapping will only + contain mappings from ``nn.Parameter`` s to singleton FQN lists. + + It is only in the case where FSDP was applied with ``use_orig_params=False`` where + a ``FlatParameter`` will be registered in place of the original parameters and there + will be mappings from each ``FlatParameter`` to lists of FQNs corresponding to the + original parameters. + + Args: + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance). 
+ dedup_shared_params (bool): For shared parameters, if ``True``, only + includes the FQNs corresponding to the first encounter of the + shared parameter in the module traversal; if ``False``, then + includes the FQNs across all encounters. (Default: ``True``) + """ + + def module_fn(module, prefix, tree_level, param_to_fqns): + for param_name, param in _named_parameters_with_duplicates( + module, recurse=False + ): + local_fqns = ( + param._fqns + if isinstance(param, flat_param_file.FlatParameter) + else [param_name] + ) # prefixed from `module` + global_fqns = [ + clean_tensor_name(prefix + name) for name in local_fqns + ] # prefixed from the top level `model` (i.e. including `prefix`) + is_shared_param = param in param_to_fqns + if not is_shared_param: + param_to_fqns[param] = global_fqns + else: + if isinstance(param, flat_param_file.FlatParameter): + # DMP overwrites `named_parameters` and skip (advance to + # the next child module) the wrapped_module (e.g., + # _dmp_wrapped_module and _fsdp_wrapped_module). When a user + # calls `named_child` to traverse the module recursively and + # calls `named_parameters` with `recurse=False`, parameters + # will be traversed more than once. + # This hack is specified designed for DMP + FSDP. We + # overwrite the flat_parameters traversal result to only obtain + # the last one, which happens to be the correct one. + # + # TODO: Remove this hack once DMP + FSDP is not supported. + warnings.warn( + "FlatParameter is being traversed more than once. " + "This case should only happen when using " + "DistributedModelParallel with FullyShardedDataParallel." 
+ ) + param_to_fqns[param] = global_fqns + elif not dedup_shared_params: + param_to_fqns[param].extend(global_fqns) + + def return_fn(param_to_fqns): + return param_to_fqns + + param_to_unflat_param_names: Dict[torch.nn.Parameter, List[str]] = {} + return _apply_to_modules( + model, + module_fn, + return_fn, + [key for key, _ in _named_parameters_with_duplicates(model)], + param_to_unflat_param_names, + ) + + +@no_type_check +def _log_post_backward_hook( + state: _FSDPState, handle: "FlatParamHandle", logger: logging.Logger +) -> None: + # Under TORCH_DISTRIBUTED_DEBUG=INFO, log the module names this hook fires for. + # Below logging of module names this post-bwd hook fires for can help debug certain + # cases where hooks don't fire, such as under certain activation checkpoint configs. + if state._use_orig_params and handle._debug_level == dist.DebugLevel.INFO: + param_fqns = _get_handle_fqns_from_root(state, handle) + logger.warning("FSDP firing post-backward hooks for parameters %s", param_fqns) + + +@no_type_check +def _get_handle_fqns_from_root( + state: _FSDPState, handle: "FlatParamHandle" +) -> Optional[List[str]]: + if handle is None: + return None + param_to_fqn = state._exec_order_data.param_to_fqn + handle_params = handle.flat_param._params # only populated for use_orig_params + param_fqns = [ + fqn for fqn_list in [param_to_fqn[p] for p in handle_params] for fqn in fqn_list + ] + return param_fqns + + +def _apply_to_modules( + root_module: torch.nn.Module, + module_fn: Callable, + return_fn: Callable, + filter_fqns: Optional[List[str]] = None, + *args, + **kwargs, +): + """ + Performs a pre-order traversal of the modules in the hierarchy rooted at + ``root_module``, applying ``module_fn`` at each module and finally + returning a value using ``return_fn``. The traversal constructs the full + module prefix name (e.g. "module.submodule." just like in model state dict) + and makes that available to ``module_fn``. 
+ + ``filter_fqns`` is used because some module may have its own prefix similar + to ``FullyShardedDataParallel`` and the ``named_parameters()`` is overwritten + to remove the prefix. + """ + + def f(module: torch.nn.Module, prefix: str, tree_level: int, *args, **kwargs): + # Call the module function before recursing over children (pre-order) + module_fn(module, prefix, tree_level, *args, **kwargs) + for submodule_name, submodule in module.named_children(): + if submodule is None: + continue + new_prefix = prefix + submodule_name + "." + new_tree_level = tree_level + 1 + if filter_fqns is not None: + for fqn in filter_fqns: + if fqn.startswith(new_prefix): + break + else: + # DMP's named_parameter() will mess up the traversal with + # ``named_children`` + `named_parameter(recurse=False)``. + # This hack is a must to make the traversal work. + # TODO: Remove this hack once DMP + FSDP is not supported. + # It turns out that recursive wrapping may trigger this as + # well. + if ( + submodule_name == "_fsdp_wrapped_module" + or submodule_name == "_dmp_wrapped_module" + ): + new_prefix = prefix + elif submodule_name == "module": + new_prefix = prefix + f(submodule, new_prefix, new_tree_level, *args, **kwargs) + + f(root_module, "", 0, *args, **kwargs) + return return_fn(*args, **kwargs) + + +@no_type_check +def _assert_in_training_states( + state: _FSDPState, + training_states: List[TrainingState], +) -> None: + """Asserts that FSDP is in the states ``_training_states``.""" + # Raise a `ValueError` instead of using `assert` to ensure that these + # logical assertions run even if `assert`s are disabled + if state.training_state not in training_states: + msg = ( + f"expected to be in states {training_states} but current state is " + f"{state.training_state}" + ) + # Print the error on rank 0 in case this is called in the backward pass + if state.rank == 0: + if isinstance(state, nn.Module): + print(f"Asserting FSDP instance is: {state}") + print(f"ERROR: {msg}") + 
traceback.print_stack() + raise ValueError(msg) + + +def _get_root_modules(modules: Set[nn.Module]) -> Set[nn.Module]: + """ + Returns: + Set[nn.Module]: The subset of ``modules`` that are root modules (i.e. + parent-less) with respect to the modules in the set itself. In other + words, these are the modules in ``modules`` that are not the child of + any other module in ``modules``. + """ + root_modules: Set[nn.Module] = set() + module_to_submodules = {module: set(module.modules()) for module in modules} + for candidate_module in modules: + is_root_module = True + for module, submodules in module_to_submodules.items(): + is_child_module = ( + candidate_module is not module and candidate_module in submodules + ) + if is_child_module: + is_root_module = False + break + if is_root_module: + root_modules.add(candidate_module) + return root_modules + + +def _override_module_mixed_precision( + root: torch.nn.Module, + module_classes_to_override: Iterable[Type[nn.Module]], + wrap_override_dict: Dict[str, Any] = {"mixed_precision": None}, # noqa: B006 +) -> Set[Type[nn.Module]]: + module_classes_to_override = tuple(set(module_classes_to_override)) + # Return a set of the actually overridden module classes + overridden_module_classes: Set[Type[nn.Module]] = set() + for mod in root.modules(): + if isinstance(mod, module_classes_to_override): + overridden_module_classes.add(type(mod)) + mod._wrap_overrides = wrap_override_dict # type: ignore[assignment] + # TODO: We need to run this mixed precision ignored module in fp32, + # but ensure subsequent modules, that may possibly be running with + # mixed precision, still receive the appropriate precision inputs + # without user having to adjust mixed precision config too much. + # As a result, we attach pre and post forward hooks to up / down + # cast. We should revisit this design. 
+ + def cast_fn( + dtype: torch.dtype, module: nn.Module, x: torch.Tensor + ) -> torch.Tensor: + if not torch.is_floating_point(x) or x.dtype == dtype: + return x + _MODULE_TO_INP_DTYPE[module] = x.dtype + return x.to(dtype) + + def forward_pre_hook(module, args): + return _apply_to_tensors(partial(cast_fn, torch.float32, module), args) + + def forward_post_hook(module, args, output): + # NOTE: If the forward did not have any floating-point tensors, + # then the dtype will not be set for this module, and we do not + # upcast the dtype. + if module in _MODULE_TO_INP_DTYPE: + old_dtype = _MODULE_TO_INP_DTYPE[module] + return _apply_to_tensors( + partial(cast_fn, old_dtype, module), output + ) + + # We intentionally append both of these hooks so that they run after + # all other hooks. + mod.register_forward_pre_hook(forward_pre_hook, prepend=False) + mod.register_forward_hook(forward_post_hook, prepend=False) + return overridden_module_classes + + +def _no_dispatch_record_stream(tensor: torch.Tensor, stream: torch.Stream) -> None: + # FIXME record_stream doesn't work with non-cuda/mtia tensors + if tensor.device.type not in [ + "cuda", + "mtia", + torch._C._get_privateuse1_backend_name(), + ]: + return + + if torch.distributed._functional_collectives.is_torchdynamo_compiling(): + return + # from @ezyang: + # The no_dispatch was added in https://github.com/pytorch/pytorch/pull/88014 cc @fegin + # Looking over the PR, it looks like this is because we don't actually support Stream arguments + # in torch dispatch, so it just chokes. + # If Dynamo is able to answer "are there any torch dispatch modes" active (it should answer False), + # a better version of this would just be to check if there are any modes before disabling dispatch. + # TODO(voz): Extend a dynamo util to answer the above, unify the codepaths here. 
+ tensor.record_stream(stream) + else: + with no_dispatch(): + tensor.record_stream(stream) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e58c91a5807b9e3d1807d095b7a6409cf818f737 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_dynamo_utils.py @@ -0,0 +1,46 @@ +# mypy: allow-untyped-defs +from typing import Set + +import torch.nn as nn + + +def _annotate_modules_for_dynamo( + module: nn.Module, + ignored_modules: Set[nn.Module], + use_orig_params: bool, +): + """ + Annotates the submodules in ``module`` 's tree, except those in + ``ignored_modules``, indicating that the submodules are FSDP-managed and + saving the ``use_orig_params`` setting passed to the FSDP constructor. + """ + for submodule in module.modules(): + if submodule not in ignored_modules: + """[note: Dynamo treats FSDP wrapped modules as UnspecializedNNModule] + + Dynamo doesn't get to see this instance (FullyShardedDataParallel) during tracing, since + it skips tracing all the torch.distributed.fsdp code. + - Why? Running the FSDP code eagerly avoids lots of issues trying to trace complex hooks, and also + gets us graph-breaks on FSDP module boundaries which we want anyway for comm ops. + - However, we _also_ want dynamo to treat the wrapped module inside FSDP 'unspecially' (*), + and we need a way to indicate to dynamo which modules are wrapped by FSDP. + + (*) UnspecializedNNModules in dynamo are traced-through without any assumptions, and with thorough + guards. NNModules otherwise are 'specialized', meaning there is less overhead due to assuming + their code is well-behaved. 
+ + One particular issue with specialized NNModules for FSDP is that the + views created for orig_params are captured into the compiled graph on the first iteration, and while + they are always going to point to the correct flatparameter and give correct results, their order + of creation influences the order of backward execution, preventing overlap of comm and computation + during backward. We need to _use_ the new parameter views created on each forward iteration, in + order for backward to interleave hooks with compute per layer. UnspecializedNNModule lets us achieve + this by capturing the module code more 'functionally' and passing parameters in as inputs each time. + """ + submodule._is_fsdp_managed_module = True # type: ignore[assignment] + + # Dynamo only supports FSDP with use_orig_params=True. + # This is hacky, but I could not think of another way to add an assertion to dynamo + # for this, since Dynamo skips all the FSDP code frames and thus can't inspect the + # FSDP module directly + submodule._fsdp_use_orig_params = use_orig_params # type: ignore[assignment] diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py new file mode 100644 index 0000000000000000000000000000000000000000..ede7d06ec9a1de076aa3aab3017aa80738f7579d --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py @@ -0,0 +1,2735 @@ +# mypy: allow-untyped-defs +import contextlib +import functools +import logging +import os +import warnings +from enum import auto, Enum +from itertools import accumulate, chain +from typing import ( + Any, + Callable, + cast, + Dict, + Generator, + Iterator, + List, + NamedTuple, + no_type_check, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from 
torch.distributed.fsdp._common_utils import ( + _FSDPDeviceHandle, + _named_parameters_with_duplicates, + _no_dispatch_record_stream, + _set_fsdp_flattened, + HandleTrainingState, +) +from torch.distributed.utils import ( + _alloc_storage, + _data_ptr_allocated, + _free_storage, + _p_assert, +) +from torch.nn.parameter import _ParameterMeta # type: ignore[attr-defined] +from torch.testing._internal.distributed.fake_pg import FakeProcessGroup + +from ._fsdp_extensions import ( + _ext_post_unflatten_transform, + _ext_pre_flatten_transform, + FSDPExtensions, +) + + +__all__ = [ + "FlatParameter", + "FlatParamHandle", + "FlatParamShardMetadata", + "ParamInfo", + "SharedParamInfo", + "HandleShardingStrategy", +] + +logger = logging.getLogger(__name__) + + +""" +[Note: Fully Sharded Module] +We define the "fully sharded module" to be the original ``nn.Module`` that owns +a ``FlatParamHandle``. It is the *single* module logically responsible for the +*single* unshard/reshard pair for the handle's ``FlatParameter`` for a given +forward or backward pass. The fully sharded module should be passed to the +``FlatParamHandle`` constructor. + +For the wrapper code path: +- The ``FullyShardedDataParallel`` module wrapping the fully sharded module +runs the unshard/reshard on behalf of the fully sharded module by overriding +``nn.Module.forward``. +- The fully sharded module is exactly the module passed to the +``FullyShardedDataParallel`` constructor's ``module`` argument. + +For the non-wrapper code path: +- Hooks registered on the fully sharded module run the unshard/reshard. +- The fully sharded module may either be the direct argument to ``fully_shard`` +or a submodule chosen by the provided wrapping policy. 
+""" + +# Environment variable toggling whether to use unsafe `setattr()` for view +# setting in `_use_sharded_views()` and `_use_unsharded_views()` +# We should use 'safe' by default since it respects method overrides, but for +# special cases such as for high CPU overhead or for intentionally bypassing +# checks in the overrides, we may use 'unsafe'. +_FSDP_USE_UNSAFE_SETATTR = "FSDP_USE_UNSAFE_SETATTR" + +# Environment variable toggling whether to check for parameter/gradient +# writeback in case their storages change after FSDP initialization +# We should check by default since it prevents silent correctness errors, but +# since such changes are atypical, we may want to skip the check to save CPU +# overhead, especially since the check happens in the pre-forward and +# pre-backward each iteration. +_FSDP_SKIP_WRITEBACK_CHECK = "FSDP_SKIP_WRITEBACK_CHECK" + +# Env var toggling whether when model is in .eval() mode, should we run in fp32 +# or the reduced precision. +_FSDP_USE_FULL_PREC_IN_EVAL = "FSDP_USE_FULL_PREC_IN_EVAL" + +# Some value to set padding in tensors to for debuggability +_FLAT_PARAM_PADDING_VALUE = 42 + +# Environment variables for disabling the all-gather and reduce-scatter +# communication ops for ablation studies. Note that without these communication +# ops the training won't converge, and you probably need to disable correctness +# checks in your model. +_FSDP_USE_FAKE_ALL_GATHER = "FSDP_USE_FAKE_ALL_GATHER" +_FSDP_USE_FAKE_REDUCE = "FSDP_USE_FAKE_REDUCE" + + +# TODO: Define this for now to avoid circular imports. See if we can remove. 
+class HandleShardingStrategy(Enum): + FULL_SHARD = auto() + SHARD_GRAD_OP = auto() + NO_SHARD = auto() + HYBRID_SHARD = auto() + _HYBRID_SHARD_ZERO2 = auto() + + +RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES = ( + HandleShardingStrategy.FULL_SHARD, + HandleShardingStrategy.HYBRID_SHARD, +) +NO_RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES = ( + HandleShardingStrategy.SHARD_GRAD_OP, + HandleShardingStrategy._HYBRID_SHARD_ZERO2, +) + + +class ParamInfo(NamedTuple): + """Information for an original parameter.""" + + param_name: str # unprefixed + module: nn.Module + module_name: str + + +class SharedParamInfo(NamedTuple): + """ + Additional information for a shared parameter. + + For each shared parameter, we designate one module and its parameter + variable to be the primary owner, determined as the first one encountered + in the parameter walk. These are prefixed with "prim". The primary module + and parameter do not have their own :class:`SharedParamInfo` instance. + """ + + param_name: str # unprefixed + module: nn.Module + module_name: str + prim_param_name: str # unprefixed + prim_module: nn.Module + prim_module_name: str + + +class _ShardParamInfo(NamedTuple): + """Shard-related information for an original parameter.""" + + in_shard: bool + # Use to index into the sharded flat parameter, e.g. + # `flat_param[offset_in_shard : offset_in_shard + numel_in_shard]` + offset_in_shard: Optional[int] + numel_in_shard: Optional[int] + # Use to get part of the parameter in the local shard from a flattened + # version of the unsharded parameter, e.g. + # `param.flatten()[intra_param_start_idx : intra_param_end_idx + 1]` + intra_param_start_idx: Optional[int] + intra_param_end_idx: Optional[int] # inclusive + + +class FlatParamShardMetadata(NamedTuple): + """ + This holds metadata specific to this rank's shard of the flat parameter. + + Attributes: + param_names (Tuple[str, ...]): Prefixed parameter names of this rank's + shard of the parameters; see :class:`FlatParameter`. 
+ param_shapes (Tuple[torch.Size, ...]): Parameter shapes of this rank's + shard of the parameters; see :class:`FlatParameter`. + param_numels (Tuple[int, ...]): Parameter numels of this rank's shard + of the parameters; see :class:`FlatParameter`. + param_offsets (Tuple[Tuple[int, int], ...]): [start, end] offsets (in + units of numels) giving this rank's part of each flattened + original parameter. + """ + + param_names: Tuple[str, ...] + param_shapes: Tuple[torch.Size, ...] + param_numels: Tuple[int, ...] + param_offsets: Tuple[Tuple[int, int], ...] + + +class _FlatParameterMeta(_ParameterMeta): + # Make `isinstance(t, FlatParameter)` return True for custom tensor + # instances that have the _is_flat_param flag for BC + def __instancecheck__(self, instance): + # NB: do NOT test the super implementation + return isinstance(instance, torch.Tensor) and getattr( + instance, "_is_flat_param", False + ) + + +class FlatParameter(nn.Parameter, metaclass=_FlatParameterMeta): + """ + This is the flat parameter used by :class:`FullyShardedDataParallel`. + + It is comprised of one or more original parameters, which are flattened and + concatenated to construct the flat parameter. + + Under the current design, this parameter logically represents both the + unsharded and sharded flat parameter, and its data changes storages + dynamically. + - In the :class:`FullyShardedDataParallel` constructor, the parameter + is initialized as unsharded and then sharded in-place. + - At runtime, the parameter is lazily (re)-initialized. The sharded + parameter data is saved in ``self._local_shard``, and a new ``Tensor`` + ``self._full_param_padded`` is created, which is the all-gather + destination and owns the unsharded parameter storage thereafter. (See + :meth:`FlatParamHandle.init_flat_param_attributes`.) + - Throughout runtime, the parameter data changes storages as needed, + e.g. to the sharded flat parameter, low precision sharded flat + parameter, or the unsharded flat parameter. 
+ + NOTE: Since ``use_orig_params=True`` supports intra-``FlatParameter`` + padding, we have two versions of the per-parameter numels, one that + includes the padding (``_numels_with_padding``) and one that does not + (``_numels``). The former may have length longer than the other data + structures, while the latter has the same length as the number of actual + original parameters like the other per-parameter data structures. + + NOTE: This is not a real class; instead, you will always get a Parameter + back out if you try to create one of these. This is similar to the trick + we implemented for Parameter to get it to work with subclasses; this + is primarily so that FlatParameter supports combination with FakeTensor. + + Attributes: + _unpadded_unsharded_size (torch.Size): Unsharded flat parameter's size + without right-hand-side padding for divisibility by the world size. + For ``use_orig_params=True``, this includes alignment padding. + _padded_unsharded_size (torch.Size): Unsharded flat parameter's size + with right-hand-side padding for divisibility by the world size. + For ``use_orig_params=True``, this includes alignment padding. This + is only set for sharded strategies since they require padding for + the all-gather. + _sharded_size (torch.Size): Sharded flat parameter's size with padding. + This is also set for ``NO_SHARD``, in which case it is the same as + the unsharded sizes. (We omit "padded" because there is no + analogous unpadded one.) + + _num_params (int): Number of original parameters flattened into this + flat parameter. This is the length of the per-parameter data + structures. + _param_infos (Tuple[ParamInfo, ...]): Each parameter's parameter info + entry; see :class:`ParamInfo` for details. + _shapes (Tuple[torch.Size, ...]): Each parameter's original shape. + _fqns (Tuple[str, ...]): Each parameter's fully-qualified name (FQN) + prefixed from the ``_fully_sharded_module``. 
The names are + guaranteed to be unique in the subtree rooted at that module. + _param_extensions (Tuple[Optional[Any], ...]): Each parameter's + extension (i.e. some per-parameter state) used to customize + pre-flatten and post-unflatten behavior or ``None``. This is + experimental, and users should not depend on its existence in the + future. + _numels_with_padding (Tuple[int, ...]): Each parameter's numel + including entries for the padding. This is used to construct views + into the flat parameter via ``torch.split()``. This may have length + longer than ``_num_params``. + _numels (Tuple[int, ...]): Each parameter's numel excluding entries for + padding. This has length equal to ``_num_params``. + _shard_param_infos (Tuple[_ShardParamInfo, ...]): Each parameter's + shard parameter info; see :class:`_ShardParamInfo` for details. + _shared_param_infos (Tuple[SharedParamInfo, ...]): Shared parameter + info entries; see :class:`SharedParamInfo` for details. + _modules (Set[nn.Module]): Modules that contain some original parameter + that is flattened into the flat parameter. + + _shard_numel_padded (int): Numel padded for this rank's sharded flat + parameter. + _local_shard (Tensor): Sharded flat parameter with padding if using a + sharded strategy. If using ``NO_SHARD``, then this is the unpadded + unsharded flat parameter, and there is no notion of a sharded flat + parameter or padded unsharded flat parameter. + _full_param_padded (Tensor): Unsharded flat parameter with padding. + This is not defined for ``NO_SHARD``. When using mixed precision + for parameters, this has the low precision. + _full_prec_full_param_padded (Tensor): Full precision unsharded flat + parameter with padding. This is used for unsharding outside of + computation when using mixed precision for parameters. This is + never defined for ``NO_SHARD``. + _post_backward_hook_handle (RemovableHandle): + Flat parameter's post-backward hook handle. 
(Compile only) + _post_backward_hook_state (Tuple[AccumulateGrad, RemovableHandle]): + Flat parameter's :class:`AccumulateGrad` object and post-backward + hook handle. (Eager only) + _mp_shard (Tensor): Low precision sharded flat parameter with padding. + This is only defined when parameter mixed precision is enabled. For + ``NO_SHARD``, this is used for computation. + _cpu_grad (Tensor): Sharded gradient with padding stored on CPU. + This is only defined when offloading parameters is enabled. + _saved_grad_shard (Tensor): Sharded gradient with padding from previous + iterations for gradient accumulation without :meth:`no_sync`. + + _params (Optional[List[nn.Parameter]]): If ``use_orig_params=True``, + then each original parameter variable; otherwise, ``None``. This + does not include any padding tensors. + _shared_params (Optional[List[nn.Parameter]]): The original shared + parameter variables if ``use_orig_params=True`` and ``None`` + otherwise. + _tensors (Optional[List[Optional[Tensor]]]): This saves the ``Tensor`` + views created in the forward and tracked by autograd when + ``use_orig_params=True`` and is ``None`` otherwise. This is to + preserve those ``Tensor`` variables for the backward to ensure that + the ``FlatParameter`` 's ``AccumulateGrad`` object does not change + in which case the post-backward hook does not run. This is relevant + for cases like reentrant activation checkpointing. + _is_grad_none_mask (Optional[List[bool]]): If ``use_orig_params=True``, + a mask over the original parameters' gradients indicating if it is + logically ``None`` or not; otherwise, ``None``. This does not + include entries for padding. This mask is needed because only some + of the parameters may have ``None`` gradient, in which case the + flat gradient must be non-``None`` and must use zeros to + approximate those original ``None`` gradients. This mask informs + FSDP to set the original parameter gradients to ``None`` (instead + of zeros) as needed. 
+ """ + + _unpadded_unsharded_size: torch.Size + _padded_unsharded_size: torch.Size + _sharded_size: torch.Size + _num_params: int + _param_infos: Tuple[ParamInfo, ...] + _shapes: Tuple[torch.Size, ...] + _fqns: Tuple[str, ...] + _param_extensions: Tuple[Optional[Any], ...] + _numels_with_padding: Tuple[int, ...] + _numels: Tuple[int, ...] + _shard_param_infos: Tuple[_ShardParamInfo, ...] + _shared_param_infos: Tuple[SharedParamInfo, ...] + _modules: Set[nn.Module] + _shard_numel_padded: int + _local_shard: Tensor + _full_param_padded: Tensor + _full_prec_full_param_padded: Tensor + # Eager only + _post_backward_hook_state: Tuple[Any, Any] + # Compile only + _post_backward_hook_handle: Any + _mp_shard: Tensor + _cpu_grad: Tensor + _saved_grad_shard: Tensor + _params: Optional[List[nn.Parameter]] + _shared_params: Optional[List[nn.Parameter]] + _tensors: Optional[List[Optional[Tensor]]] + _is_grad_none_mask: Optional[List[bool]] + + _is_padding_mask: List[bool] + + def __new__(cls, data=None, requires_grad=True): + assert cls is FlatParameter, "subclasses FlatParameter not supported" + r = nn.Parameter.__new__(nn.Parameter, data, requires_grad) # type: ignore[call-arg] + r._is_flat_param = True # type: ignore[attr-defined] + return r + + # NB: This is not a regular method, because FlatParameters are not actually + # instances of this class (see __new__ above). So you must indirectly + # call this directly through the classmethod. + @classmethod + def _init_metadata( + cls, + self, + param_infos: List[ParamInfo], + numels: List[int], + shapes: List[torch.Size], + fqns: List[str], + shared_param_infos: List[SharedParamInfo], + param_extensions: List[Optional[Any]], + params: Optional[List[nn.Parameter]], + shared_params: Optional[List[nn.Parameter]], + is_padding_mask: List[bool], + ) -> None: + """ + Initialize attributes holding metadata about the original parameters comprising the flat parameter. 
+ + We expose this method separate from the constructor to keep the + constructor only responsible for the flat parameter's tensor data. This + method should only be called once per model, while the constructor may + be called multiple times, e.g. when reloading from a checkpoint, in + which case only the tensor data needs to be passed to the constructor. + Since :meth:`load_state_dict` is implemented via :meth:`copy_`, the + metadata is correctly assumed to be unchanged. + + Args: + See the Attributes in the class docstring. + """ + assert len(param_infos) == len(shapes) + assert len(param_infos) == len(fqns) + assert len(param_infos) == len(param_extensions) + self._num_params = len(param_infos) + self._param_infos = param_infos + self._shapes = shapes + self._fqns = fqns + self._param_extensions = param_extensions + self._is_padding_mask = is_padding_mask + + numels_without_padding: List[int] = [] + for numel, is_padding in zip(numels, is_padding_mask): + if not is_padding: + numels_without_padding.append(numel) + self._numels = tuple(numels_without_padding) + self._numels_with_padding = tuple(numels) + assert len(self._numels) == self._num_params + + self._shared_param_infos = tuple(shared_param_infos) + self._modules = {pi.module for pi in self._param_infos}.union( + {spi.module for spi in self._shared_param_infos} + ) + assert (params is None) == (shared_params is None) + if params is not None: + assert shared_params is not None and len(shared_params) == len( + shared_param_infos + ) + self._params = [] + for param, is_padding in zip(params, is_padding_mask): + if not is_padding: + self._params.append(param) + self._shared_params = shared_params + # Mark the original parameters to avoid flattening them into + # another `FlatParameter` during recursive construction + for param in chain(self._params, self._shared_params): + _set_fsdp_flattened(param) + self._is_grad_none_mask = [False for _ in range(self._num_params)] + self._tensors = [None for _ in 
range(self._num_params)] + else: + self._params = None + self._shared_params = None + self._is_grad_none_mask = None + self._tensors = None + self._unpadded_unsharded_size = self.size() + _set_fsdp_flattened(self) + # Tracks whether the `FlatParameter`'s post-backward hook has been + # called to modify the behavior of the post-backward callback + self._post_backward_called = False + + +class FlatParamHandle: + """ + A handle that manages a flat parameter (:class:`FlatParameter`). + + This includes sharding and view management. + + Args: + params (Sequence[nn.Parameter]): The parameters to flatten into the + flat parameter. + fully_sharded_module (nn.Module): See [Note: Fully Sharded Module]. + device (torch.device): The compute and communication device, which + should be a non-CPU device. We refer to it as the compute device. + sharding_strategy (ShardingStrategy): Sharding strategy to apply to + this handle's ``FlatParameter``. + offload_params (bool): Whether to offload the handle's + ``FlatParameter`` to CPU. + mp_param_dtype (Optional[torch.dtype]): Parameter mixed precision + setting passed to the FSDP constructor. + mp_reduce_dtype (Optional[torch.dtype]): Gradient reduction mixed + precision setting passed to the FSDP constructor. + keep_low_precision_grads (bool): Whether to keep gradients in low + precision. + use_orig_params (bool): If ``True``, then FSDP preserves the original + parameter variables and returns them from ``named_parameters()`` + (e.g. to support different optimizer hyperparameters within one + :class:`FlatParameter`). If ``False``, then FSDP reconstructs the + parameters every iteration and returns the :class:`FlatParameter` s + from ``named_parameters()``. 
+ """ + + ################## + # INITIALIZATION # + ################## + def __init__( + self, + params: Sequence[Union[nn.Parameter, Tensor]], + fully_sharded_module: nn.Module, + device: torch.device, + sharding_strategy: HandleShardingStrategy, + offload_params: bool, + mp_param_dtype: Optional[torch.dtype], + mp_reduce_dtype: Optional[torch.dtype], + keep_low_precision_grads: bool, + process_group: dist.ProcessGroup, + use_orig_params: bool, + *, + fsdp_extension: Optional[FSDPExtensions] = None, + ): + super().__init__() + params = list(params) + if len(params) == 0: + raise ValueError( + f"Cannot construct a {self.__class__.__name__} with an empty parameter list" + ) + self._init_setattr_fns() + self._skip_writeback_check = ( + os.environ.get(_FSDP_SKIP_WRITEBACK_CHECK, "") == "1" + ) + self._use_full_prec_in_eval = ( + os.environ.get(_FSDP_USE_FULL_PREC_IN_EVAL, "") == "1" + ) + self._use_fake_all_gather = os.environ.get(_FSDP_USE_FAKE_ALL_GATHER, "") == "1" + self._use_fake_reduce = os.environ.get(_FSDP_USE_FAKE_REDUCE, "") == "1" + if self._skip_writeback_check: + _warn_skip_writeback_check( + logger, + f"Since {_FSDP_SKIP_WRITEBACK_CHECK}=1, FSDP will not check " + "for parameter or gradient writeback. Changing parameter or " + "gradient storages may lead to silent correctness errors.", + ) + if self._use_fake_all_gather: + _warn_use_fake_all_gather( + logger, + f"Since {_FSDP_USE_FAKE_ALL_GATHER}=1, FSDP will not execute " + "all-gather ops. Your training will be incorrect, but " + "can reveal how much time spent on all-gather ops.", + ) + if self._use_fake_reduce: + _warn_use_fake_reduce( + logger, + f"Since {_FSDP_USE_FAKE_REDUCE}=1, FSDP will not execute " + "reduce-scatter ops. 
Your training will be incorrect, but " + "can reveal how much time spent on reduce-scatter ops.", + ) + # Only align addresses for `use_orig_params=True` (for now) + align_addresses = use_orig_params + self._init_get_unflat_views_fn(align_addresses) + self.device = device + self._device_handle = _FSDPDeviceHandle.from_device(self.device) + self.process_group = process_group + if self._use_fake_all_gather or self._use_fake_reduce: + self._fake_process_group = FakeProcessGroup( + rank=process_group.rank(), world_size=process_group.size() + ) + self.rank = process_group.rank() + self.world_size = process_group.size() + self._sharding_strategy = sharding_strategy + self._offload_params = offload_params + self._use_orig_params = use_orig_params + self._keep_low_precision_grads = keep_low_precision_grads + self._training_state = HandleTrainingState.IDLE + self._debug_level = dist.get_debug_level() + self._fully_sharded_module = fully_sharded_module + # For strategies that do not free after forward, we skip using sharded + # views after forward since the unsharded data exists. We still switch + # `self.flat_param` to point to the sharded flat parameter since what + # it points to parameterizes behavior. We use the following attribute + # to track which tensor data the parameters are unsharded views into. + self._unsharded_flat_param_for_skipped_views: Optional[Tensor] = None + # The index in the state's `all_handles`, which must be the + # same across ranks for the execution order validation to work + self._handle_index: Optional[int] = None + # Index in handles_to_pre_forward_order + self._pre_forward_order_index: Optional[int] = None + # Index in `handles_post_forward_order` + self._post_forward_index: Optional[int] = None + # Used for guarding against mistargeted forward prefetches + self._needs_pre_forward_unshard = False + # Used for guarding against mistargeted backward prefetches + self._needs_pre_backward_unshard = False + # Was the handle prefetched? 
    def _init_setattr_fns(self):
        """Select the (un)safe ``setattr`` implementations based on the env var."""
        # `_FSDP_USE_UNSAFE_SETATTR=1` opts into direct attribute assignment,
        # bypassing `nn.Module.__setattr__` overhead.
        use_unsafe_setattr = os.environ.get(_FSDP_USE_UNSAFE_SETATTR, "") == "1"
        self._setattr_tensor: Callable[[nn.Module, str, Tensor], None]
        self._setattr_param: Callable[[nn.Module, str, nn.Parameter], None]
        if use_unsafe_setattr:
            self._setattr_tensor = _unsafe_setattr_tensor
            self._setattr_param = _unsafe_setattr_param
        else:
            self._setattr_tensor = _safe_setattr_tensor_or_param
            self._setattr_param = _safe_setattr_tensor_or_param

    def _init_get_unflat_views_fn(self, align_addresses: bool):
        """Bind the unflat-views implementation matching ``align_addresses``."""
        self._get_unflat_views = (
            self._get_unflat_views_aligned
            if align_addresses
            else self._get_unflat_views_unaligned
        )

    def _init_flat_param_and_metadata(
        self,
        params: List[Union[Tensor, nn.Parameter]],
        module: nn.Module,
        aligned_numel: int,
        use_orig_params: bool,
    ) -> None:
        """
        Initialize the ``FlatParameter`` and its metadata.

        NOTE: This should only be called once at construction time, after which
        the ``FlatParameter`` metadata is assumed to be static.

        NOTE: The elements of ``params`` should only be ``Tensor`` s when
        composing with ``DTensor`` -based tensor parallelism, in which case the
        elements may be ``DTensor`` local shards.
        """
        if len(params) == 0:
            raise ValueError("Expects non-empty `params`")
        if aligned_numel < 0:
            raise ValueError(
                f"Expects non-negative `aligned_numel` but got {aligned_numel}"
            )
        (
            dtype,
            flat_param_requires_grad,
            device,
        ) = self._validate_tensors_to_flatten(params)
        params_set = set(params)
        # For alignment padding, only `numels` gets strictly non-`None`
        # elements, and all other lists get `None` elements for padding.
        param_infos: List[ParamInfo] = []
        numels: List[int] = []
        shapes: List[torch.Size] = []
        fqns: List[str] = []
        shared_param_infos: List[SharedParamInfo] = []
        shared_param_memo: Dict[
            Union[Tensor, nn.Parameter], Tuple[nn.Module, str, str]
        ] = {}
        params_to_flatten: List[Union[Tensor, nn.Parameter]] = []
        shared_params: List[Union[Tensor, nn.Parameter]] = []
        param_extensions: List[Any] = []
        is_padding_mask: List[bool] = []
        total_numel = total_numel_without_padding = 0
        # Walk the module tree in registration order so the flat parameter
        # layout is deterministic and identical across ranks.
        for submodule_name, submodule in module.named_modules(remove_duplicate=False):
            for param_name, param in _named_parameters_with_duplicates(
                submodule, recurse=False
            ):
                if param not in params_set:
                    continue
                if param in shared_param_memo:  # shared reference
                    prim_module, prim_module_name, prim_param_name = shared_param_memo[
                        param
                    ]
                    shared_params.append(param)
                    shared_param_infos.append(
                        SharedParamInfo(
                            param_name,
                            submodule,
                            submodule_name,
                            prim_param_name,
                            prim_module,
                            prim_module_name,
                        )
                    )
                else:
                    if aligned_numel > 0:
                        # Insert alignment padding before this parameter if the
                        # running total is not already aligned.
                        numel_to_pad = aligned_numel - (total_numel % aligned_numel)
                        if numel_to_pad > 0 and numel_to_pad < aligned_numel:
                            padding_tensor = _construct_padding_tensor(
                                numel_to_pad, dtype, False, device
                            )
                            params_to_flatten.append(padding_tensor)
                            is_padding_mask.append(True)
                            numels.append(numel_to_pad)
                            total_numel += numel_to_pad
                    transform_t, extension = _ext_pre_flatten_transform(
                        param,
                        self._fsdp_extension,
                    )
                    param = cast(nn.Parameter, transform_t)
                    param_extensions.append(extension)
                    # NOTE(review): the memo is keyed on the post-transform
                    # `param` while the earlier membership check uses the
                    # original object — presumably the transform is identity
                    # for shared params; verify against `_ext_pre_flatten_transform`.
                    shared_param_memo[param] = (submodule, submodule_name, param_name)
                    params_to_flatten.append(param)
                    is_padding_mask.append(False)
                    param_infos.append(ParamInfo(param_name, submodule, submodule_name))
                    numels.append(param.numel())
                    shapes.append(param.shape)
                    fqn = (
                        submodule_name + "." + param_name
                        if submodule_name
                        else param_name
                    )
                    fqns.append(fqn)
                    total_numel += param.numel()
                    total_numel_without_padding += param.numel()
        if len(params_to_flatten) == 0:
            raise ValueError(
                f"`params` were not found in `module`'s tree"
                f"params: {params}\nmodule: {module}"
            )
        if (
            self.rank == 0
            and aligned_numel > 0
            and total_numel != total_numel_without_padding
        ):
            logger.debug(
                "FSDP FlatParameter address alignment created "
                "%s numel of padding (%s vs. %s)",
                total_numel - total_numel_without_padding,
                total_numel,
                total_numel_without_padding,
            )
        if aligned_numel > 0:
            # Pad to be divisible by world size to avoid a copy for the
            # post-backward reduce-scatter
            numel_to_pad = self.world_size - (total_numel % self.world_size)
            if numel_to_pad > 0 and numel_to_pad < self.world_size:
                if self.rank == 0:
                    logger.info(
                        "FSDP FlatParameter world size divisibility created "
                        "%s numel of padding",
                        numel_to_pad,
                    )
                padding_tensor = _construct_padding_tensor(
                    numel_to_pad, dtype, False, device
                )
                params_to_flatten.append(padding_tensor)
                is_padding_mask.append(True)
                numels.append(numel_to_pad)
                total_numel += numel_to_pad
        # Pass `aligned_numel=0` since we already included padding tensors
        self.flat_param: FlatParameter = self.flatten_tensors_into_flat_param(
            params_to_flatten,
            aligned_numel=0,
            requires_grad=flat_param_requires_grad,
        )
        FlatParameter._init_metadata(
            self.flat_param,
            param_infos,
            numels,
            shapes,
            fqns,
            shared_param_infos,
            param_extensions,
            _convert_to_params(params_to_flatten) if use_orig_params else None,
            _convert_to_params(shared_params) if use_orig_params else None,
            is_padding_mask,
        )

    def _validate_tensors_to_flatten(
        self, tensors: List[Union[Tensor, nn.Parameter]]
    ) -> Tuple:
        """Validate the tensors to flatten and returns any necessary metadata.

        Returns a ``(dtype, flat_param_requires_grad, device)`` tuple; raises
        ``ValueError`` on nested ``FlatParameter`` s, integer dtypes, or
        non-uniform dtype/device (and non-uniform ``requires_grad`` when
        ``use_orig_params=False``).
        """
        dtype: Optional[torch.dtype] = None
        # Return as the logical OR over each tensor's value
        flat_param_requires_grad: Optional[bool] = None
        device: Optional[torch.device] = None
        # For `use_orig_params=True`, permit non-uniform `requires_grad`
        for tensor in tensors:
            if isinstance(tensor, FlatParameter):
                raise ValueError("Cannot flatten a `FlatParameter`")
            if dtype is None and not tensor.is_floating_point():
                raise ValueError("Cannot flatten integer dtype tensors")
            if dtype is not None and tensor.dtype != dtype:
                raise ValueError(
                    f"Must flatten tensors with uniform dtype but got {dtype} "
                    f"and {tensor.dtype}"
                )
            if (
                not self._use_orig_params
                and flat_param_requires_grad is not None
                and tensor.requires_grad != flat_param_requires_grad
            ):
                raise ValueError(
                    "Must flatten tensors with uniform `requires_grad` when "
                    "`use_orig_params=False`"
                )
            if device is not None and tensor.device != device:
                raise ValueError(
                    "Must flatten tensors on the same device but got both "
                    f"{device} and {tensor.device}"
                )
            dtype = tensor.dtype
            flat_param_requires_grad = flat_param_requires_grad or tensor.requires_grad
            device = tensor.device
        assert flat_param_requires_grad is not None, "Requires non-empty `tensors` list"
        return dtype, flat_param_requires_grad, device
    def flatten_tensors(
        self,
        tensors: List[Tensor],
        aligned_numel: int,
    ) -> Tensor:
        """
        Flatten ``tensors`` into a single flat tensor.

        The flattening optionally includes
        padding if ``aligned_numel`` is greater than 0, where ``aligned_numel``
        gives the numel required to have address alignment.

        NOTE: The padding alignment algorithm must be kept in sync with
        :meth:`_init_flat_param_metadata`. We separate the two methods because
        the initialization happens once, whereas this method may be called
        multiple times throughout training (e.g. for checkpointing).
        """
        if len(tensors) == 0:
            raise ValueError("Expects non-empty `tensors`")
        if aligned_numel < 0:
            raise ValueError(
                f"Expects non-negative `aligned_numel` but got {aligned_numel}"
            )
        dtype, _, device = self._validate_tensors_to_flatten(tensors)
        flat_tensors: List[Tensor] = []
        if aligned_numel > 0:
            total_numel = 0
            for tensor in tensors:
                # Insert alignment padding before each tensor as needed
                numel_to_pad = aligned_numel - (total_numel % aligned_numel)
                if numel_to_pad > 0 and numel_to_pad < aligned_numel:
                    padding_tensor = _construct_padding_tensor(
                        numel_to_pad, dtype, False, device
                    )
                    flat_tensors.append(padding_tensor)
                    total_numel += numel_to_pad
                flat_tensors.append(torch.flatten(_detach_if_needed(tensor)))
                total_numel += tensor.numel()
            # Trailing pad to make the total divisible by the world size
            numel_to_pad = self.world_size - (total_numel % self.world_size)
            if numel_to_pad > 0 and numel_to_pad < self.world_size:
                padding_tensor = _construct_padding_tensor(
                    numel_to_pad, dtype, False, device
                )
                flat_tensors.append(padding_tensor)
                total_numel += numel_to_pad
        else:
            flat_tensors = [
                torch.flatten(_detach_if_needed(tensor)) for tensor in tensors
            ]
        return torch.cat(flat_tensors, dim=0)

    def flatten_tensors_into_flat_param(
        self,
        tensors: List[Tensor],
        aligned_numel: int,
        requires_grad: bool,
    ) -> FlatParameter:
        """Flatten ``tensors`` and wrap the result in a ``FlatParameter``."""
        flat_param_data = self.flatten_tensors(tensors, aligned_numel)
        return FlatParameter(flat_param_data, requires_grad=requires_grad)

    def _init_param_reduce_dtypes(
        self,
        mp_param_dtype: Optional[torch.dtype],
        mp_reduce_dtype: Optional[torch.dtype],
    ) -> None:
        """
        Initialize param and reduce dtypes.

        Precondition: ``self.flat_param`` is set. This ensures that this
        handle's parameters have a single dtype.

        Postcondition: This sets ``self._fwd_bwd_param_dtype`` and
        ``self._reduce_dtype``. If ``mp_param_dtype`` or ``mp_reduce_dtype``
        is ``None``, then we assume the original parameter dtype. One special
        case is if ``mp_param_dtype`` is not ``None`` and ``mp_reduce_dtype``
        is ``None``, in which case we assume the gradient reduction dtype
        matches the forward/backward parameter dtype.
        """
        # Save whether these dtypes were specified so that we permit the
        # parameter dtype to change up until the lazy initialization
        self._low_prec_param_dtype_specified = mp_param_dtype is not None
        self._low_prec_reduce_dtype_specified = mp_reduce_dtype is not None
        if (
            self._low_prec_param_dtype_specified
            and not self._low_prec_reduce_dtype_specified
        ):
            # Special case: infer gradient reduction mixed precision
            self._fwd_bwd_param_dtype = mp_param_dtype
            self._reduce_dtype = self._fwd_bwd_param_dtype
        else:
            self._fwd_bwd_param_dtype = mp_param_dtype or self._orig_param_dtype
            self._reduce_dtype = mp_reduce_dtype or self._orig_param_dtype
        assert self._fwd_bwd_param_dtype is not None
        assert self._reduce_dtype is not None
+ """ + flat_param = self.flat_param + if not self.uses_sharded_strategy: + self._init_shard_metadata(0, 0, flat_param.numel() - 1) + else: + _p_assert( + flat_param.storage_offset() == 0, + "The `FlatParameter` is not the sole occupant of its storage", + ) + sharded_flat_param, numel_padded = FlatParamHandle._get_shard( + flat_param, self.rank, self.world_size + ) + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + allocated = flat_param._typed_storage()._size() > 0 + if allocated: + flat_param._typed_storage()._resize_(0) + flat_param.set_(sharded_flat_param) # type: ignore[call-overload] + start_idx = sharded_flat_param.numel() * self.rank + end_idx = sharded_flat_param.numel() * (self.rank + 1) - 1 # inclusive + self._init_shard_metadata(numel_padded, start_idx, end_idx) + if self._use_orig_params: + self._use_sharded_views() + + def _init_shard_metadata( + self, + numel_padded: int, + unsharded_start_idx: int, + unsharded_end_idx: int, + ) -> None: + """ + Initialize shard-related metadata for this rank's shard of the flat parameter. + + This includes ``_sharded_size``, ``_shard_param_infos``, and ``_shard_numel_padded``. + + Args: + numel_padded (int): Numel padded for this rank's sharded flat + parameter. + unsharded_start_idx (int): Start index in the unsharded flat + parameter assigned to this rank. + unsharded_end_idx (int): End index (inclusive) in the unsharded + flat parameter assigned to this rank. + + Precondition: ``self.flat_param`` 's data is the sharded flat + parameter. 
+ """ + flat_param = self.flat_param + flat_param._sharded_size = flat_param.size() # type: ignore[attr-defined] + sharded_flat_param_numel = flat_param.numel() # includes `numel_padded` + _p_assert( + unsharded_start_idx >= 0 and unsharded_start_idx <= unsharded_end_idx, + f"unsharded_start_idx: {unsharded_start_idx} unsharded_end_idx: {unsharded_end_idx}", + ) + _p_assert( + numel_padded <= sharded_flat_param_numel, + f"numel_padded: {numel_padded} " + f"sharded_flat_param_numel: {sharded_flat_param_numel}", + ) + shard_param_infos = self._get_shard_metadata( + unsharded_start_idx, unsharded_end_idx + ) + assert ( + len(shard_param_infos) == flat_param._num_params + ), f"Expects length {flat_param._num_params} but got {len(shard_param_infos)}" + flat_param._shard_param_infos = shard_param_infos # type: ignore[attr-defined] + flat_param._shard_numel_padded = numel_padded # type: ignore[attr-defined] + + def _get_shard_metadata( + self, + unsharded_start_idx: int, + unsharded_end_idx: int, + ) -> Tuple[_ShardParamInfo, ...]: + """ + Compute the shard metadata based on ``unsharded_start_idx`` and ``unsharded_end_idx`` (inclusive). + + ``unsharded_start_idx`` and ``unsharded_end_idx`` give the interval of the + unsharded flat parameter specifying the shard. 
+ """ + flat_param_offsets = self._get_flat_param_offsets() + assert len(flat_param_offsets) == len( + self.flat_param._numels_with_padding + ), f"Expected {len(self.flat_param._numels_with_padding)} but got {len(flat_param_offsets)}" + shard_param_infos: List[_ShardParamInfo] = [] + sharded_flat_param_numel = unsharded_end_idx - unsharded_start_idx + 1 + # `unsharded_param_start_idx` and `unsharded_param_end_idx` are indices + # into the unsharded flat parameter (inclusive) of the given parameter + for i, ( + (unsharded_param_start_idx, unsharded_param_end_idx), + is_padding, + ) in enumerate(zip(flat_param_offsets, self.flat_param._is_padding_mask)): + if is_padding: + continue + in_sharded_flat_param = ( + unsharded_start_idx <= unsharded_param_end_idx + and unsharded_end_idx >= unsharded_param_start_idx + ) + if not in_sharded_flat_param: + shard_param_info = _ShardParamInfo(False, None, None, None, None) + else: + if unsharded_start_idx <= unsharded_param_start_idx: + # This branch can only happen once since the rank's + # unsharded start index can only intersect one parameter + intra_param_start_idx = 0 + offset_in_shard = unsharded_param_start_idx - unsharded_start_idx + else: + intra_param_start_idx = ( + unsharded_start_idx - unsharded_param_start_idx + ) + offset_in_shard = 0 + assert ( + offset_in_shard >= 0 and offset_in_shard < sharded_flat_param_numel + ), ( + f"Invalid `offset_in_shard` of {offset_in_shard} for " + f"sharded flat parameter with {sharded_flat_param_numel} numel" + ) + intra_param_end_idx = ( + min(unsharded_param_end_idx, unsharded_end_idx) + - unsharded_param_start_idx + ) + numel_in_shard = intra_param_end_idx - intra_param_start_idx + 1 + shard_param_info = _ShardParamInfo( + True, + offset_in_shard, + numel_in_shard, + intra_param_start_idx, + intra_param_end_idx, + ) + shard_param_infos.append(shard_param_info) + return tuple(shard_param_infos) + + @staticmethod + def _get_unpadded_shard( + tensor: Tensor, + rank: int, + 
world_size: int, + ) -> Tuple[Tensor, int]: + """ + Return the unpadded shard of ``tensor`` for the given ``rank`` and ``world_size``. + + The returned value is a tuple of the shard of ``tensor`` without any + padding and the numel to pad for that shard. + + If ``tensor`` is already flattened or may be viewed in the flattened + shape (which is true in the expected usage), then this method does not + allocate any new tensor memory. + """ + chunks = torch.flatten(tensor).chunk(world_size) + if len(chunks) < (rank + 1): + # This rank gets an empty chunk fully padded with zeros since there + # are not enough chunks across ranks + chunk = chunks[0].new_empty(0) + else: + chunk = chunks[rank] + numel_to_pad = chunks[0].numel() - chunk.numel() + assert ( + numel_to_pad >= 0 + ), "Chunk's size should be at most the first chunk's size" + return chunk, numel_to_pad + + @staticmethod + def _get_shard( + tensor: Tensor, + rank: int, + world_size: int, + ) -> Tuple[Tensor, int]: + """ + Return the shard of ``tensor`` with padding for the given ``rank`` and ``world_size`` and the numel padded for that shard. + + This method allocates new memory (via :meth:`clone`) since the + unsharded ``tensor`` may be deallocated after this method returns. + """ + chunk, numel_to_pad = FlatParamHandle._get_unpadded_shard( + tensor, rank, world_size + ) + shard = chunk.clone() + if numel_to_pad > 0: + shard = F.pad(shard, [0, numel_to_pad]) + return shard, numel_to_pad + + @staticmethod + def _get_sharded_size(tensor: Tensor, rank: int, world_size: int) -> torch.Size: + """ + Return the shape of ``tensor`` after sharding including padding. + + This requires ``tensor`` to have 1D shape and ensures that the returned + shape is 1D. 
+ """ + assert len(tensor.shape) == 1, f"{tensor.shape}" + unpadded_sharded_tensor, numel_to_pad = FlatParamHandle._get_unpadded_shard( + tensor, rank, world_size + ) + unpadded_sharded_size = unpadded_sharded_tensor.size() + assert len(unpadded_sharded_size) == 1, f"{unpadded_sharded_size}" + return torch.Size([unpadded_sharded_size[0] + numel_to_pad]) + + def _get_flat_param_offsets(self) -> List[Tuple[int, int]]: + """ + Return [start, end] offsets of each original parameter's flattened data in the unsharded flat parameter (without padding). + + NOTE: The returned list includes elements for alignment padding. + """ + cumulative_sum = list(accumulate(self.flat_param._numels_with_padding)) + starts = [0] + cumulative_sum[:-1] + ends = [end - 1 for end in cumulative_sum] # inclusive + param_offsets = list(zip(starts, ends)) + return param_offsets + + @no_type_check + def shard_metadata( + self, + ) -> FlatParamShardMetadata: + """ + Return the shard-related metadata specific to this rank's shard of the flat parameter. + + NOTE: The returned tuple does not include elements for alignment + padding but does account for the padding. + """ + fqns_list = [] + shapes_list = [] + numels_list = [] + shard_param_offsets = [] + for fqn, shape, numel, shard_param_info in zip( + self.flat_param._fqns, + self.flat_param._shapes, + self.flat_param._numels, + self.flat_param._shard_param_infos, + ): + if not shard_param_info.in_shard: + continue + fqns_list.append(fqn) + shapes_list.append(shape) + numels_list.append(numel) + shard_param_offsets.append( + ( + shard_param_info.intra_param_start_idx, + shard_param_info.intra_param_end_idx, + ) + ) + return FlatParamShardMetadata( + tuple(fqns_list), + tuple(shapes_list), + tuple(numels_list), + tuple(shard_param_offsets), + ) + + @no_type_check + @torch.no_grad() + def init_flat_param_attributes(self) -> None: + """ + This initializes some attributes on the handle's ``FlatParameter``. 
+ This should be called during lazy initialization since it requires the + parameter to be on the compute device if not offloading to CPU and we + want to give users the chance to move the parameter appropriately after + the FSDP constructor. + + For each tensor attribute on the ``FlatParameter``, see the unshard and + reshard methods in this class for the allocation and free pattern. + """ + flat_param = self.flat_param + if flat_param.dtype != self._orig_param_dtype: + # Entering this branch means that the user changed the parameter + # dtype after FSDP initialization, in which case we may need to + # refresh some saved dtype attributes (dtypes specified as a part + # of mixed precision take precedence). + if not self._low_prec_param_dtype_specified: + self._fwd_bwd_param_dtype = flat_param.dtype + # For `reduce_dtype`, require `param_dtype` was not specified since + # then we infer the `reduce_dtype` from the specified `param_dtype` + if ( + not self._low_prec_reduce_dtype_specified + and not self._low_prec_param_dtype_specified + ): + self._reduce_dtype = flat_param.dtype + self._orig_param_dtype = flat_param.dtype + cpu_device = torch.device("cpu") + if self._offload_params: + _p_assert( + flat_param.device == cpu_device, + f"Expects the `FlatParameter` to be on CPU when parameter CPU " + f"offloading is enabled, not {flat_param.device}", + ) + else: + self._check_on_compute_device(self.flat_param) + flat_param._local_shard = flat_param.data + if self._offload_params: + # Pin the memory for faster H2D transfer + flat_param._local_shard = flat_param._local_shard.pin_memory( + device=self.device + ) + # Pre-allocate the sharded gradient on CPU to enable non-blocking + # D2H transfer during the backward pass + flat_param._cpu_grad = torch.zeros_like( + flat_param._local_shard, device=cpu_device + ).pin_memory(device=self.device) + if self._uses_param_mixed_precision: + # For parameter mixed precision, we maintain a low precision + # sharded tensor on the compute 
device to be all-gathered (for + # sharded strategies) or directly used (for `NO_SHARD`) for + # computation. + flat_param._mp_shard = torch.empty_like( + flat_param._local_shard, + device=self.device, + dtype=self._fwd_bwd_param_dtype, + ) + _free_storage(flat_param._mp_shard) + if self.uses_sharded_strategy: + # We maintain a padded unsharded tensor that serves as the + # all-gather destination and owns the original parameter storages. + unsharded_param_dtype = ( + self._fwd_bwd_param_dtype + if self._uses_param_mixed_precision + else flat_param.dtype + ) # use low precision if parameter mixed precision is enabled + padded_unsharded_numel = flat_param.numel() * self.world_size + flat_param._full_param_padded = torch.empty( + padded_unsharded_numel, + device=self.device, + dtype=unsharded_param_dtype, + ) + flat_param._padded_unsharded_size = flat_param._full_param_padded.size() + _free_storage(flat_param._full_param_padded) + + if self._uses_param_mixed_precision: + # For parameter mixed precision, we maintain a full precision + # padded unsharded tensor for when we force full precision. + flat_param._full_prec_full_param_padded = torch.empty( + padded_unsharded_numel, + device=self.device, + dtype=flat_param.dtype, # full precision + ) + _free_storage(flat_param._full_prec_full_param_padded) + + ################### + # UNSHARD/RESHARD # + ################### + def pre_unshard(self) -> bool: + """ + Return ``False`` if this is a no-op and ``True`` otherwise. + + Postcondition: ``self.flat_param`` 's data is on the device for + communication and is what should be all-gathered. This means that it + matches the dtype of the expected unsharded parameter. + """ + if ( + self._training_state == HandleTrainingState.SUMMON_FULL_PARAMS + and self._skipped_use_sharded_views + ): + # Since this path imposes special semantics for the unsharded flat + # parameter (e.g. 
    ###################
    # UNSHARD/RESHARD #
    ###################
    def pre_unshard(self) -> bool:
        """
        Return ``False`` if this is a no-op and ``True`` otherwise.

        Postcondition: ``self.flat_param`` 's data is on the device for
        communication and is what should be all-gathered. This means that it
        matches the dtype of the expected unsharded parameter.
        """
        if (
            self._training_state == HandleTrainingState.SUMMON_FULL_PARAMS
            and self._skipped_use_sharded_views
        ):
            # Since this path imposes special semantics for the unsharded flat
            # parameter (e.g. forcing full precision), use sharded views to
            # reuse the existing logic for that special handling
            self._use_sharded_views()
        ret = False
        if self._use_orig_params and not self._skip_writeback_check:
            ret = self._writeback_orig_params()
        if (
            self.uses_sharded_strategy
            and not self._offload_params
            and not self.needs_unshard()
        ):
            pass  # no-op
        elif self._uses_param_mixed_precision and not self._force_full_precision:
            self._use_low_precision_shard()
            ret = True
        elif self._offload_params and self.flat_param.device != self.device:
            # NOTE: This creates a new tensor distinct from any attributes.
            self.flat_param_to(self.device, non_blocking=True)
            ret = True
        self._check_on_compute_device(self.flat_param)
        return ret

    def _use_low_precision_shard(self):
        """Allocate on the compute device and switch to using the low precision sharded flat parameter."""
        self._check_low_precision_shard()
        flat_param = self.flat_param
        _alloc_storage(
            flat_param._mp_shard, flat_param._local_shard.size()  # type: ignore[attr-defined]
        )
        # `copy_()` implicitly casts to the low precision
        flat_param._mp_shard.copy_(  # type: ignore[attr-defined]
            flat_param._local_shard.to(  # type: ignore[attr-defined]
                self.device, non_blocking=True
            )
        )
        # Invariant: `_mp_shard` is always on the compute device.
        flat_param.data = flat_param._mp_shard  # type: ignore[attr-defined]

    def unshard(self):
        """
        Run the unshard logic.

        This includes all-gathering the flat parameter
        and switching to using the unsharded flat parameter. If the handle does
        not need unsharding, then this only switches to using the unsharded
        flat parameter. For ``NO_SHARD``, this is a no-op.

        If FSDP is in :meth:`summon_full_params` and the handle uses parameter
        mixed precision, then the parameter is forced to full precision.
        """
        if not self.needs_unshard():
            # Even when not needing an unshard, we should switch to using
            # the unsharded flat parameter
            unsharded_flat_param = (
                self._get_padded_unsharded_flat_param()
                if self.uses_sharded_strategy
                else self.flat_param
            )
            self._use_unsharded_flat_param(unsharded_flat_param)
            return
        unsharded_flat_param = self._alloc_padded_unsharded_flat_param()
        padded_unsharded_flat_param = self._all_gather_flat_param(unsharded_flat_param)
        self._use_unsharded_flat_param(padded_unsharded_flat_param)

    def needs_unshard(self) -> bool:
        """Return if the handle's flat parameter needs to be unsharded."""
        if not self.uses_sharded_strategy:
            return False
        # Already-allocated destination storage implies a previous unshard
        # whose result is still live
        unsharded_flat_param = self._get_padded_unsharded_flat_param()
        already_unsharded = _same_storage_size(
            unsharded_flat_param, unsharded_flat_param.numel()
        )
        return not already_unsharded

    def _alloc_padded_unsharded_flat_param(self):
        """
        Allocate the *padded* unsharded flat parameter.

        The unpadded unsharded
        flat parameter is always a view into the padded one. This padded
        parameter is saved to a different attribute on the ``FlatParameter``
        depending on if we force full precision.
        """
        self._check_sharded_strategy()
        flat_param = self.flat_param
        unsharded_flat_param = self._get_padded_unsharded_flat_param()
        self._check_storage_freed(unsharded_flat_param)
        _alloc_storage(unsharded_flat_param, flat_param._padded_unsharded_size)  # type: ignore[attr-defined]
        return unsharded_flat_param

    def _get_padded_unsharded_flat_param(self) -> torch.Tensor:
        """
        Return a reference to the padded unsharded flat parameter depending on the calling context.

        This should only be called if using a sharded strategy.
        """
        self._check_sharded_strategy()
        flat_param = self.flat_param
        if self._force_full_precision and self._uses_param_mixed_precision:
            # When parameter mixed precision is enabled, we use a different
            # tensor as the all-gather destination to preserve the invariant
            # that `_full_param_padded` is in the low precision
            unsharded_flat_param = flat_param._full_prec_full_param_padded  # type: ignore[attr-defined]
            _p_assert(
                unsharded_flat_param.dtype != self._fwd_bwd_param_dtype,
                f"Expects full precision but got {self._fwd_bwd_param_dtype}",
            )
            # For no-reshard-after-forward strategies, `_full_param_padded` may
            # still be allocated from a previous forward. As we are forcing
            # full precision here, the full-precision unsharded copy may be
            # modified, invalidating the existing low-precision unsharded copy,
            # so we should free it here to ensure a new all-gather for the next
            # forward/backward computation to persist the modifications.
            if flat_param._full_param_padded.untyped_storage().size() > 0:
                _free_storage(flat_param._full_param_padded)
        else:
            unsharded_flat_param = flat_param._full_param_padded  # type: ignore[attr-defined]
        return unsharded_flat_param
+ """ + _p_assert( + hasattr(self, "process_group") and hasattr(self, "world_size"), + "Expects a process group and world size to have been set via `shard()`", + ) + sharded_flat_param = self.flat_param.data + expected_numel = sharded_flat_param.numel() * self.world_size + _p_assert( + padded_unsharded_flat_param.numel() == expected_numel, + f"Expects {expected_numel} numel but got {padded_unsharded_flat_param.numel()}", + ) + + pg = ( + self._fake_process_group + if self._use_fake_all_gather + else self.process_group + ) + + # HACK this should be handled by C10D + if sharded_flat_param.is_cpu: # type: ignore[attr-defined] + tensor_list = list( + torch.chunk(padded_unsharded_flat_param, dist.get_world_size(pg)) + ) + dist.all_gather(tensor_list, sharded_flat_param, group=pg) + else: + dist.all_gather_into_tensor( + padded_unsharded_flat_param, + sharded_flat_param, + pg, + ) + + if self._offload_params: + # In case of offloading, `flat_param.data` (i.e. sharded param) is + # created on the pre-unshard stream. We need to hand it over to the + # unshard stream for all-gather + _no_dispatch_record_stream( + sharded_flat_param, + self._device_handle.current_stream(), # unshard_stream + ) + return padded_unsharded_flat_param + + def _use_unsharded_flat_param( + self, + padded_unsharded_flat_param: torch.Tensor, + ) -> None: + """ + Switch to use the *unpadded* unsharded flat parameter. + + This is a view into the *padded* unsharded flat parameter. 
+ """ + unsharded_size = self.flat_param._unpadded_unsharded_size + flat_param_part = padded_unsharded_flat_param[: unsharded_size.numel()] + # slicing [:] is not visible to autograd because of .data + self.flat_param.data = flat_param_part + in_forward = self._training_state == HandleTrainingState.FORWARD + in_pre_backward = self._training_state == HandleTrainingState.BACKWARD_PRE + if self._use_orig_params: + if self._skipped_use_sharded_views and in_pre_backward: + # This call corresponds to the complementary pre-backward + # `_use_unsharded_views()` to the skipped pre-forward + # `_use_sharded_views()`, so we should skip this one too. + return + # We use `Tensor` views in the forward so that they are tracked by + # autograd. We use them in the pre-backward as well to support + # reentrant activation checkpointing, which needs the views to be + # tracked by autograd in the backward pass's recomputed forward. + self._use_unsharded_views( + as_params=(not in_forward and not in_pre_backward) + ) + elif in_forward: + self._use_unsharded_views(as_params=False) + + def post_unshard(self): + """ + Run the post-unshard logic. + + This includes freeing the low precision shard if needed. + """ + if self._uses_param_mixed_precision and self.uses_sharded_strategy: + self._free_low_precision_sharded_param() + self._check_on_compute_device(self.flat_param) + + def _free_low_precision_sharded_param(self): + """Frees the low precision sharded flat parameter.""" + self._check_low_precision_shard() + # `_mp_shard` is allocated in the pre-unshard stream, consumed in the + # unshard stream for sharded strategies, and consumed in both the + # unshard and default streams for `NO_SHARD`. For sharded strategies, + # the current stream here is the unshard stream, and for `NO_SHARD`, + # it is the default stream. For `NO_SHARD`, only recording for the + # default stream suffices since the default stream waits for the + # unshard stream. 
+ _no_dispatch_record_stream( + self.flat_param._mp_shard, self._device_handle.current_stream() # type: ignore[attr-defined] + ) + _free_storage(self.flat_param._mp_shard) # type: ignore[attr-defined] + + @torch.no_grad() + def unshard_grad(self): + """ + Unshard the handle's ``FlatParameter``'s gradient. + + If all ranks have + ``None`` gradient, then all original parameters will as well. This + method performs an all-reduce and an all-gather. The additional + all-reduce is tolerable since this method is not meant to be used on + the computation critical path. + + Postcondition: ``_saved_grad_shard`` is defined and contains the value + to set ``flat_param.grad`` after gradients are resharded. + """ + if not self.uses_sharded_strategy: + self._use_unsharded_grad_views() + return + flat_param = self.flat_param + self._check_unsharded(flat_param) + + # Check if all ranks have a `None` gradient + num_grad_none = torch.zeros(1, dtype=torch.int32, device=self.device) + num_grad_none[0] = flat_param.grad is None + dist.all_reduce(num_grad_none, group=self.process_group) + if num_grad_none[0] == self.world_size: + flat_param._saved_grad_shard = None # type: ignore[assignment] + self._use_unsharded_grad_views() + return + + if flat_param.grad is None: + # In the case that only some ranks have `None` gradient, we use + # zeros to approximate as a best effort attempt + if self._debug_level == dist.DebugLevel.INFO: + warnings.warn( + f"[Rank {self.rank}] Only some but not all ranks have a " + "`None` `FlatParameter` gradient, so FSDP is using zeros to " + "approximate those ranks' sharded gradients being `None`" + ) + flat_param._saved_grad_shard = None # type: ignore[assignment] + sharded_grad = torch.zeros(flat_param._sharded_size, device=self.device) # type: ignore[attr-defined] + else: + self._check_sharded(flat_param.grad) + flat_param._saved_grad_shard = flat_param.grad # type: ignore[attr-defined] + sharded_grad = flat_param._saved_grad_shard # type: 
ignore[attr-defined] + padded_unsharded_grad = torch.empty( + flat_param._padded_unsharded_size, # type: ignore[attr-defined] + device=self.device, + dtype=sharded_grad.dtype, + ) + dist.all_gather_into_tensor( + padded_unsharded_grad, sharded_grad, self.process_group + ) + unsharded_size = self.flat_param._unpadded_unsharded_size + flat_param.grad = padded_unsharded_grad[: unsharded_size.numel()].view( + unsharded_size + ) + self._use_unsharded_grad_views() + + def reshard_grad(self): + if self._use_orig_params: + self._use_sharded_grad_views() + if not self.uses_sharded_strategy: + return + self.flat_param.grad = self.flat_param._saved_grad_shard # type: ignore[attr-defined] + delattr(self.flat_param, "_saved_grad_shard") + + def prepare_gradient_for_backward(self): + """ + Prepare the gradient for the backward computation. + + This is done by saving and clearing any existing sharded gradient + in ``.grad`` to enable computing a new unsharded gradient. + """ + _p_assert( + self._training_state + in (HandleTrainingState.BACKWARD_PRE, HandleTrainingState.IDLE), + "Expects to be in `BACKWARD_PRE` or `IDLE` (if prefetching)", + ) + flat_param = self.flat_param + if flat_param.grad is not None and ( + flat_param.grad.size() != flat_param._unpadded_unsharded_size + or flat_param.grad.device != flat_param.device # grad on CPU + ): + self._check_on_compute_device(self.flat_param) + grad_offloaded = flat_param.grad.device != self.device + _p_assert( + not grad_offloaded or self._offload_params, + f"Expects the sharded gradient to be on {self.device} " + f"but got {flat_param.grad.device}", + ) + prev_iter_synced_gradients = ( + flat_param.grad.size() + == flat_param._local_shard.size() # type: ignore[attr-defined] + ) + if prev_iter_synced_gradients: + # TODO (awgu): Gradient accumulation outside `no_sync()` + # does not work with CPU offloading. 
The issue should be + # that, in the post-backward hook, we cannot do an addition + # between a CPU tensor (the existing sharded gradient) and + # a GPU tensor (the new sharded gradient). + if not grad_offloaded: + flat_param._saved_grad_shard = flat_param.grad.data # type: ignore[attr-defined] + sharded_grad = flat_param._saved_grad_shard # type: ignore[attr-defined] + else: + _p_assert( + hasattr(flat_param, "_cpu_grad"), + "`_cpu_grad` should be defined if the gradient is on CPU", + ) + sharded_grad = flat_param._cpu_grad # type: ignore[attr-defined] + # If user specified to keep the gradient in low precision, then + # the gradient may still be of the low precision dtype if the + # user did not set the gradient to `None` after the previous + # backward, in which case FSDP should cast back to the full + # precision dtype so that FSDP can accumulate in that dtype in + # the post-backward hook and assign to `.grad` in that dtype in + # the post-backward callback. + local_shard_dtype = flat_param._local_shard.dtype # type: ignore[attr-defined] + if ( + self._keep_low_precision_grads + and sharded_grad.dtype != local_shard_dtype + ): + sharded_grad.data = sharded_grad.to(local_shard_dtype) + else: + padded_unsharded_size = flat_param._padded_unsharded_size # type: ignore[attr-defined] + _p_assert( + flat_param.grad.size() == padded_unsharded_size, + "Expects `.grad` to be the unsharded gradient in " + f"`no_sync()` with size {padded_unsharded_size} " + f"but got size {flat_param.grad.size()}", + ) + flat_param.grad = None + + def prepare_gradient_for_optim(self): + """Prepare the gradient for optimizer computation by moving the sharded gradient to the ``.grad`` attribute.""" + + def cast_grad_to_param_dtype_if_needed(flat_param): + # TODO (rohan-varma): test for full precision with keep_low_precision_grads + if not self._force_full_precision and self._keep_low_precision_grads: + _p_assert(flat_param.grad is not None, "Unexpected None grad!") + if 
flat_param.grad.dtype != self._fwd_bwd_param_dtype: + flat_param.grad.data = flat_param.grad.to(self._fwd_bwd_param_dtype) + if self._use_orig_params: + self._use_sharded_grad_views() + + flat_param = self.flat_param + # TODO (awgu): We should replace these conditional checks to encode + # the logical intention more directly. + if hasattr(flat_param, "_cpu_grad"): + # NOTE: This branch includes `NO_SHARD`. + self._check_sharded(flat_param) + self._check_on_cpu(flat_param) + flat_param.grad = flat_param._cpu_grad # type: ignore[attr-defined] + cast_grad_to_param_dtype_if_needed(flat_param) + elif hasattr(flat_param, "_saved_grad_shard"): + self._check_sharded(flat_param) + self._check_on_compute_device(flat_param) + if flat_param._saved_grad_shard is not None: + self._check_on_compute_device(flat_param._saved_grad_shard) # type: ignore[attr-defined] + # If no sharded gradient was computed this iteration, then there is + # no need to forward `_saved_grad_shard` to `grad` + if flat_param._post_backward_called: # type: ignore[attr-defined] + flat_param.grad = flat_param._saved_grad_shard # type: ignore[attr-defined] + if flat_param.grad is not None: + cast_grad_to_param_dtype_if_needed(flat_param) + else: + _p_assert( + not self.uses_sharded_strategy + or not flat_param._post_backward_called, # type: ignore[attr-defined] + "All sharded parameters that received a gradient in the " + "post-backward should use `_saved_grad_shard`", + ) + # Delete `_saved_grad_shard` since its existence indicates a previous + # gradient to accumulate with in the post-backward hook + if hasattr(flat_param, "_saved_grad_shard"): + delattr(flat_param, "_saved_grad_shard") + + @contextlib.contextmanager + def to_cpu(self): + """ + Move the unpadded unsharded flat parameter to CPU while in the context and moves it back to the previous device upon exit. 
+ + For now, this assumes the ``FlatParameter`` is the unpadded unsharded flat parameter + since (1) there is no reason to include the padding in the copy and (2) + there is no use case for the sharded flat parameter. + + Precondition: ``self.flat_param`` 's data is the unpadded unsharded + flat parameter on the compute device, and the handle uses a sharded + strategy. + Postcondition: Same as the precondition. + """ + self._check_sharded_strategy() + _p_assert( + self.flat_param.size() == self.flat_param._unpadded_unsharded_size, + f"Expects size {self.flat_param._unpadded_unsharded_size} but got {self.flat_param.size()}", + ) + self._check_on_compute_device(self.flat_param) + # Check that the unpadded unsharded flat parameter is a view into the + # padded unsharded flat parameter as expected + # NOTE: This check is not strictly needed for correctness but is a + # useful sanity check since the tensor should only be used internally. + _p_assert( + _same_storage(self.flat_param, self._get_padded_unsharded_flat_param()), + "Expects the unpadded parameter to be a view into the padded parameter", + ) + self.flat_param_to(torch.device("cpu")) + self._free_unsharded_flat_param() + try: + yield + finally: + _p_assert( + self.flat_param.size() == self.flat_param._unpadded_unsharded_size, + f"Expects size {self.flat_param._unpadded_unsharded_size} but got {self.flat_param.size()}", + ) + padded_unsharded_flat_param = self._alloc_padded_unsharded_flat_param() + # Copy from CPU to the compute device + padded_unsharded_flat_param[: self.flat_param.numel()].copy_( + self.flat_param + ) + self._use_unsharded_flat_param(padded_unsharded_flat_param) + + def reshard(self, free_unsharded_flat_param: bool): + """ + Run the reshard logic. + + This includes freeing the unsharded flat + parameter if ``free_unsharded_flat_param`` and switching to using the + sharded flat parameter. 
Note that this also implicitly offloads + the sharded flat parameter (if CPU offload is enabled) by pointing + it to the ``_local_shard`` attribute which resides on CPU. + """ + # Switch to the sharded `FlatParameter` before freeing to prevent + # "use-after-free"-type bugs with external profiling tools, where for + # `use_orig_params=True`, the `param` does not point to valid memory + # when setting `param.data = ...` in `_use_sharded_views()`. + self._use_sharded_flat_param() + if free_unsharded_flat_param: + self._free_unsharded_flat_param() + + def post_reshard(self): + """ + Run the post-reshard logic. + + This includes freeing any memory that + can now be freed given that the ``FlatParameter`` points to the full + precision sharded flat parameter. + + Precondition: ``self.flat_param`` 's data points to the full precision + sharded flat parameter. + """ + # For `NO_SHARD`, `_mp_shard` is not freed in the post-unshard since it + # is also the low precision *unsharded* flat parameter. Hence, we delay + # the free until the reshard. + if ( + self._uses_param_mixed_precision + and not self.uses_sharded_strategy + and not self._force_full_precision # did not use the low precision shard + ): + self._free_low_precision_sharded_param() + + def _free_unsharded_flat_param(self): + """ + Free the padded unsharded flat parameter. We allow this + function to be called even when storage is not allocated + + The tensor to free depends + on the calling context since the unshard may have forced full + precision, in which case a different tensor is used. 
+ """ + self._check_sharded_strategy() + unsharded_flat_param = self._get_padded_unsharded_flat_param() + self._check_on_compute_device(unsharded_flat_param) + # Do not free the memory until all ops in the current stream finish + _no_dispatch_record_stream( + unsharded_flat_param, self._device_handle.current_stream() + ) + _free_storage(unsharded_flat_param) + + def _use_sharded_flat_param(self) -> None: + """Switches to using the sharded flat parameter.""" + flat_param = self.flat_param + if self._use_orig_params: + in_forward = self._training_state == HandleTrainingState.FORWARD + skip_use_sharded_views = ( + torch.is_grad_enabled() + and in_forward + and self._sharding_strategy + in NO_RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES + ) + # Only incur the extra `.data` call if needed + if skip_use_sharded_views: + unsharded_flat_param = flat_param.data + if self._offload_params: + device = flat_param._local_shard.device # type: ignore[attr-defined] + _p_assert( + device == torch.device("cpu"), + f"Expects the local shard to be on CPU but got {device}", + ) + flat_param.data = flat_param._local_shard # type: ignore[attr-defined] + if self._use_orig_params: + if skip_use_sharded_views: # type: ignore[possibly-undefined] + self._unsharded_flat_param_for_skipped_views = unsharded_flat_param # type: ignore[possibly-undefined] + else: + self._use_sharded_views() + # For the post-forward reshard, we may try to use sharded gradient + # views (or unsharded gradient views if a gradient was accumulated + # in `no_sync()`), but for the post-backward reshard, we delay the + # call to after the reduce-scatter. + if ( + in_forward # type: ignore[possibly-undefined] + # Skip using gradient views if skipped using sharded views + # since exposing unsharded parameters with sharded gradients + # may be confusing to the user + and not self._skipped_use_sharded_views + ): + # TODO: Change `_unpadded_unsharded_size` if we change the + # gradient to be computed directly with padding. 
+ accumulated_grad_in_no_sync = ( + flat_param.grad is not None + and self.uses_sharded_strategy + and flat_param.grad.shape == flat_param._unpadded_unsharded_size + ) + if accumulated_grad_in_no_sync: + self._use_unsharded_grad_views() + else: + self._use_sharded_grad_views() + + ######### + # VIEWS # + ######### + @no_type_check + def _get_unflat_views_unaligned( + self, + tensor: Optional[torch.Tensor] = None, + ) -> Iterator[Tensor]: + """ + Return unflattened ``Tensor`` views into ``tensor``. + + If `tensor`` is ``None``, ``flat_param`` is used. The unflattening is based + on ``flat_param`` 's metadata. + + Examples for ``tensor`` include ``flat_param.grad`` or unsharded + tensor optimizer state. + """ + flat_param = self.flat_param + if tensor is None: + tensor = flat_param + views = ( + _ext_post_unflatten_transform( + subtensor.view(shape), + param_extension, + self._fsdp_extension, + ) + for (subtensor, shape, param_extension) in zip( + torch.split(tensor, flat_param._numels, dim=0), + flat_param._shapes, + flat_param._param_extensions, + ) + ) + return views + + @no_type_check + def _get_unflat_views_aligned( + self, + tensor: Optional[Tensor] = None, + ) -> List[Tensor]: + """ + Return unflattened ``Tensor`` views into ``tensor`` with handling for padding. + + This method has the same contract as :meth:`_get_unflat_views_unaligned` + except it checks for ``None`` placeholders representing padding for + alignment, which may incur slightly more CPU overhead. 
+ """ + flat_param = self.flat_param + if tensor is None: + tensor = flat_param + splits: List[Tensor] = torch.split( + tensor, flat_param._numels_with_padding, dim=0 + ) + idx = 0 + views: List[Tensor] = [] + for split, is_padding in zip(splits, flat_param._is_padding_mask): + if is_padding: + continue + views.append( + _ext_post_unflatten_transform( + split.view(flat_param._shapes[idx]), + flat_param._param_extensions[idx], + self._fsdp_extension, + ) + ) + idx += 1 + return views + + @no_type_check + @torch.enable_grad() + def _use_unsharded_views(self, as_params: bool) -> None: + """ + Unflatten the unsharded flat parameter by setting the original parameter variables to be views into it. + + Args: + as_params (bool): If ``True``, then registers the original + parameters as ``nn.Parameter`` s; if ``False``, then registers + the original parameters only as ``Tensor`` s. ``False`` should + be used during forward/backward computation and when hiding the + original parameters from :meth:`nn.Module.named_parameters`. + + Note: + when prefetching for next forward, current forward may be + annotated with `@torch.no_grad()` + `@torch.enable_grad()` ensures non-empty `view.grad_fn` + otherwise `_post_backward_hook` will not get called + """ + flat_param = self.flat_param + self._check_unsharded(flat_param) + views = self._get_unflat_views() + from torch.distributed.tensor import DTensor + + for i, (view, (param_name, module, _)) in enumerate( + zip(views, flat_param._param_infos) + ): + if self._use_orig_params and as_params: + if type(view) is DTensor: + # A `DTensor` `view` is not compatible with assigning + # `param.data = view`, so we cannot preserve the parameter + # variable. 
+ self._setattr_param( + module, + param_name, + nn.Parameter(view, requires_grad=flat_param.requires_grad), + ) + continue + param = self.flat_param._params[i] + self._setattr_param(module, param_name, param) + param.data = view + elif as_params: + self._setattr_param( + module, + param_name, + nn.Parameter(view, requires_grad=flat_param.requires_grad), + ) + else: # `as_params=False` + param_var: Tensor = view + if self._use_orig_params: + if self._training_state == HandleTrainingState.FORWARD: + # Save the `Tensor` for the pre-backward + self.flat_param._tensors[i] = view # save for pre-backward + elif self._training_state == HandleTrainingState.BACKWARD_PRE: + # Use the saved `Tensor` variable from the forward to + # preserve the autograd graph so that the post-backward + # hook fires (e.g. for reentrant AC) + tensor = self.flat_param._tensors[i] + tensor.data = view + param_var = tensor + self._setattr_tensor(module, param_name, param_var) + if ( + self._use_orig_params + and self._training_state == HandleTrainingState.FORWARD + ): + module._parameters[param_name] = param_var + for i, ( + param_name, + module, + _, + prim_param_name, + prim_module, + _, + ) in enumerate(self.flat_param._shared_param_infos): + prim_param: Union[Tensor, nn.Parameter] = getattr( + prim_module, prim_param_name + ) + _p_assert( + not as_params or isinstance(prim_param, nn.Parameter), + f"as_params={as_params} type(prim_param)={type(prim_param)}", + ) + if self._use_orig_params and as_params: + shared_param = self.flat_param._shared_params[i] + self._setattr_param(module, param_name, shared_param) + shared_param.data = prim_param + elif as_params: + self._setattr_param(module, param_name, prim_param) + else: + self._setattr_tensor(module, param_name, prim_param) + if ( + self._use_orig_params + and self._training_state == HandleTrainingState.FORWARD + ): + module._parameters[param_name] = prim_param + + @no_type_check + def _use_unsharded_grad_views(self) -> None: + """ + Unflatten 
the unsharded flat parameter's gradient. + + The original parameter variables' gradients are set to be views into + the unsharded flat parameter's gradient. + """ + # Expects the gradient to be in `flat_param.grad` + if self.flat_param.grad is None: + for param in chain(self.flat_param._params, self.flat_param._shared_params): + param.grad = None + return + self._check_unsharded(self.flat_param.grad) + views = self._get_unflat_views(self.flat_param.grad) + for i, (view, (param_name, module, _)) in enumerate( + zip(views, self.flat_param._param_infos) + ): + _p_assert( + hasattr(module, param_name), + f"{self.flat_param._fqns[i]} is missing", + ) + param = getattr(module, param_name) + if ( + param.shape != view.shape + or param.dtype != view.dtype + or param.device != view.device + ): + # NOTE: This is a hack using `.data` to side step the check + # that parameter/gradient sizes/dtypes/devices match. From + # calling `reshard()`, `param` has the sharded size, has the + # full precision dtype, and if CPU offloading is enabled, is on + # CPU. Thus, one or more of the following cases can hold when + # in `no_sync()`, where `view` is the original parameter's + # gradient: + # 1. `view` can have the unsharded size. + # 2. `view` can have the parameter low precision dtype. + # 3. `view` can be on GPU. + if param.grad is None: + param.grad = torch.empty_like(param) + param.grad.data = view + else: + param.grad = view + for i, ( + param_name, + module, + module_name, + prim_param_name, + prim_module, + _, + ) in enumerate(self.flat_param._shared_param_infos): + _p_assert( + hasattr(module, param_name), + f"{module_name + '.' 
+ param_name if module_name else param_name} is missing", + ) # did not save FQN info in `_shared_param_infos` + param = getattr(module, param_name) + prim_param = getattr(prim_module, prim_param_name) + if ( + param.shape != prim_param.grad.shape + or param.dtype != prim_param.grad.dtype + or param.device != prim_param.grad.device + ): + # NOTE: This is the same hack to use `.data` to side step the + # size check. + if param.grad is None: + param.grad = torch.empty_like(param) + param.grad.data = prim_param.grad + else: + param.grad = prim_param.grad + + @contextlib.contextmanager + def unflatten_as_params(self) -> Generator: + """ + Unflatten the original parameters. + + The function assumes that the flat parameter is unsharded. When in the context, + unflattens the original parameters as ``nn.Parameter`` views into the + flat parameter, and after the context, restores the original parameters + as ``Tensor`` views into the flat parameter. + """ + self._use_unsharded_views(as_params=True) + try: + yield + finally: + self._use_unsharded_views(as_params=False) + + @no_type_check + @torch.no_grad() + def _use_sharded_views(self) -> None: + """ + Set the original parameter variables' data to be flattened views into the sharded flat parameter. + + The views are kept as flattened to simplify the case where a parameter + is sharded across ranks. Parameters whose data is not present in the + sharded flat parameter have their data set to a size-0 empty tensor. We + do not delete them to ensure to preserve expected behaviors like model + printability. Parameters whose data is present must preserve their + variables to be passable to an optimizer. 
+ """ + self._unsharded_flat_param_for_skipped_views = None + if not self.uses_sharded_strategy: + # For `NO_SHARD`, use the *unflattened* unsharded views since we + # have the unsharded parameter + self._use_unsharded_views(as_params=True) + return + flat_param = self.flat_param + self._check_sharded(flat_param) + # Construct once and reuse for all parameters not in the local shard + size_0_empty_tensor = torch.empty( + 0, + dtype=self.flat_param.dtype, # in case `flat_param` changed dtype + device=self.flat_param.device, + requires_grad=False, + ) + for param, shard_param_info, (param_name, module, _) in zip( + flat_param._params, flat_param._shard_param_infos, flat_param._param_infos + ): + self._setattr_param(module, param_name, param) + if not shard_param_info.in_shard: + # Allow the original data to be freed via garbage collection + param.data = size_0_empty_tensor + else: + offset = shard_param_info.offset_in_shard + numel_in_shard = shard_param_info.numel_in_shard + param.data = flat_param[offset : offset + numel_in_shard] + assert self.flat_param._shared_params is not None + for i, ( + param, + (param_name, module, _, prim_param_name, prim_module, _), + ) in enumerate( + zip(self.flat_param._shared_params, self.flat_param._shared_param_infos) + ): + self._setattr_param(module, param_name, param) + prim_param = getattr(prim_module, prim_param_name) + param.data = prim_param # could be both empty and non-empty + if self._training_state == HandleTrainingState.BACKWARD_POST: + # Clear the saved `Tensor`s since they are unneeded now + for i in range(len(self.flat_param._tensors)): + self.flat_param._tensors[i] = None + + @no_type_check + @torch.no_grad() + def _use_sharded_grad_views(self) -> None: + """ + Set the original parameter variables' gradients to be flattened views into the sharded flat parameter's gradient. + + This is a no-op if there is no gradient. 
+ + Parameters whose data is not present in the sharded flat parameter and + parameters with ``requires_grad=False`` have their gradients set to + ``None``. Since the gradient variables do not need to be preserved, + this method does not manipulate existing ``Tensor`` data directly and + creates new ``Tensor`` variables instead. + """ + flat_param = self.flat_param + self._check_sharded(flat_param) + grad = self.sharded_grad + if grad is None: + for param in chain(flat_param._params, flat_param._shared_params): + param.grad = None + return + self._check_sharded(grad) + for param, shard_param_info, is_grad_none in zip( + flat_param._params, + flat_param._shard_param_infos, + flat_param._is_grad_none_mask, + ): + if not shard_param_info.in_shard: + param.grad = None + else: + numel_in_shard = shard_param_info.numel_in_shard + if param.requires_grad and not is_grad_none: + offset = shard_param_info.offset_in_shard + if self._keep_low_precision_grads or param.dtype != grad.dtype: + # NOTE: This is a hack using `.data` to side step the + # check that parameter/gradient dtypes match. Here, + # `param` has full precision; `grad` has low precision. 
+ if param.grad is None: + # `.grad` must have the same shape as `param` + param.grad = torch.empty_like(param) + param.grad.data = grad[ + offset : offset + numel_in_shard + ].reshape(param.shape) + else: + param.grad = grad[offset : offset + numel_in_shard].reshape( + param.shape + ) + else: + param.grad = None + assert flat_param._shared_params is not None + for i, (param, (_, _, _, prim_param_name, prim_module, _)) in enumerate( + zip(flat_param._shared_params, flat_param._shared_param_infos) + ): + in_sharded_flat_param = hasattr(prim_module, prim_param_name) + if in_sharded_flat_param and param.requires_grad: + prim_param = getattr(prim_module, prim_param_name) + param.grad = prim_param.grad # share the same reference + else: + param.grad = None + + @no_type_check + @torch.no_grad() + def _writeback_orig_params(self) -> bool: + """ + Write back any parameters that changed storage to the handle's ``FlatParameter``. + + Iterates over the original parameters and writes back any parameters + that changed storages (due to a non-inplace operator) to the handle's + ``FlatParameter``. This method preserves the ``FlatParameter` 's + device even if an original parameter's device changes. + + Raises: + RuntimeError: If an original parameter or gradient changes storages + but no longer has the expected flattened shape. + Returns: ``True`` if some writeback happened, and ``False`` otherwise. + """ + if ( + self.uses_sharded_strategy + and not self.is_sharded(self.flat_param) + and not self._skipped_use_sharded_views + ): + # For `NO_SHARD`, we may still need to writeback + return False + flat_param = self.flat_param + wroteback = False + if self._skipped_use_sharded_views and self.uses_sharded_strategy: + # NOTE: We must use the unsharded flat parameter from which the + # unsharded views were computed, not the one from the current + # calling context (`_get_padded_unsharded_flat_param()`) since that + # may be different (e.g. the model changed from train to eval). 
+ flat_param_tensor = self._unsharded_flat_param_for_skipped_views + _p_assert( + _data_ptr_allocated(flat_param_tensor), + "If skipped using sharded views, the unsharded flat parameter " + "should be allocated", + ) + else: + flat_param_tensor = flat_param + # NOTE: Since this method is called in the pre-unshard, which is only + # called during computation in the pre-forward or pre-backward, the + # sharded gradient should be guaranteed to be in `.grad`, not in + # `._saved_grad_shard`. + flat_param_grad = ( + flat_param.grad + if self.uses_sharded_strategy or not self._offload_params + else flat_param._cpu_grad + ) + for i, ( + param, + (in_shard, offset_in_shard, numel_in_shard, _, _), + (param_name, module, _), + ) in enumerate( + zip( + flat_param._params, + flat_param._shard_param_infos, + flat_param._param_infos, + ) + ): + if not in_shard: + continue + if not hasattr(module, param_name): + # Do not writeback if original parameters are deregistered + # (e.g. during model checkpointing) + continue + + # Check for parameter writeback + if self._skipped_use_sharded_views: + param = flat_param._tensors[i] + _p_assert( + param is not None, + f"Expects to have saved tensor for {flat_param._fqns[i]}", + ) + param_changed = getattr(module, param_name) is not param + needs_param_writeback = ( + param_changed # changed parameter variable itself + or not _same_storage(param, flat_param_tensor) + ) + if self._skipped_use_sharded_views and ( + param_changed or needs_param_writeback + ): + raise AssertionError( + "FSDP does not support changing the parameters between " + f"forward and backward for {self._sharding_strategy}" + ) + if param_changed: + # NOTE: The gradient is not preserved after a parameter change. 
+ param = getattr(module, param_name) + flat_param._params[i] = param + if needs_param_writeback: + expected_shape = torch.Size([numel_in_shard]) + self._writeback_tensor( + param, flat_param, i, expected_shape, offset_in_shard, True + ) + wroteback = True + + # Check for gradient writeback + if self._skipped_use_sharded_views: + # Skip the writeback check because we do not expose gradients + # when we skipped using sharded views + continue + if param.grad is None and flat_param.grad is not None: + expected_shape = torch.Size([numel_in_shard]) + self._writeback_tensor( + None, flat_param.grad, i, expected_shape, offset_in_shard, False + ) + elif param.grad is not None: + # For `NO_SHARD` + CPU offloading, `_cpu_grad` is always in + # memory and owns the gradient storage, so it will never + # require gradient writeback. + if not self.uses_sharded_strategy and self._offload_params: + # Explicitly continue to handle the case of `no_sync()`, + # where `param.grad` is a view into the GPU gradient + # referenced by `flat_param.grad`, while `flat_param_grad` + # is `flat_param._cpu_grad`, which is on CPU + continue + + needs_grad_writeback = flat_param_grad is None or not _same_storage( + param.grad, flat_param_grad + ) + if needs_grad_writeback: + if flat_param_grad is None: + flat_param_grad = torch.zeros_like(flat_param) + expected_shape = torch.Size([numel_in_shard]) + self._writeback_tensor( + param.grad, + flat_param_grad, + i, + expected_shape, + offset_in_shard, + False, + ) + flat_param.grad = flat_param_grad + flat_param_grad = flat_param.grad + + # TODO: If we want to handle shared parameters, we need to re-generate + # the shared parameter data structures in case sharedness changed. 
+ for i, ( + param_name, + module, + _, + prim_param_name, + prim_module, + _, + ) in enumerate(flat_param._shared_param_infos): + if getattr(module, param_name) is not getattr(prim_module, prim_param_name): + raise NotImplementedError( + "Changing shared parameters is not supported yet" + ) + return wroteback + + def _writeback_tensor( + self, + src_tensor: Optional[Tensor], + dst_tensor: Tensor, + tensor_index: int, + expected_shape: torch.Size, + offset: int, + is_param: bool, # else gradient + ) -> None: + """ + Write back ``src_tensor`` to ``dst_tensor`` at offset ``offset``, where ``src_tensor`` should have shape ``expected_shape``. + + ``is_param`` indicates if the tensor is the parameter (if ``True``) or gradient (if + ``False``). If ``src_tensor`` is ``None``, then the effect is zeroing + instead of copying. ``tensor_index`` gives the index of ``src_tensor`` + in the metadata structures. + + Raises: + RuntimeError: If the ``src_tensor`` does not have the expected + shape. + """ + _p_assert( + len(expected_shape) == 1, + f"Expects a 1D expected shape but got {expected_shape}", + ) + if self._debug_level == dist.DebugLevel.INFO: + rank = self.rank if hasattr(self, "rank") else dist.get_rank() + src_shape = src_tensor.shape if src_tensor is not None else None + src_device = src_tensor.device if src_tensor is not None else None + warnings.warn( + f"[Rank {rank}] {'Parameter' if is_param else 'Gradient'} needs " + f"writeback in {self._training_state}\n" + f"expected shape={expected_shape} shape={src_shape} " + f"expected device={dst_tensor.device} device={src_device}" + ) + if src_tensor is not None and src_tensor.shape != expected_shape: + # NOTE: Gradient shape mismatch is not possible in practice since + # the gradient shape is enforced to match that of the parameter and + # we already check for parameter shape mismatch. 
+ raise RuntimeError( + f"Cannot writeback when the {'parameter' if is_param else 'gradient'} " + f"shape changes\nExpects {expected_shape} but got {src_tensor.shape}" + ) + if src_tensor is not None: + dst_tensor[offset : offset + expected_shape.numel()].copy_(src_tensor) + else: + dst_tensor[offset : offset + expected_shape.numel()].zero_() + assert self.flat_param._is_grad_none_mask is not None + self.flat_param._is_grad_none_mask[tensor_index] = True + + def _reset_flat_param_grad_info_if_needed(self): + """ + Reset ``flat_param.grad`` if needed. + + When ``use_orig_params=True``: + (1) sets the underlying ``flat_param.grad`` to ``None`` if *all* of the + original parameters' ``.grad`` are ``None``, and + (2) sets ``flat_param.requires_grad=False`` if *none* of the original + parameters require gradient. + For (1), this is targeting ``optim.zero_grad(set_to_none=True)``, in + which case we want to free the gradients as soon after the + ``zero_grad()`` call as possible. + """ + if not self._use_orig_params: + return + flat_param = self.flat_param + assert flat_param._params is not None # mypy + all_grad_none = True + requires_grad = False + for param in flat_param._params: + all_grad_none &= param.grad is None + requires_grad |= param.requires_grad + if all_grad_none: + flat_param.grad = None + # As long as one parameter requires gradient, then the flat parameter + # must require gradient + flat_param.requires_grad = requires_grad + + def _deregister_orig_params(self): + for param_info in self.flat_param._param_infos: + param_name, module, _ = param_info + if hasattr(module, param_name): + delattr(module, param_name) + for param_name, module, _, _, _, _ in self.flat_param._shared_param_infos: + if hasattr(module, param_name): + delattr(module, param_name) + + ########### + # HELPERS # + ########### + def flat_param_to(self, *args, **kwargs): + """Wrap an in-place call to ``.to()`` for ``self.flat_param``.""" + self.flat_param.data = self.flat_param.to(*args, 
**kwargs) + if self._use_orig_params: + # Refresh the views because their storage may have changed + if self.is_sharded(self.flat_param): + self._use_sharded_views() + else: + self._use_unsharded_views(as_params=True) + + def _get_modules(self) -> Set[nn.Module]: + """Return a :class:`set` of the modules whose parameters are included in this handle's flat parameter.""" + return {pi.module for pi in self.flat_param._param_infos}.union( + {spi.module for spi in self.flat_param._shared_param_infos} + ) + + def is_sharded(self, tensor: Tensor) -> bool: + """ + Return whether ``tensor`` is *currently* sharded. + + For ``NO_SHARD``, we choose to have this always return ``False`` for clarity. + """ + if ( + not hasattr(self.flat_param, "_sharded_size") + or not self.uses_sharded_strategy + ): + # `_sharded_size` is defined iff `handle.shard()` has been called + return False + sharded_size = self.flat_param._sharded_size # type: ignore[attr-defined] + return tensor.size() == sharded_size + + def param_module_names(self) -> Iterator[Tuple[str, str]]: + shared_param_infos = [ + ParamInfo(param_name, module, module_name) + for ( + param_name, + module, + module_name, + _, + _, + _, + ) in self.flat_param._shared_param_infos + ] + for param_info in chain(self.flat_param._param_infos, shared_param_infos): + param_name, _, module_name = param_info # type: ignore[misc] + yield (param_name, module_name) + + def shared_param_module_names(self) -> Iterator[Tuple[str, str]]: + for param_name, _, module_name in [ + ParamInfo(param_name, module, module_name) + for ( + param_name, + module, + module_name, + _, + _, + _, + ) in self.flat_param._shared_param_infos + ]: + yield (param_name, module_name) + + @property + def _fqns_in_shard(self) -> List[str]: + """Return the FQNs of the parameters present in this rank's shard.""" + fqns_in_shard: List[str] = [] + for fqn, shard_param_info in zip( + self.flat_param._fqns, self.flat_param._shard_param_infos # type: ignore[attr-defined] + ): + 
if shard_param_info.in_shard: + fqns_in_shard.append(fqn) + return fqns_in_shard + + @property + def sharded_grad(self) -> Optional[Tensor]: + """Return the handle's sharded gradient.""" + flat_param = self.flat_param + # Priority for non-`None`: `_cpu_grad` > `_saved_grad_shard` > `grad` + # - CPU offloading: `_cpu_grad` + # - No CPU offloading + sharded strategies: `_saved_grad_shard` + # - No CPU offloading + `NO_SHARD`: `grad` + grad: Optional[Tensor] + if hasattr(flat_param, "_cpu_grad"): + grad = flat_param._cpu_grad # type: ignore[attr-defined] + elif hasattr(flat_param, "_saved_grad_shard"): + # In the post-backward hook, the sharded gradient is still in + # `_saved_grad_shard`. + grad = flat_param._saved_grad_shard # type: ignore[attr-defined] + else: + # If in IDLE or in FORWARD states, then there may be an + # (accumulated) gradient. If accessed in IDLE, then this should + # be due to re-registering the original parameters (e.g. in state + # dict load). + _p_assert( + flat_param.grad is None + or not self.uses_sharded_strategy + or self._training_state + in (HandleTrainingState.FORWARD, HandleTrainingState.IDLE), + "Sharded strategies should use `_cpu_grad` or `_saved_grad_shard` " + "unless in IDLE or FORWARD", + ) + grad = flat_param.grad + return grad + + def _reset_is_grad_none(self) -> None: + """ + Reset ``_is_grad_none_mask`` as needed. + + This method should only be + called in the post-backward after gradient computation, in which case + if a parameter requires gradient, then it will surely receive a + gradient and we may reset its mask entry to ``False``. 
+ """ + if not self._use_orig_params: + return + _p_assert( + self._training_state == HandleTrainingState.BACKWARD_POST, + "Expects to only be called in the post-backward after gradient computation", + ) + flat_param = self.flat_param + assert flat_param._params is not None # mypy + for i, param in enumerate(flat_param._params): # type: ignore[arg-type] + # As long as the parameter requires gradient, it should receive a + # meaningful gradient (even if the gradient happens to be zeros) + if param.requires_grad: + assert flat_param._is_grad_none_mask is not None # mypy + flat_param._is_grad_none_mask[i] = False + + ####################### + # CHECKS & INVARIANTS # + ####################### + def _check_sharded_strategy(self): + _p_assert(self.uses_sharded_strategy, "Expects sharded strategy") + + def _check_on_compute_device(self, tensor: Tensor): + _p_assert( + tensor.device == self.device, + f"Expects tensor to be on the compute device {self.device}, was on {tensor.device}", + ) + + def _check_on_cpu(self, tensor: Tensor): + _p_assert( + tensor.device == torch.device("cpu"), + f"Expects tensor to be on CPU but got {tensor.device}", + ) + + @staticmethod + def _check_storage_freed(tensor: Tensor): + # Compile does not resize during trace + if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): + _p_assert( + _same_storage_size(tensor, 0), + "Expects storage to be freed but got storage with size > 0", + ) + + @staticmethod + def _check_storage_allocated(tensor: Tensor): + _p_assert(_storage_size_allocated(tensor), "Expects storage to be allocated") + + def _check_low_precision_shard(self): + _p_assert( + self._uses_param_mixed_precision, + "Not using low precision for parameters", + ) + _p_assert( + getattr(self.flat_param, "_mp_shard", None) is not None, + "Expects `_mp_shard` to exist", + ) + device = self.flat_param._mp_shard.device # type: ignore[attr-defined] + _p_assert( + device == self.device, + f"Expects the low precision shard to be 
on {self.device} but got {device}", + ) + + def _check_unsharded(self, tensor: Tensor): + msg_prefix = "Expects tensor to be unsharded " + _p_assert(tensor is not None, msg_prefix + "but got `None`") + unsharded_size = self.flat_param._unpadded_unsharded_size + _p_assert( + tensor.size() == unsharded_size, + msg_prefix + f"with size {unsharded_size} but got {tensor.size()}", + ) + + def _check_sharded(self, tensor: Tensor): + msg_prefix = "Expects tensor to be sharded " + _p_assert(tensor is not None, msg_prefix + "but got `None`") + sharded_size = self.flat_param._sharded_size # type: ignore[attr-defined] + _p_assert( + tensor.size() == sharded_size, + msg_prefix + f"with size {sharded_size} but got {tensor.size()}", + ) + + ############## + # PROPERTIES # + ############## + @property + def uses_sharded_strategy(self) -> bool: + return self._sharding_strategy != HandleShardingStrategy.NO_SHARD + + @property + def _uses_param_mixed_precision(self) -> bool: + return self._fwd_bwd_param_dtype != self._orig_param_dtype + + @property + def _uses_reduce_mixed_precision(self) -> bool: + return self._reduce_dtype != self._orig_param_dtype + + @property + def _force_full_precision(self) -> bool: + return ( + self._uses_param_mixed_precision or self._uses_reduce_mixed_precision + ) and ( + self._training_state == HandleTrainingState.SUMMON_FULL_PARAMS + or + # Also disable mixed precision in model eval mode, if configured + (not self._fully_sharded_module.training and self._use_full_prec_in_eval) + ) + + @property + def _skipped_use_sharded_views(self) -> bool: + """ + This property is used for sharding strategies that do not free after forward with ``use_orig_params=True``. + + This returns if this handle is + currently in a state where it has skipped using sharded views, in which + case it can restore view invariants via ``_use_sharded_views()``. 
        """
        return self._unsharded_flat_param_for_skipped_views is not None


# NOTE: These are hacks to bypass `nn.Module.__setattr__` checks.
def _unsafe_setattr_param(
    module: nn.Module, param_name: str, param: nn.Parameter
) -> None:
    """Register ``param`` on ``module`` without going through ``nn.Module.__setattr__``."""
    module._parameters[param_name] = param
    # This bypasses any overrides in case `module` is an instance of an
    # `nn.Module` subclass
    super(nn.Module, module).__setattr__(param_name, param)


def _unsafe_setattr_tensor(module: nn.Module, param_name: str, tensor: Tensor) -> None:
    """Set a plain tensor attribute on ``module``, dropping any parameter registered under the same name."""
    module._parameters.pop(param_name, None)
    # This bypasses any overrides in case `module` is an instance of an
    # `nn.Module` subclass
    super(nn.Module, module).__setattr__(param_name, tensor)


def _safe_setattr_tensor_or_param(
    module: nn.Module, param_name: str, tensor_or_param: Union[Tensor, nn.Parameter]
):
    """Set ``tensor_or_param`` on ``module`` through the normal ``nn.Module`` attribute protocol."""
    # Call `delattr()` and `setattr()` to go through `nn.Module` checks
    if hasattr(module, param_name):
        delattr(module, param_name)
    setattr(module, param_name, tensor_or_param)


def _convert_to_params(
    tensors: List[Union[torch.Tensor, nn.Parameter]]
) -> List[nn.Parameter]:
    """Wrap each plain tensor in an ``nn.Parameter``; pass existing parameters through unchanged."""
    return [t if isinstance(t, nn.Parameter) else nn.Parameter(t) for t in tensors]


def _detach_if_needed(param_or_tensor: Union[nn.Parameter, Tensor]) -> Tensor:
    """Return a detached view for parameters; return plain tensors as-is."""
    return (
        param_or_tensor.detach()
        if isinstance(param_or_tensor, nn.Parameter)
        else param_or_tensor
    )


def _get_aligned_numel(unsharded_dtype: torch.dtype) -> int:
    """Return how many elements of ``unsharded_dtype`` fit in one 16-byte-aligned unit."""
    # NOTE: This alignment constraint comes from TorchInductor.
    ALIGNMENT = 16  # bytes
    unsharded_dtype_size = _get_dtype_size(unsharded_dtype)
    aligned_numel = ALIGNMENT // unsharded_dtype_size
    return aligned_numel


@functools.lru_cache(8)
def _get_dtype_size(dtype: torch.dtype) -> int:
    """Return the size in bytes of one element of ``dtype`` (cached per dtype)."""
    return torch.empty((), dtype=dtype).element_size()


def _construct_padding_tensor(
    padding_numel: int, dtype: torch.dtype, requires_grad: bool, device: torch.device
) -> Tensor:
    """Return a 1D tensor of ``padding_numel`` elements filled with the magic padding value."""
    # NOTE: Set the padding value as a magic number for debuggability. The
    # value itself should never be used in any user-facing computation.
    return (
        torch.ones(
            (padding_numel,), dtype=dtype, requires_grad=requires_grad, device=device
        )
        * _FLAT_PARAM_PADDING_VALUE
    )


# Use `lru_cache(1)` to only log the warning once (assuming the fixed warning
# message is passed in)
# NOTE: `log` participates in the `lru_cache` key, but emission goes through
# the module-level `logger`.
@functools.lru_cache(1)
def _warn_skip_writeback_check(log: logging.Logger, warning: str) -> None:
    logger.warning(warning)


# Use `lru_cache(1)` to only log the warning once
@functools.lru_cache(1)
def _warn_use_fake_all_gather(log: logging.Logger, warning: str) -> None:
    logger.warning(warning)


# Use `lru_cache(1)` to only log the warning once
@functools.lru_cache(1)
def _warn_use_fake_reduce(log: logging.Logger, warning: str) -> None:
    logger.warning(warning)


def _same_storage(a, b) -> bool:
    """Return whether ``a`` and ``b`` share the same underlying storage (unwraps DTensors first)."""
    # Params are DTensors in backward
    # with SHARD_GRAD_OP + TP
    from torch.distributed.tensor import DTensor

    if isinstance(a, DTensor):
        a = a._local_tensor
    if isinstance(b, DTensor):
        b = b._local_tensor
    return a.untyped_storage().data_ptr() == b.untyped_storage().data_ptr()


def _same_storage_size(a: torch.Tensor, b: int) -> bool:
    """Return whether ``a``'s untyped storage holds exactly ``b`` elements of ``a``'s dtype."""
    return a.untyped_storage().size() // a.element_size() == b


def _storage_size_allocated(tensor: Tensor) -> bool:
    """Return whether ``tensor``'s storage has a nonzero allocated size."""
    storage_size: int = tensor.untyped_storage().size()
    return storage_size > 0
diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3166f4b1430d767a8e4c5c30da3590aafadab04a
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_init_utils.py
@@ -0,0 +1,1200 @@
# mypy: allow-untyped-defs
import collections
import itertools
import os
import warnings
from typing import (
    Any,
    Callable,
    Deque,
    Dict,
    Generator,
    Iterable,
    Iterator,
    List,
    no_type_check,
    Optional,
    Set,
    Tuple,
    TYPE_CHECKING,
    Union,
+) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._exec_order_utils as exec_order_utils +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.distributed.fsdp.fully_sharded_data_parallel as fsdp_file +import torch.nn as nn +from torch.distributed.algorithms._comm_hooks import default_hooks +from torch.distributed.device_mesh import _mesh_resources, DeviceMesh +from torch.distributed.distributed_c10d import _get_default_group +from torch.distributed.fsdp._common_utils import ( + _FSDPDeviceHandle, + _FSDPState, + _get_module_fsdp_state, + _is_fsdp_flattened, + _named_parameters_with_duplicates, + clean_tensor_name, + TrainingState, +) +from torch.distributed.fsdp._flat_param import ( + _FSDP_USE_FULL_PREC_IN_EVAL, + FlatParameter, + FlatParamHandle, + HandleShardingStrategy, +) +from torch.distributed.fsdp._limiter_utils import _FreeEventQueue +from torch.distributed.fsdp.api import ( + BackwardPrefetch, + CPUOffload, + FullOptimStateDictConfig, + FullStateDictConfig, + MixedPrecision, + ShardingStrategy, + StateDictConfig, + StateDictType, +) +from torch.distributed.fsdp.wrap import _Policy +from torch.distributed.tensor.parallel.fsdp import DTensorExtensions +from torch.distributed.utils import _sync_params_and_buffers +from torch.utils._python_dispatch import is_traceable_wrapper_subclass + + +if TYPE_CHECKING: + from torch.utils.hooks import RemovableHandle + +_TORCHDISTX_AVAIL = True +try: + from torchdistx import deferred_init, fake # type: ignore[import] +except ImportError: + _TORCHDISTX_AVAIL = False + +PARAM_BROADCAST_BUCKET_SIZE = int(250 * 1024 * 1024) +FSDP_SYNCED = "_fsdp_synced" +# Specification of process groups for hybrid sharding strategies. +HybridShardProcessGroupType = Tuple[dist.ProcessGroup, dist.ProcessGroup] +# Overall specification of process group. 
ProcessGroupType = Optional[Union[dist.ProcessGroup, HybridShardProcessGroupType]]


# TODO (awgu): Refactor this later
# Maps each public `ShardingStrategy` to the internal handle-level strategy.
SHARDING_STRATEGY_MAP = {
    ShardingStrategy.NO_SHARD: HandleShardingStrategy.NO_SHARD,
    ShardingStrategy.FULL_SHARD: HandleShardingStrategy.FULL_SHARD,
    ShardingStrategy.SHARD_GRAD_OP: HandleShardingStrategy.SHARD_GRAD_OP,
    ShardingStrategy.HYBRID_SHARD: HandleShardingStrategy.HYBRID_SHARD,
    ShardingStrategy._HYBRID_SHARD_ZERO2: HandleShardingStrategy._HYBRID_SHARD_ZERO2,
}
# Strategies that use an (intra-node, inter-node) process-group pair
HYBRID_SHARDING_STRATEGIES = [
    ShardingStrategy.HYBRID_SHARD,
    ShardingStrategy._HYBRID_SHARD_ZERO2,
]
# NOTE(review): per the name, these strategies keep parameters unsharded
# between forward and backward -- confirm against the reshard logic.
NO_RESHARD_AFTER_FORWARD_STRATEGIES = (
    ShardingStrategy.SHARD_GRAD_OP,
    ShardingStrategy._HYBRID_SHARD_ZERO2,
)


# NOTE: Since non-self attributes cannot be type annotated, several attributes
# on `state` are defined first as local variables before being assigned.


@no_type_check
def _init_process_group_state(
    state: _FSDPState,
    process_group: ProcessGroupType,
    sharding_strategy: ShardingStrategy,
    policy: Optional[_Policy],
    device_mesh: Optional[DeviceMesh] = None,
) -> _FSDPState:
    """
    Initialize ``state``'s process-group attributes for ``sharding_strategy``.

    At most one of ``process_group`` and ``device_mesh`` may be provided.
    """
    if process_group is not None and device_mesh is not None:
        raise ValueError(
            "Cannot pass both process_group and device_mesh at the "
            "same time. Please just pass only one of them."
        )
    is_hybrid_strategy = sharding_strategy in HYBRID_SHARDING_STRATEGIES
    if is_hybrid_strategy:
        if process_group is None and policy is None and device_mesh is None:
            # Raise an error here, since this is manual wrapping with no process group
            # passed in, there is no way to ensure all wrapped FSDP instances use the same
            # process groups.
            raise ValueError(
                f"Manual wrapping with {sharding_strategy} "
                "requires explicit specification of process group or device_mesh."
+ ) + else: + state = _init_process_group_state_for_hybrid_shard( + state, process_group, device_mesh + ) + else: + if device_mesh: + state._device_mesh = device_mesh + state.process_group = device_mesh.get_group(mesh_dim=0) + else: + state.process_group = ( + process_group if process_group is not None else _get_default_group() + ) + + state.rank = state.process_group.rank() + state.world_size = state.process_group.size() + data_parallel_world_size = state.world_size + if is_hybrid_strategy: + data_parallel_world_size *= state._inter_node_pg.size() + state._gradient_predivide_factor = ( + default_hooks.DefaultState._get_gradient_predivide_factor( + data_parallel_world_size + ) + ) + state._gradient_postdivide_factor = ( + data_parallel_world_size / state._gradient_predivide_factor + ) + return state + + +@no_type_check +def _init_process_group_state_for_hybrid_shard( + state: _FSDPState, + process_group: ProcessGroupType, + device_mesh: DeviceMesh, +) -> _FSDPState: + if device_mesh: + if _is_valid_hybrid_shard_device_mesh(device_mesh): + state._device_mesh = device_mesh + # We currently only allow _inter_node_pg to be the outermost dimension, and the + # process_group(intra_node) to be the innermost dimension. + state._inter_node_pg = device_mesh.get_group(mesh_dim=0) + state.process_group = device_mesh.get_group(mesh_dim=1) + else: + raise ValueError( + f"Expected device_mesh to have ndim=2 but got {device_mesh.ndim}" + ) + elif process_group is None: + default_group = _get_default_group() + intra_node_group, inter_node_group = _init_intra_and_inter_node_groups( + default_group, state._device_handle.device_count() + ) + # we shard across intra-node + state.process_group = intra_node_group + # save _inter_node_pg to allreduce across. + state._inter_node_pg = inter_node_group + else: + # Check type and assign state.process_group and state._inter_node_pg. 
        if _is_valid_hybrid_shard_pg_type(process_group):
            # Assuming that user passed in as intra node group and inter node group
            # as documented.
            state.process_group, state._inter_node_pg = process_group
        else:
            raise ValueError(
                "Expected process_group to be passed in as either None or "
                f"Tuple[dist.ProcessGroup, dist.ProcessGroup] but got {type(process_group)}"
            )
    # Create state for allreduce
    state._inter_node_state = _get_default_comm_hook_state(
        process_group=state._inter_node_pg,
    )
    return state


@no_type_check
def _is_valid_hybrid_shard_pg_type(process_group: Any) -> bool:
    """Return whether ``process_group`` is a 2-tuple of ``dist.ProcessGroup`` s (intra-node, inter-node)."""
    return (
        isinstance(process_group, tuple)
        and len(process_group) == 2
        and all(isinstance(pg, dist.ProcessGroup) for pg in process_group)
    )


@no_type_check
def _is_valid_hybrid_shard_device_mesh(device_mesh: DeviceMesh) -> bool:
    """Return whether ``device_mesh`` is a 2D :class:`DeviceMesh`, as hybrid sharding requires."""
    return isinstance(device_mesh, DeviceMesh) and device_mesh.ndim == 2


@no_type_check
def _init_intra_node_process_group(num_devices_per_node: int) -> dist.ProcessGroup:
    """
    Return a process group across the current node.

    For example, given each row is a distinct node:
    0 1 2 3 4 5 6 7
    8 9 10 11 12 13 14 15
    This API would return an intra-node subgroup across
    [0, 1, ..., 7] or [8, 9, ..., 15] depending on the process's rank.
    For example, rank 3 would get [0, 1, ..., 7].
    """
    intra_node_subgroup, _ = dist.new_subgroups(num_devices_per_node)
    return intra_node_subgroup


@no_type_check
def _init_inter_node_process_group(
    global_process_group: dist.ProcessGroup,
    num_devices_per_node: int,
) -> dist.ProcessGroup:
    """
    Return an inter-node process group where each contained rank has the same local rank.

    For example, given each row is a distinct node:
    0 1 2 3 4 5 6 7
    8 9 10 11 12 13 14 15
    This API would return inter-node process group [0, 8], [1, 9], [2, 10], and so forth
    depending on the process's rank. For example, rank 1 would get [1, 9], rank 5
    would get [5, 13].
+ """ + # the inter-node pg that is returned + inter_node_pg = None + sharding_backend = dist.get_backend(global_process_group) + world_size = dist.get_world_size(global_process_group) + # Assuming fully homogeneous setup + num_nodes = world_size // num_devices_per_node + my_local_rank = dist.get_rank(global_process_group) % num_devices_per_node + for local_rank in range(num_devices_per_node): + ranks_for_inter_group = [ + local_rank + (i * num_devices_per_node) for i in range(num_nodes) + ] + # every rank always needs to call dist.new_group + grp = dist.new_group(ranks=ranks_for_inter_group, backend=sharding_backend) + if local_rank == my_local_rank: + inter_node_pg = grp + + assert ( + inter_node_pg is not None + ), f"{my_local_rank} expected to assign inter-node pg, but did not" + return inter_node_pg + + +def _init_intra_and_inter_node_groups( + global_process_group: dist.ProcessGroup, + num_devices_per_node: int, +) -> Tuple[dist.ProcessGroup, dist.ProcessGroup]: + """ + Initialize intra and inter-node process groups and return the ones corresponding to this process's rank. + + This function can be used to initialize process groups for ``HYBRID_SHARD`` or + ``_HYBRID_SHARD_ZERO2`` in FSDP. + This function assumes each node has an equal number of CUDA-enabled devices. + Returns: + Tuple[dist.ProcessGroup, dist.ProcessGroup]: Intra and inter-node process group. + """ + return ( + _init_intra_node_process_group(num_devices_per_node), + _init_inter_node_process_group(global_process_group, num_devices_per_node), + ) + + +@no_type_check +def _init_ignored_module_states( + state: _FSDPState, + module: nn.Module, + ignored_modules: Optional[Iterable[torch.nn.Module]], + ignored_states: Union[ + Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]] + ] = None, +) -> _FSDPState: + if ignored_modules is not None and ignored_states is not None: + raise ValueError( + "Cannot pass both ignored_modules and ignored_states at the " + "same time. 
Please just pass ignored_states." + ) + ignored_parameters = None + passed_as_ignored_states = ignored_states is not None + if passed_as_ignored_states: + ignored_states_list = list(ignored_states) + _check_ignored_states(ignored_states_list, True) + else: + ignored_states_list = [] + _check_ignored_states( + list(ignored_modules) if ignored_modules is not None else [], False + ) + if len(ignored_states_list) > 0: + if isinstance(ignored_states_list[0], nn.Parameter): + ignored_parameters = ignored_states_list + else: + ignored_modules = ignored_states_list + state._ignored_modules = _get_ignored_modules(module, ignored_modules) + state._ignored_params = _get_ignored_params( + module, + state._ignored_modules, + ignored_parameters, + ) + state._ignored_buffer_names = _get_ignored_buffer_names( + module, + state._ignored_modules, + ) + # TODO: FSDP's contract for buffers is not well-defined. They are + # implicitly ignored for most functionality since they are not sharded; + # however, FSDP still imposes some semantics on buffers (e.g. buffer mixed + # precision). We should formalize this contract and decide if we need to + # compute and store `_ignored_buffers`. + return state + + +def _check_ignored_states( + ignored_states: List[Any], passed_as_ignored_states: bool +) -> None: + """ + Check that the ignored states are uniformly parameters or uniformly modules. + + We may remove this check in the future if we permit mixing. 
    """
    if len(ignored_states) == 0:
        return
    if passed_as_ignored_states:
        all_params = all(isinstance(state, nn.Parameter) for state in ignored_states)
        all_modules = all(isinstance(state, nn.Module) for state in ignored_states)
        if not all_params and not all_modules:
            # Sort for consistent ordering for unit test regex matching
            sorted_types = sorted({type(state) for state in ignored_states}, key=repr)
            raise ValueError(
                "ignored_states expects all nn.Parameter or all nn.Module list "
                f"elements but got types {sorted_types}"
            )
    else:
        if not all(isinstance(state, nn.Module) for state in ignored_states):
            sorted_types = sorted({type(state) for state in ignored_states}, key=repr)
            raise ValueError(
                "ignored_modules expects nn.Module list elements but got "
                f"types {sorted_types}"
            )


@no_type_check
def _init_device_handle(
    state: _FSDPState,
    module: nn.Module,
    ignored_params: Set[nn.Parameter],
    device_id: Optional[Union[int, torch.device]],
) -> _FSDPState:
    """
    Determine the device handle used for initializing FSDP.

    If a device is specified by ``device_id``, then the returned device handle
    corresponds to that device type. Otherwise, if the module is already on a
    non-CPU device, then the device type is that non-CPU device type. If the
    module is on CPU or meta, then the device type is the current accelerator
    device. See the :ref:`Accelerators` for details.

    This method should be called once the ignored parameters have been
    determined, as the device handle may be needed for other initialization.
+ """ + determined_device = None + if device_id is not None: + determined_device = ( + device_id + if isinstance(device_id, torch.device) + else torch.device(device_id) + ) + if determined_device is None: + for param in _get_orig_params(module, ignored_params): + if param.device.type in {"cpu", "meta"}: + continue + if determined_device is None: + determined_device = param.device + else: + if param.device.type != determined_device.type: + raise RuntimeError( + f"FSDP does not support modules with different device types " + f"but got params on {determined_device.type} and {param.device.type}" + ) + determined_device = determined_device or torch._C._get_accelerator() + if determined_device.type == "cpu": + raise RuntimeError( + "FSDP needs a non-CPU accelerator device, but no accelerator device is detected." + ) + + state._device_handle = _FSDPDeviceHandle.from_device(determined_device) + return state + + +@no_type_check +def _init_buffer_state( + state: _FSDPState, + module: nn.Module, +) -> _FSDPState: + state._buffer_names = _get_buffer_names(module) + # Save a mapping from clean fully-qualified buffer name (starting from + # `module`) to its original dtype for restoring that dtype during model + # checkpointing when buffer mixed precision is enabled. The names should + # be clean since the casting happens in a `summon_full_params()` context. 
+ _buffer_name_to_orig_dtype: Dict[str, torch.dtype] = {} + for buffer_name, buffer in module.named_buffers(): + buffer_name = clean_tensor_name(buffer_name) + _buffer_name_to_orig_dtype[buffer_name] = buffer.dtype + state._buffer_name_to_orig_dtype = _buffer_name_to_orig_dtype + return state + + +@no_type_check +def _init_core_state( + state: _FSDPState, + sharding_strategy: Optional[ShardingStrategy], + mixed_precision: Optional[MixedPrecision], + cpu_offload: Optional[CPUOffload], + limit_all_gathers: bool, + use_orig_params: bool, + backward_prefetch_limit: int, + forward_prefetch_limit: int, +) -> _FSDPState: + # We clamp the strategy to `NO_SHARD` for world size of 1 since they are + # currently functionally equivalent. This may change if/when we integrate + # FSDP with MoE. + if state.world_size == 1: + if sharding_strategy != ShardingStrategy.NO_SHARD: + warnings.warn( + "FSDP is switching to use `NO_SHARD` instead of " + f"{sharding_strategy or ShardingStrategy.FULL_SHARD} since " + "the world size is 1." + ) + sharding_strategy = ShardingStrategy.NO_SHARD + elif sharding_strategy == ShardingStrategy.NO_SHARD: + warnings.warn( + "The `NO_SHARD` sharding strategy is deprecated. 
If having issues, " + "please use `DistributedDataParallel` instead.", + FutureWarning, + # Level 1 is here, level 2 is from `FullyShardedDataParallel`, and + # level 3 is from the true caller + stacklevel=3, + ) + state.sharding_strategy = sharding_strategy or ShardingStrategy.FULL_SHARD + state.mixed_precision = mixed_precision or MixedPrecision() + if mixed_precision is not None: + torch._C._log_api_usage_once( + f"torch.distributed.fsdp.mixed_precision.{str(state.mixed_precision)}" + ) + state._use_full_prec_in_eval = ( + os.environ.get(_FSDP_USE_FULL_PREC_IN_EVAL, "") == "1" + ) + state.cpu_offload = cpu_offload or CPUOffload() + state.limit_all_gathers = limit_all_gathers + state._use_orig_params = use_orig_params + state.training_state = TrainingState.IDLE + state._is_root = None + state._free_event_queue = _FreeEventQueue() + state._debug_level = dist.get_debug_level() + state._exec_order_data = exec_order_utils._ExecOrderData( + state._debug_level, + backward_prefetch_limit, + forward_prefetch_limit, + ) + state._unshard_event = None + # Mapping from fully sharded module to the handles it is responsible to + # unshard and reshard (see [Note: Fully Sharded Module]) + _fully_sharded_module_to_handle: Dict[nn.Module, FlatParamHandle] = {} + state._fully_sharded_module_to_handle = _fully_sharded_module_to_handle + # Invariant: `state.params` contains exactly the `FlatParameter`s of the + # handles in `state._handle` + _handle: Optional[FlatParamHandle] = None + state._handle = _handle + params: List[FlatParameter] = [] + state.params = params + return state + + +@no_type_check +def _init_runtime_state( + state: _FSDPState, +) -> _FSDPState: + _root_pre_forward_handles: List[RemovableHandle] = [] + state._root_pre_forward_handles = _root_pre_forward_handles + _pre_forward_handles: List[RemovableHandle] = [] + state._pre_forward_handles = _pre_forward_handles + _post_forward_handles: List[RemovableHandle] = [] + state._post_forward_handles = 
_post_forward_handles + state._sync_gradients = True + state._comm_hook = None + state._comm_hook_state = None + # Used to prevent running the pre-backward hook multiple times + return state + + +@no_type_check +def _init_prefetching_state( + state: _FSDPState, + backward_prefetch: BackwardPrefetch, + forward_prefetch: bool, +) -> _FSDPState: + state.backward_prefetch = backward_prefetch + state.forward_prefetch = forward_prefetch + # The data structures use tuples of handles to generalize over the case + # where a module's forward involves multiple handles. + return state + + +@no_type_check +def _init_extension(state: _FSDPState, device_mesh: DeviceMesh = None) -> _FSDPState: + # TODO: we need to add additional check once we support FSDP + PiPPy. + # This check is currently sufficient, since we only support FSDP + TP. + root_mesh = _mesh_resources.get_root_mesh(device_mesh) + # if a root mesh is not the same as device_mesh, + # meaning the device_mesh is sliced out from the root mesh. + if device_mesh and root_mesh != state._device_mesh: + state._fsdp_extension = DTensorExtensions(state._device_handle) + else: + # We need to explicilty set _fsdp_extension to None. + # Otherwise, we will run into an infinite recursion when getting the attribute. + state._fsdp_extension = None + return state + + +@no_type_check +def _init_state_dict_state(state: _FSDPState) -> _FSDPState: + state._state_dict_type = StateDictType.FULL_STATE_DICT + state_dict_config: StateDictConfig = FullStateDictConfig() + state._optim_state_dict_config = FullOptimStateDictConfig() + state._state_dict_config = state_dict_config + unshard_params_ctx: Dict[nn.Module, Generator] = {} + state._unshard_params_ctx = unshard_params_ctx + + return state + + +def _verify_managed_params(module: nn.Module, params: List[nn.Parameter]) -> None: + """ + Verify if the parameters are accepted by FSDP. The only restriction now + is that the parameter cannot be a scalar tensor (param.shape == []). 
+ """ + for param in params: + if len(param.shape) == 0: + param_name = "" + for name, param_ in module.named_parameters(): + if param is param_: + param_name = name + break + assert param_name + raise ValueError( + "FSDP doesn't support salar parameters. " + f"Change {param_name} to a 1D tensor with numel equal to 1." + ) + + +@no_type_check +def _init_param_handle_from_module( + state: _FSDPState, + fully_sharded_module: nn.Module, + device_id: Optional[Union[int, torch.device]], + param_init_fn: Optional[Callable[[nn.Module], None]], + sync_module_states: bool, +) -> _FSDPState: + """Initialize a ``FlatParamHandle`` from a module ``fully_sharded_module``.""" + _check_single_device_module(fully_sharded_module, state._ignored_params, device_id) + device_from_device_id = _get_device_from_device_id( + device_id, state.rank, state._device_handle + ) + is_meta_module, is_torchdistX_deferred_init = _need_to_materialize_module( + fully_sharded_module, state._ignored_params, state._ignored_modules + ) + # Materialize the module if needed + if (is_meta_module or is_torchdistX_deferred_init) and param_init_fn is not None: + _materialize_with_param_init_fn( + fully_sharded_module, param_init_fn, state._ignored_modules + ) + elif is_meta_module: + _materialize_meta_module( + fully_sharded_module, + device_id, + state._ignored_modules, + state._device_handle, + ) + elif is_torchdistX_deferred_init: + deferred_init.materialize_module( + fully_sharded_module, + check_fn=lambda submodule: _get_module_fsdp_state(submodule) is None + and submodule not in state._ignored_modules, + ) + + ignored_buffers = { + buffer + for ignored_module in state._ignored_modules + for buffer in ignored_module.buffers() + } + + _move_module_to_device( + fully_sharded_module, + state._ignored_params, + ignored_buffers, + device_from_device_id, + ) + state.compute_device = _get_compute_device( + fully_sharded_module, + state._ignored_params, + device_from_device_id, + state.rank, + 
state._device_handle, + ) + + managed_params = list(_get_orig_params(fully_sharded_module, state._ignored_params)) + _verify_managed_params(fully_sharded_module, managed_params) + if sync_module_states: + _sync_module_params_and_buffers( + fully_sharded_module, managed_params, state.process_group + ) + if state.sharding_strategy in HYBRID_SHARDING_STRATEGIES: + _sync_module_params_and_buffers( + fully_sharded_module, managed_params, state._inter_node_pg + ) + _init_param_handle_from_params(state, managed_params, fully_sharded_module) + return state + + +@no_type_check +def _init_param_handle_from_params( + state: _FSDPState, + params: List[nn.Parameter], + fully_sharded_module: nn.Module, +): + if len(params) == 0: + return + handle = FlatParamHandle( + params, + fully_sharded_module, + state.compute_device, + SHARDING_STRATEGY_MAP[state.sharding_strategy], + state.cpu_offload.offload_params, + state.mixed_precision.param_dtype, + state.mixed_precision.reduce_dtype, + state.mixed_precision.keep_low_precision_grads, + state.process_group, + state._use_orig_params, + fsdp_extension=state._fsdp_extension, + ) + handle.shard() + assert not state._handle + state.params.append(handle.flat_param) + state._handle = handle + state._fully_sharded_module_to_handle[handle._fully_sharded_module] = handle + cpu_device = torch.device("cpu") + if state.cpu_offload.offload_params and handle.flat_param.device != cpu_device: + handle.flat_param_to(cpu_device) + + +def _get_ignored_modules( + root_module: nn.Module, + _ignored_modules: Optional[Iterable[torch.nn.Module]], +) -> Set[nn.Module]: + """ + Check that ``_ignored_modules`` is an iterable of ``nn.Module`` s without any FSDP instances. + + Return the modules contained in their module + subtrees as a :class:`set`. Nested FSDP instances are excluded, but their + already-computed ignored modules are included. + + ``_ignored_modules`` represents the argument passed by the user to FSDP. 
+ """ + msg_prefix = "`ignored_modules` should be an iterable of `torch.nn.Module`s " + try: + ignored_root_modules = ( + set(_ignored_modules) if _ignored_modules is not None else set() + ) + except TypeError as e: + raise TypeError(msg_prefix + f"but got {type(_ignored_modules)}") from e + for module in ignored_root_modules: + if not isinstance(module, torch.nn.Module): + raise TypeError(msg_prefix + f"but got an iterable with {type(module)}") + if _get_module_fsdp_state(module): + # TODO: We may relax this by taking the FSDP instance's wrapped + # module to provide more flexibility to the user. + raise ValueError("`ignored_modules` should not include FSDP modules") + # Treat modules that cannot compose with `fully_shard` as ignored modules, + # meaning that their subtrees are ignored + for module in root_module.modules(): + if not traversal_utils._composable(module): + ignored_root_modules.add(module) + # NOTE: Even if `ignored_root_modules` is empty, do not return early so + # that this FSDP instance can get any ignored modules from its children. 
+ + # Include child modules and exclude nested FSDP modules themselves + ignored_modules = { + child + for module in ignored_root_modules + for child in module.modules() + if not isinstance(child, fsdp_file.FullyShardedDataParallel) + } + if root_module in ignored_modules: + warnings.warn( + "Trying to ignore the top-level module passed into the FSDP " + "constructor itself will result in all parameters being " + f"ignored and is not well-supported: {module}" + ) + # Include nested FSDP modules' ignored modules + for submodule in root_module.modules(): + optional_fsdp_state = _get_module_fsdp_state(submodule) + if optional_fsdp_state is not None: + assert hasattr(optional_fsdp_state, "_ignored_modules") + ignored_modules.update(optional_fsdp_state._ignored_modules) + return ignored_modules + + +def _get_ignored_params( + root_module: torch.nn.Module, + ignored_modules: Set[torch.nn.Module], + ignored_parameters: Optional[Iterable[torch.nn.Parameter]] = None, +) -> Set[torch.nn.Parameter]: + """ + Return the parameters of the modules in ``ignored_modules`` and the parameters in ``ignored_parameters``. + + :class:`FlatParameter` s are excluded from the result. 
+ """ + all_ignored_params: Set[torch.nn.Parameter] = set() + + params_in_ignored_modules = { + p for m in ignored_modules for p in m.parameters() if not _is_fsdp_flattened(p) + } + + all_ignored_params.update(params_in_ignored_modules) + + if ignored_parameters is not None: + params_in_ignored_parameters = { + p for p in ignored_parameters if not _is_fsdp_flattened(p) + } + all_ignored_params.update(params_in_ignored_parameters) + + # Always include nested FSDP modules' ignored parameters + for submodule in root_module.modules(): + optional_fsdp_state = _get_module_fsdp_state(submodule) + if optional_fsdp_state is not None: + assert hasattr(optional_fsdp_state, "_ignored_params") + all_ignored_params.update(optional_fsdp_state._ignored_params) + + return all_ignored_params + + +def _get_ignored_buffer_names( + root_module: torch.nn.Module, + ignored_modules: Set[torch.nn.Module], +) -> Set[str]: + """Return the cleaned buffer FQNs in ``ignored_modules``.""" + all_ignored_buffer_names: Set[str] = set() + + buffers_in_ignored_modules = { + buffer for m in ignored_modules for buffer in m.buffers() + } + + all_ignored_buffer_names.update( + { + clean_tensor_name(buffer_name) + for buffer_name, buffer in root_module.named_buffers() + if buffer in buffers_in_ignored_modules + } + ) + + # Always include nested FSDP modules' ignored buffer names + for submodule in root_module.modules(): + optional_fsdp_state = _get_module_fsdp_state(submodule) + if optional_fsdp_state is not None: + assert hasattr(optional_fsdp_state, "_ignored_buffer_names") + all_ignored_buffer_names.update(optional_fsdp_state._ignored_buffer_names) + + return all_ignored_buffer_names + + +def _get_buffer_names(root_module: nn.Module) -> Set[str]: + """Return the fully prefixed names of all buffers in the module hierarchy rooted at ``root_module`` as a class:`set`.""" + return { + clean_tensor_name(buffer_name) for buffer_name, _ in root_module.named_buffers() + } + + +def _check_single_device_module( + 
module: nn.Module, + ignored_params: Set[nn.Parameter], + device_id: Optional[Union[int, torch.device]], +) -> None: + """ + Raise an error if ``module`` has original parameters on multiple devices, ignoring the parameters in ``ignored_params``. + + Thus, after this method, the + module must be either fully on the CPU or fully on a non-CPU device. + """ + devices = {param.device for param in _get_orig_params(module, ignored_params)} + # We allow module to be partially on CPU and partially on GPU if device_id is not + # None, since the device_id arg will result in the CPU portion being moved to + # GPU. This is useful in cases where part of the module may be parallelized + # by another algorithm and may already be on GPU. We'd like to enforce device_id + # to not be None, otherwise we'd flatten parameters in a mixed module which is + # not supported. + if len(devices) == 2 and torch.device("cpu") in devices: + if device_id is None: + raise RuntimeError( + "To support a module with both CPU and GPU params, " + "please pass in device_id argument." + ) + elif len(devices) > 1: + raise RuntimeError( + f"FSDP only supports single device modules but got params on {devices}" + ) + + +def _get_device_from_device_id( + device_id: Optional[Union[int, torch.device]], + rank: int, + device_handle: _FSDPDeviceHandle, +) -> Optional[torch.device]: + """ + Return a ``torch.device`` for the specified ``device_id``. + + Processes ``device_id`` and returns either the corresponding device or + ``None`` if ``device_id`` is ``None``. + """ + if device_id is None: + return None + device = ( + device_id if isinstance(device_id, torch.device) else torch.device(device_id) + ) + if device.type != "cpu" and device.index is None: + warnings.warn( + f"FSDP got the argument `device_id` {device_id} on rank " + f"{rank}, which does not have an explicit index. " + f"FSDP will use the current device {device_handle.current_device()}. 
" + f"If this is incorrect, please explicitly call `torch.{device.type}.set_device()` " + "before FSDP initialization or pass in the explicit device " + "index as the `device_id` argument." + ) + device = torch.device(device_handle.current_device()) + return device + + +def _need_to_materialize_module( + module: nn.Module, + ignored_params: Set[nn.Parameter], + ignored_modules: Set[nn.Module], +) -> Tuple[bool, bool]: + """ + Return if ``module`` has parameters on meta device and if ``module`` is using torchdistX deferred initialization. + + At most of the returned bools can + be ``True``. If either is ``True``, then ``module`` needs to be + materialized. + """ + managed_params = list(_get_orig_params(module, ignored_params)) + is_meta_module = any(param.is_meta for param in managed_params) + # TODO: We need to establish a contract for FSDP and buffers. For now, we + # skip checking for meta buffers from ignored modules. We should consider + # refactoring the initialization holistically to avoid so many traversals. 
+ for submodule in module.modules(): + if submodule in ignored_modules: + continue + for buf in submodule.buffers(recurse=False): + is_meta_module |= buf.is_meta + is_torchdistX_deferred_init = ( + not is_meta_module + and _TORCHDISTX_AVAIL + and any(fake.is_fake(param) for param in managed_params) + ) + return is_meta_module, is_torchdistX_deferred_init + + +def _materialize_with_param_init_fn( + root_module: nn.Module, + param_init_fn: Callable[[nn.Module], None], + ignored_modules: Set[nn.Module], +) -> None: + if not callable(param_init_fn): + raise ValueError( + f"Expected {param_init_fn} to be callable but got {type(param_init_fn)}" + ) + modules_to_materialize = _get_modules_to_materialize(root_module, ignored_modules) + for module in modules_to_materialize: + param_init_fn(module) + + +def _materialize_meta_module( + root_module: nn.Module, + device_from_device_id: Optional[torch.device], + ignored_modules: Set[nn.Module], + device_handle: _FSDPDeviceHandle, +): + # Run default meta device initialization + materialization_device = device_from_device_id or torch.device( + device_handle.current_device() + ) + modules_to_materialize = _get_modules_to_materialize(root_module, ignored_modules) + module = None + try: + # Assume that each module's `reset_parameters()` only initializes its + # own parameters and not those of its children + with torch.no_grad(): + for module in modules_to_materialize: + # As a contract to the user, only call `reset_parameters()` if + # the module has directly managed parameters/buffers + module_state_iter = itertools.chain( + module.parameters(recurse=False), module.buffers(recurse=False) + ) + has_module_states = len(list(module_state_iter)) > 0 + if has_module_states: + module.to_empty(device=materialization_device, recurse=False) + module.reset_parameters() # type: ignore[operator] + except BaseException as e: + warnings.warn( + "Unable to call `reset_parameters()` for module on meta " + f"device with error {str(e)}. 
Please ensure that your module of" + f"type {type(module)} implements a `reset_parameters()` method." # type: ignore[possibly-undefined] + ) + raise e + + +def _get_modules_to_materialize( + root_module: nn.Module, ignored_modules: Set[nn.Module] +) -> List[nn.Module]: + # Run BFS to collect the modules to materialize via `reset_parameters()`, + # stopping at any module with FSDP already applied or at ignored modules. + modules_to_materialize: List[nn.Module] = [] + queue = collections.deque([root_module]) + visited_modules: Set[nn.Module] = {root_module} + while queue: + module = queue.popleft() + modules_to_materialize.append(module) + for child_module in module.children(): + if ( + child_module not in visited_modules + and _get_module_fsdp_state(child_module) is None + and child_module not in ignored_modules + ): + visited_modules.add(child_module) + queue.append(child_module) + return modules_to_materialize + + +def _move_module_to_device( + module: nn.Module, + ignored_params: Set[nn.Parameter], + ignored_buffers: Set[torch.Tensor], + device_from_device_id: Optional[torch.device], +) -> None: + """ + Move ``module`` depending on ``device_from_device_id`` and its current device. + + This includes moving ignored modules' parameters. + + - If ``device_from_device_id`` is not ``None``, then this moves + ``module`` to the device. + - If ``device_from_device_id`` is ``None``, then this does not move + ``module`` but warns the user if it is on CPU. + + Precondition: ``_check_single_device_module()``. 
+ """ + cpu_device = torch.device("cpu") + if device_from_device_id is not None: + # BFS from `module` without traversing any nested FSDP instances to + # collect the parameters/buffers that have not yet been managed + queue: Deque[nn.Module] = collections.deque() + queue.append(module) + params: List[nn.Parameter] = [] + buffers: List[torch.Tensor] = [] + while queue: + curr_module = queue.popleft() + # NOTE: We include a check to only move parameters/buffers that are + # on CPU device. If they are on a CUDA device different from the + # one specified by `device_id`, then this does NOT move them. This + # is so that we can raise an error in `_get_compute_device()`. + params.extend( + param + for param in curr_module.parameters(recurse=False) + if param.device == cpu_device + ) + buffers.extend( + buffer + for buffer in curr_module.buffers(recurse=False) + if buffer.device == cpu_device + ) + for submodule in curr_module.children(): + if not isinstance(submodule, fsdp_file.FullyShardedDataParallel): + queue.append(submodule) + params_to_move = [p for p in params if p not in ignored_params] + bufs_to_move = [p for p in buffers if p not in ignored_buffers] + _move_states_to_device(params_to_move, bufs_to_move, device_from_device_id) + return + param = next(_get_orig_params(module, ignored_params), None) + if param is not None and param.device == cpu_device: + _warn_cpu_init() + + +def _move_states_to_device( + params: List[nn.Parameter], + buffers: List[torch.Tensor], + device_from_device_id: Optional[torch.device], +) -> None: + """ + Move states to the specified device. + + Precondition: ``_check_single_device_module()`` and module's parameters and + buffers have been materialized if needed. 
+ """ + if len(params) == 0 and len(buffers) == 0: + return + if len(params) > 0: + current_device = params[0].device + elif len(buffers) > 0: + current_device = buffers[0].device + cpu_device = torch.device("cpu") + if device_from_device_id is not None: + # Move the parameters and buffers like the `.data` code path in + # `nn.Module._apply()`, which underlies `nn.Module.to()` + for param in params: + with torch.no_grad(): + param.data = param.to(device_from_device_id) + if param.grad is not None: + param.grad.data = param.grad.to(device_from_device_id) + for buffer in buffers: + buffer.data = buffer.to(device_from_device_id) + elif current_device == cpu_device: # type: ignore[possibly-undefined] + _warn_cpu_init() + + +def _warn_cpu_init(): + warnings.warn( + "The passed-in `module` is on CPU and will thus have FSDP's sharding " + "initialization run on CPU, which may be slower than on GPU. We " + "recommend passing in the `device_id` argument for FSDP to move " + "`module` to GPU for the sharding initialization. `module` must also " + "be on GPU device to work with the `sync_module_states=True` flag " + "since that requires GPU communication." + ) + + +def _get_compute_device( + module: nn.Module, + ignored_params: Set[nn.Parameter], + device_from_device_id: Optional[torch.device], + rank: int, + device_handle: _FSDPDeviceHandle, +) -> torch.device: + """ + Determine and return this FSDP instance's compute device. + + If the module is already on a non-CPU device, then the compute device is that non-CPU + device. If the module is on CPU, then the compute device is the current + device. + + Since this method should be called after materializing the module, any + non-CPU device should not be meta device. For now, the compute device is + always a CUDA or CUDA-like device with its explicit index. + + Precondition: ``_check_single_device_module()`` and + ``_move_module_to_device()``. 
+ """ + param = next(_get_orig_params(module, ignored_params), None) + if param is not None and param.device.type != "cpu": + compute_device = param.device # Determined by model param placement + else: + compute_device = torch.device(device_handle.current_device()) + if device_from_device_id is not None and compute_device != device_from_device_id: + raise ValueError( + f"Inconsistent compute device and `device_id` on rank {rank}: " + f"{compute_device} vs {device_from_device_id}" + ) + return compute_device + + +# TODO: See how to deprecate! +def _sync_module_params_and_buffers( + module: nn.Module, + params: List[nn.Parameter], + process_group: dist.ProcessGroup, +) -> None: + """ + Synchronize module states (i.e. parameters ``params`` and all not-yet-synced buffers) by broadcasting from rank 0 to all ranks. + + Precondition: ``sync_module_states == True`` and ``self.process_group`` has + been set. + """ + module_states: List[torch.Tensor] = [] + for buffer in module.buffers(): + # Avoid re-synchronizing buffers in case of nested wrapping + if not getattr(buffer, FSDP_SYNCED, False): + setattr(buffer, FSDP_SYNCED, True) + detached_buffer = buffer.detach() + if is_traceable_wrapper_subclass(detached_buffer): + # NOTE: Here we assume no nested subclasses, at most one level of subclass + # in both model's buffers and params + attrs, _ = detached_buffer.__tensor_flatten__() # type: ignore[attr-defined] + inner_buffers = [getattr(detached_buffer, attr) for attr in attrs] + module_states.extend(inner_buffers) + else: + module_states.append(detached_buffer) + + for param in params: + detached_param = param.detach() + if is_traceable_wrapper_subclass(detached_param): + attrs, _ = detached_param.__tensor_flatten__() # type: ignore[attr-defined] + inner_params = [getattr(detached_param, attr) for attr in attrs] + module_states.extend(inner_params) + else: + module_states.append(detached_param) + + _check_module_states_for_sync_module_states(module_states) + 
_sync_params_and_buffers( + process_group, + module_states, + PARAM_BROADCAST_BUCKET_SIZE, + src=0, + ) + + +def _check_module_states_for_sync_module_states( + module_states: List[torch.Tensor], +) -> None: + if module_states and any( + tensor.device == torch.device("cpu") for tensor in module_states + ): + raise ValueError( + "The module has CPU parameters or buffers when `sync_module_states=True`, " + "which requires them to be on GPU. Please specify the `device_id` argument " + "or move the module to GPU before passing it to FSDP." + ) + + +def _get_orig_params( + module: nn.Module, + ignored_params: Set[nn.Parameter], +) -> Iterator[nn.Parameter]: + """ + Return an iterator over the original parameters in ``module``. + + The iterator does not return + the parameters in ``ignored_params``, any ``FlatParameter`` s (which may be + present due to nested FSDP wrapping), or any original parameters already + flattened (only relevant when ``use_orig_params=True``). + """ + param_gen = module.parameters() + try: + while True: + param = next(param_gen) + if param not in ignored_params and not _is_fsdp_flattened(param): + yield param + except StopIteration: + pass + + +def _check_orig_params_flattened( + fsdp_module, + ignored_params: Set[nn.Parameter], +) -> None: + """ + Check that original parameters in ``fsdp_module`` have been flattened. + + The flattened parameters are made + invisible to ``named_parameters()`` for the module hierarchy rooted at + ``fsdp_module``. This should be called as a sanity check after flattening + the wrapped module's parameters. 
+ """ + for param_name, param in _named_parameters_with_duplicates(fsdp_module): + if param not in ignored_params and not _is_fsdp_flattened(param): + raise RuntimeError( + f"Found an unflattened parameter: {param_name}; " + f"{param.size()} {param.__class__}" + ) + + +def _get_default_comm_hook(sharding_strategy: ShardingStrategy): + return ( + default_hooks.allreduce_hook + if sharding_strategy == ShardingStrategy.NO_SHARD + else default_hooks.reduce_scatter_hook + ) + + +def _get_default_comm_hook_state( + process_group: dist.ProcessGroup, +) -> default_hooks.DefaultState: + return default_hooks.DefaultState(process_group=process_group) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..35beee36ef583900a512d8f2665806645662c6f5 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py @@ -0,0 +1,2091 @@ +# mypy: allow-untyped-defs +import copy +import functools +import logging +import warnings +from contextlib import ExitStack +from dataclasses import dataclass, field +from typing import ( + Any, + cast, + Dict, + Iterable, + Iterator, + List, + NamedTuple, + no_type_check, + Optional, + Sequence, + Set, + Tuple, + TYPE_CHECKING, + Union, +) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +from torch.distributed._state_dict_utils import _gather_state_dict +from torch.distributed.distributed_c10d import _get_pg_default_device +from torch.distributed.fsdp._common_utils import ( + _apply_to_modules, + _FSDPState, + _get_module_fsdp_state_if_fully_sharded_module, + _get_param_to_fqns, + _module_handle, + _named_parameters_with_duplicates, + clean_tensor_name, +) +from torch.distributed.fsdp._debug_utils import SimpleProfiler +from 
torch.distributed.fsdp._flat_param import FlatParameter, FlatParamHandle +from torch.distributed.fsdp._fsdp_extensions import ( + _ext_chunk_dtensor, + _ext_chunk_tensor, +) +from torch.distributed.fsdp._runtime_utils import ( + _lazy_init, + _reset_flat_param_grad_info_if_needed, +) +from torch.distributed.fsdp.api import ( + ShardingStrategy, + StateDictSettings, + StateDictType, +) +from torch.distributed.tensor import DTensor, Replicate +from torch.utils._pytree import tree_map_only + + +if TYPE_CHECKING: + from torch.distributed._shard.sharded_tensor import ShardedTensor + + +logger = logging.getLogger(__name__) + + +@dataclass +class FSDPParamInfo: + state: _FSDPState + handle: FlatParamHandle + param_indices: Dict[str, int] + param_requires_grad: List[bool] + + +def sorted_items(dictionary: Dict[str, Any]) -> Iterator[Tuple[str, Any]]: + keys = sorted(dictionary.keys()) + for k in keys: + yield k, dictionary[k] + + +@dataclass +class _ConsolidatedOptimState: + """ + This holds the consolidated optimizer state on the target rank. Positive- + dimension tensor state is communicated across ranks, while zero-dimension + tensor state and non-tensor state is taken directly from the target rank. + + PyTorch version 1.12 moved to using zero-dimension tensors for scalar + values, but user implemented optimizers may still use float (i.e. a + non-tensor). Thus, we support both and handle them identically. + + Attributes: + tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension + tensor state name to the unsharded flat tensor representing the + state. + zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero- + dimension tensor state name to its value. + non_tensor_state (Dict[str, Any]): Mapping from non-tensor state + name to its value. 
+ """ + + tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict) + zero_dim_tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict) + non_tensor_state: Dict[str, Any] = field(default_factory=dict) + + +class _PosDimTensorInfo(NamedTuple): + """ + Meatadata for positive-dimension tensors used internally for + :meth:`scatter_full_optim_state_dict`. + + Attributes: + shape (torch.Size): Sharded tensor shape (which is equal to the + unsharded tensor shape if the tensor is optimizer state for a + non-FSDP parameter and is hence not sharded). + dtype (torch.dtype): Data type of the tensor. + """ + + shape: torch.Size + dtype: torch.dtype + + +class _OptimStateKey(NamedTuple): + """ + This represents an optimizer state key that may be used commonly across + ranks. It is based on the unflattened parameter names rather than parameter + IDs to make it independent of each rank's own optimizer construction. + """ + + unflat_param_names: Tuple[str, ...] + is_fsdp_managed: bool + + +def _unflatten_optim_state( + fsdp_param_info: FSDPParamInfo, + flat_param_state: Dict[str, Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool, +) -> List[Dict[str, Any]]: + """ + Unflattens the optimizer state, consisting of the "state" part and the + "param_groups" part. Unflattening the "state" part involves consolidating + the state on the target rank and remapping from flattened to unflattened + parameter IDs, and the "param_groups" part only involves remapping from + flattened to unflattened parameter IDs. + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + flat_param_state (Dict[str, Any]): Entry for the flat parameter in the + "state" part of the optimizer state dict. + to_save (bool): Whether to save the state on this rank. 
+ + Returns: + List[Dict[str, Any]]: A :class:`list` holding the entries in the + "state" part of the optimizer state dict corresponding to the + unflattened parameters comprising the flat parameter if on the target + rank or an empty :class:`list` otherwise. The final optimizer state + dict will need to map these entries using the proper unflattened + parameter IDs. + """ + assert ( + not shard_state or to_save + ), "If ``shard_state`` is True, ``to_save`` has to be True." + consolidated_state = _communicate_optim_state( + fsdp_param_info, + flat_param_state, + ) + if to_save: + unflat_param_state = _unflatten_communicated_optim_state( + fsdp_param_info, + consolidated_state, + shard_state, + ) + for optim_state in unflat_param_state: + # We can't use .items() below cuz we'd run into a concurrent modification error + if cpu_offload: + for key in list(optim_state.keys()): + state = optim_state[key] + if not isinstance(state, torch.Tensor): + continue + optim_state[key] = state.cpu() + return unflat_param_state + else: + return [] + + +def _is_zero_dim_tensor(x: Any) -> bool: + return torch.is_tensor(x) and x.dim() == 0 + + +def _communicate_optim_state( + fsdp_param_info: FSDPParamInfo, + flat_param_state: Dict[str, Any], +) -> _ConsolidatedOptimState: + """ + Communicates the optimizer state for a flat parameter across ranks. All + ranks will hold the entire non-sharded optimizer state on GPU. + + If ``N`` is the number of tensor optimizer states in the optimizer state + dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1`` + otherwise (where the plus 1 comes from all-gathering the padding per rank). + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + flat_param_state (Dict[str, Any]): The entry in the "state" part of the + optimizer state dict corresponding to the flat parameter. 
+ + Returns: + ConsolidatedOptimState: Consolidated optimizer state for the target + flat parameter. + """ + fsdp_state = fsdp_param_info.state + flat_param = fsdp_param_info.handle.flat_param + state = _ConsolidatedOptimState() + tensor_state, zero_dim_tensor_state, non_tensor_state = ( + state.tensor_state, + state.zero_dim_tensor_state, + state.non_tensor_state, + ) + + for state_name, value in sorted_items(flat_param_state): + # Positive-dimension tensor state: communicate across ranks + if torch.is_tensor(value) and value.dim() > 0: + # If the parameter is not sharded, then neither is the + # positive-dimension tensor state, so no need to communicate it -- + # we take the target rank's value + if ( + fsdp_state.world_size == 1 + or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD + ): + tensor_state[state_name] = value + continue + assert ( + fsdp_state.compute_device is not None + ), "compute_device has not been initialized" + if value.device.type != fsdp_state.compute_device.type: + value = value.to(fsdp_state.compute_device) + # Assume that positive-dimension tensor optimizer state + # has the same shape as the sharded flat parameter + buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined] + tensor_buffer = value.new_zeros(*buffer_size) + dist.all_gather_into_tensor( + tensor_buffer, value, group=fsdp_state.process_group + ) + fsdp_state._device_handle.synchronize() + unpadded_numel = cast( + nn.Parameter, flat_param._unpadded_unsharded_size + ).numel() + tensor_state[state_name] = tensor_buffer[:unpadded_numel] + # Zero-dimension tensor state and non-tensor state: take this rank's + # value directly + else: + if _is_zero_dim_tensor(value): + zero_dim_tensor_state[state_name] = value.detach().clone() + else: + non_tensor_state[state_name] = value + return state + + +def _unflatten_communicated_optim_state( + fsdp_param_info: FSDPParamInfo, + state: _ConsolidatedOptimState, + shard_state: bool, +) -> List[Dict[str, Any]]: + 
""" + Unflattens the communicated optimizer state (given by ``tensor_state``, + ``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flat + parameter. This should only be called on the target rank. + + Args: + fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a + mapping from FQN to original parameter index. + state (_ConsolidatedOptimState): Consolidated optimizer state. + + Returns: + List[Dict[str, Any]]: A :class:`list` holding the entries in the + "state" part of the optimizer state dict corresponding to the + unflattened parameters comprising the flat parameter. The final + optimizer state dict will need to map these entries using the proper + unflattened parameter IDs. + """ + fsdp_state = fsdp_param_info.state + handle = fsdp_param_info.handle + flat_param = handle.flat_param + unflat_param_state: List[Dict[str, Any]] = [] + flat_param_views: Dict[str, Iterator] = {} + num_unflat_params = flat_param._num_params + tensor_state, zero_dim_tensor_state, non_tensor_state = ( + state.tensor_state, + state.zero_dim_tensor_state, + state.non_tensor_state, + ) + + for _ in range(num_unflat_params): + unflat_state_param = {} + # Add positive-dimension tensor state: unflatten with views + for state_name, flat_tensor in sorted_items(tensor_state): + views_generated = state_name in flat_param_views + if not views_generated: + views = handle._get_unflat_views(flat_tensor) + flat_param_views[state_name] = views + else: + views = flat_param_views[state_name] + optim_state: Union[torch.Tensor, ShardedTensor, DTensor] = next(views) + if shard_state: + osd_config = fsdp_state._optim_state_dict_config + if getattr(osd_config, "_use_dtensor", False): + assert fsdp_state._device_mesh is not None + optim_state = _ext_chunk_dtensor( + optim_state, + fsdp_state.rank, + fsdp_state._device_mesh, + fsdp_state._fsdp_extension, + ) + else: + assert fsdp_state.process_group is not None + optim_state = _ext_chunk_tensor( + optim_state, + fsdp_state.rank, + 
def _broadcast_processed_state(
    fsdp_state: _FSDPState,
    optim_state: Dict[str, Any],
    group: Optional[dist.ProcessGroup],
) -> Dict[str, Any]:
    """
    Broadcast the processed optimizer state from rank 0 to all ranks.

    Before broadcasting, positive-dimension tensors are replaced by
    ``_PosDimTensorInfo`` (shape + dtype) placeholders and zero-dimension
    tensors are moved to CPU, so no large tensor payloads travel through
    ``broadcast_object_list``. Rank 0 returns its original ``optim_state``
    unchanged; every other rank returns the broadcast skeleton.
    """
    objects: List[Any] = [None]
    if dist.get_rank(group) == 0:
        objects[0] = tree_map_only(
            torch.Tensor,
            lambda v: v.cpu() if v.dim() == 0 else _PosDimTensorInfo(v.shape, v.dtype),  # type: ignore[union-attr]
            optim_state,
        )
    dist.broadcast_object_list(objects, src=0, group=group)
    if dist.get_rank(group) == 0:
        return optim_state
    else:
        return objects[0]


def _broadcast_state(
    fsdp_state: _FSDPState, state: Any, group: Optional[dist.ProcessGroup]
) -> Any:
    """
    Broadcast one state value from rank 0 to all ranks.

    Non-tensor and zero-dimension tensor states are returned as-is (they were
    already transferred by ``_broadcast_processed_state``). For a
    positive-dimension tensor, rank 0 sends the tensor and non-zero ranks
    materialize the ``_PosDimTensorInfo`` placeholder into a zero-filled
    buffer on ``compute_device`` that receives the broadcast.
    """
    if dist.get_rank(group) == 0:
        if not isinstance(state, torch.Tensor) or state.dim() == 0:
            return state
        tensor = state.to(fsdp_state.compute_device)
    else:
        if isinstance(state, torch.Tensor):
            # Fix: the original message was a plain (non-f) string, so the
            # placeholder was never interpolated, and it called `.shape()`
            # even though `shape` is an attribute, not a method.
            assert state.dim() == 0, (
                "For non-zero ranks, a tensor state should have zero dimension, "
                f"but got the state with shape {state.shape}."
            )
            return state
        elif not isinstance(state, _PosDimTensorInfo):
            return state
        tensor = torch.zeros(
            state.shape, dtype=state.dtype, device=fsdp_state.compute_device
        )
    dist.broadcast(tensor, src=0, group=group)
    return tensor


def _shard_orig_param_state(
    fsdp_param_info: FSDPParamInfo,
    fqn: str,
    optim_state: Dict[str, Any],
) -> Dict[str, Any]:
    """
    Shard the optimizer state for the original parameter with the name ``fqn``.
    This API should only be used when ``use_orig_params`` is True.

    Returns an empty dict when there is no state or when this rank's shard of
    the flat parameter does not overlap the original parameter.
    """
    if not optim_state:
        return {}
    fsdp_state = fsdp_param_info.state
    flat_param = fsdp_param_info.handle.flat_param
    param_idx = fsdp_param_info.param_indices[fqn]
    shard_param_info = flat_param._shard_param_infos[param_idx]  # type: ignore[attr-defined]
    optim_state = _gather_state_dict(
        optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device
    )
    if not shard_param_info.in_shard:
        return {}
    # Flatten and shard the state.
    new_optim_state: Dict[str, Any] = {}
    intra_param_start_idx = shard_param_info.intra_param_start_idx
    intra_param_end_idx = shard_param_info.intra_param_end_idx
    for state_name, value in optim_state.items():
        if (
            torch.is_tensor(value)
            and value.dim() > 0
            and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
        ):
            # `intra_param_end_idx` is inclusive, hence the +1 in the slice.
            value = value.flatten()[intra_param_start_idx : intra_param_end_idx + 1].clone()  # type: ignore[operator]
        new_optim_state[state_name] = value
    return new_optim_state
def _flatten_optim_state_dict(
    optim_state_dict: Dict[str, Any],
    model: nn.Module,
    use_orig_params: bool = False,
    optim: Optional[torch.optim.Optimizer] = None,
    rank0_only: bool = False,
    group: Optional[dist.ProcessGroup] = None,
) -> Dict[str, Any]:
    """
    Flattens the full optimizer state dict, still keying by unflattened parameter
    names.

    If ``use_orig_params`` is True, each rank will have all FSDP-managed
    parameters but some of these parameters may be empty due to the sharding.
    For a regular optim.Optimizer, states for those empty parameters will
    not be initialized. So, when aggregating the FQNs across ranks, no assert
    will be raised on a rank even if it does not have all the states -- it is
    valid and FSDP knows how to aggregate them. However, FSDP has to ignore
    handling those parameters that are not managed by FSDP and do not exist on
    the local rank -- they are managed by other parallelism and FSDP does not
    know how to handle/aggregate them.

    Note that ``_flatten_tensor_optim_state`` does not need ``optim`` to
    flatten/shard the state. However, NamedOptimizer and KeyedOptimizer require
    all the states even if the corresponding parameters are empty. To this end,
    ``optim`` will be used to get the initial state of the empty parameters.
    ``optim`` should only be non-None if the ``optim`` is KeyedOptimizer or
    NamedOptimizer.

    Returns:
        Dict[str, Any]: The flattened optimizer state dict.
    """
    SimpleProfiler.reset()

    unflat_osd = optim_state_dict
    if "state" not in unflat_osd and not rank0_only:
        # Fix: the original message concatenated to `"state"to be` (missing
        # space) and said "keys" for a single key.
        raise ValueError(
            '`optim_state_dict` must have the key "state" '
            "to be a valid optimizer state dict"
        )
    param_to_fqns = _get_param_to_fqns(model)
    fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)
    fsdp_state = next(iter(fqn_to_fsdp_param_info.values())).state

    # Broadcast unflat_osd without non-scalar tensor if rank0_only is True.
    if rank0_only:
        unflat_osd = _broadcast_processed_state(fsdp_state, unflat_osd, group=group)

    # Construct the "state" part
    flat_osd_state: Dict[Union[_OptimStateKey, str], Any] = {}
    unflat_osd_state = unflat_osd["state"]
    all_state_keys = set(unflat_osd_state.keys())

    for param, fqns in param_to_fqns.items():
        fqn = fqns[0]
        if fqn not in unflat_osd_state:
            continue
        all_state_keys.difference_update(fqns)

        if rank0_only:
            # Materialize each positive-dimension tensor state on this rank.
            for fqn in fqns:
                if not unflat_osd_state[fqn]:
                    continue
                for state_name in unflat_osd_state[fqn].keys():
                    unflat_osd_state[fqn][state_name] = _broadcast_state(
                        fsdp_state, unflat_osd_state[fqn][state_name], group=group
                    )
        # The loop above reused the name `fqn`; restore the primary FQN.
        fqn = fqns[0]
        if fqn in fqn_to_fsdp_param_info:
            fsdp_param_info = fqn_to_fsdp_param_info[fqn]
            if use_orig_params:
                with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
                    flat_state = _shard_orig_param_state(
                        fsdp_param_info,
                        fqn,
                        unflat_osd_state[fqn],
                    )
            else:
                flat_state = _flatten_optim_state(
                    fsdp_param_info,
                    unflat_osd_state,
                    fqns,
                )
            key = _OptimStateKey(tuple(fqns), True)
            # Only include non-empty states, as expected by
            # `torch.optim.Optimizer`, unless the optimizer is KeyedOptimizer
            # or NamedOptimizer.
            if flat_state:
                flat_osd_state[key] = flat_state
            elif use_orig_params:
                assert (
                    len(fqns) == 1
                ), f"use_orig_params is True but there are multiple FQNs, {fqns}."
                if optim is not None:  # NamedOptimizer or KeyedOptimizer case.
                    state = optim.state.get(param, None)  # type: ignore[call-overload]
                    if state is not None:
                        flat_osd_state[key] = copy.deepcopy(state)
                    else:
                        warnings.warn(
                            f"optim_state[{key}] is not on rank{fsdp_state.rank}."
                        )

            else:
                raise RuntimeError(
                    f"The state of {key} is empty. This should happen when "
                    "use_orig_params=True."
                )
        else:  # do not flatten non-FSDP parameters' states
            assert len(fqns) == 1
            key = _OptimStateKey(tuple(fqns), False)
            flat_osd_state[key] = copy.copy(unflat_osd_state[fqn])

        if rank0_only:
            for fqn in fqns:
                if not unflat_osd_state[fqn]:
                    continue
                for state_name, param_state in list(unflat_osd_state[fqn].items()):
                    if fsdp_state.rank > 0:
                        # Dereference the tensor so that PyTorch can collect the memory.
                        del unflat_osd_state[fqn][state_name]
                    else:
                        # Move the tensor in the original osd back to CPU to make the
                        # original osd unaffected.
                        unflat_osd_state[fqn][state_name] = unflat_osd_state[fqn][
                            state_name
                        ].cpu()

    # Handle user-defined state, states that are not associated with parameters.
    for key in all_state_keys:
        user_state = unflat_osd_state[key]
        if isinstance(user_state, torch.Tensor) and rank0_only and use_orig_params:
            user_state = _broadcast_state(fsdp_state, user_state, group=group)
        flat_osd_state[key] = copy.copy(user_state)

    SimpleProfiler.dump_and_reset("FSDP _flatten_optim_state_dict() profiling: ")
    # Construct the "param_groups" part -- copy as is since it will be
    # rekeyed later according to the target rank's optimizer
    # Only copy param_groups if it exists in unflat_osd
    if "param_groups" in unflat_osd:
        flat_osd_param_groups = copy.deepcopy(unflat_osd["param_groups"])
        return {"state": flat_osd_state, "param_groups": flat_osd_param_groups}
    else:
        return {"state": flat_osd_state}
def _flatten_optim_state(
    fsdp_param_info: FSDPParamInfo,
    unflat_osd_state: Dict[str, Dict[str, Any]],
    unflat_param_names: List[str],
) -> Dict[str, Any]:
    """
    Flattens the optimizer state in ``full_optim_state_dict`` for a single
    flat parameter in ``fsdp_param_info`` corresponding to the unflattened
    parameter names in ``unflat_param_names``.

    Args:
        fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
            mapping from FQN to original parameter index.
        unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
            optimizer state dict corresponding to the unflattened parameters.
        unflat_param_names (List[str]): A :class:`list` of unflattened
            parameter names corresponding to the flat parameter ``flat_param``.

    Returns:
        Dict[str, Any]: A :class:`dict` mapping state names to their values for
        a particular flat parameter. The sharded optimizer state dict's "state"
        part will map a key to this returned value.
    """
    fsdp_state = fsdp_param_info.state
    handle = fsdp_param_info.handle
    flat_param = handle.flat_param
    num_unflat_params = len(unflat_param_names)
    assert num_unflat_params > 0, (
        "Expects at least one unflattened parameter corresponding to the "
        "flat parameter"
    )
    unflat_param_shapes = flat_param._shapes
    num_unflat_param_shapes = len(unflat_param_shapes)
    assert (
        num_unflat_params == num_unflat_param_shapes
    ), f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"

    # Check if these unflattened parameters have any optimizer state
    has_state = [
        bool(unflat_param_name in unflat_osd_state)
        for unflat_param_name in unflat_param_names
    ]
    # If none of the unflattened parameters comprising this flat parameter have
    # any state, then we do not want an entry in the optimizer state dict
    if not any(has_state):
        return {}  # no need to flatten any state
    # There may still be some unflattened parameters with state and some
    # without
    unflat_param_states = [
        _gather_state_dict(
            unflat_osd_state[unflat_param_name],
            pg=fsdp_state.process_group,
            device=fsdp_state.compute_device,
        )
        if unflat_param_name in unflat_osd_state
        else None
        for unflat_param_name in unflat_param_names
    ]
    # Check that the unflattened parameters have the same state names
    state_names = None
    for unflat_param_state in unflat_param_states:
        if unflat_param_state is None:
            continue
        if state_names is None:
            state_names = set(unflat_param_state.keys())
        else:
            if state_names != set(unflat_param_state.keys()):
                raise ValueError(
                    "Differing optimizer state names for the unflattened "
                    f"parameters: {unflat_param_names}"
                )
    # `any(has_state)` above guarantees at least one non-None entry.
    assert state_names is not None

    # Flatten the state
    flat_state: Dict[str, Any] = {}
    for state_name in state_names:
        state_values = [
            unflat_param_state[state_name] if unflat_param_state is not None else None
            for unflat_param_state in unflat_param_states
        ]
        non_none_state_values = [v for v in state_values if v is not None]
        # If all ranks have None, this is a None value
        if not non_none_state_values:
            flat_state[state_name] = None
            continue
        # Classify the state: all non-None values must be uniformly
        # positive-dimension tensors, zero-dimension tensors, or non-tensors.
        are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
        for v in non_none_state_values:
            are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
            are_zero_dim_tensors &= _is_zero_dim_tensor(v)
            are_non_tensors &= not torch.is_tensor(v)
        types = {type(v) for v in non_none_state_values}
        if len(types) != 1 or not (
            are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
        ):
            raise ValueError(
                f"Differing optimizer state types for state {state_name}, "
                f"values {non_none_state_values}, and unflattened parameter "
                f"names {unflat_param_names}"
            )
        if are_pos_dim_tensors:
            flat_tensor = _flatten_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
                unflat_param_shapes,
                handle,
            )
            # Shard the flattened tensor immediately to minimize max memory
            # usage
            if (
                fsdp_state.world_size != 1
                and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
            ):
                sharded_flat_tensor, _ = FlatParamHandle._get_shard(
                    flat_tensor,
                    fsdp_state.rank,
                    fsdp_state.world_size,
                )
            else:
                sharded_flat_tensor = flat_tensor
            flat_state[state_name] = sharded_flat_tensor
        elif are_zero_dim_tensors:
            flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
            )
        else:
            assert are_non_tensors
            flat_state[state_name] = _flatten_non_tensor_optim_state(
                state_name,
                state_values,
                unflat_param_names,
            )

    return flat_state
+ """ + flat_param = handle.flat_param + non_none_tensors = [t for t in pos_dim_tensors if t is not None] + # Check that all are tensors with the same dtype + dtypes = {t.dtype for t in non_none_tensors} + if len(dtypes) != 1: + raise ValueError( + "All unflattened parameters comprising a single flat " + "parameter must have positive-dimension tensor state with the " + f"same dtype but got dtypes {dtypes} for state {state_name} and " + f"unflattened parameter names {unflat_param_names}" + ) + dtype = next(iter(dtypes)) + # Check that each tensor state matches its parameter's shape + for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes): + if tensor is None and len(shape) == 0: + raise ValueError("Flattening a zero-dimension parameter is not supported") + elif tensor is not None and tensor.shape != shape: + raise ValueError( + "Tensor optimizer state does not have same shape as its " + f"parameter: {tensor.shape} {shape}" + ) + # Flatten the tensor states: we do not need to add any right-hand-side + # padding since the flat optimizer state tensor is sharded via + # `_get_shard()`, which pads the shard as needed (just like for the flat + # parameter) + cpu_device = torch.device("cpu") + tensors_to_flatten = [ + torch.flatten(state_value.to(cpu_device)) + if state_value is not None + else torch.flatten( + torch.zeros( + size=shape, + dtype=dtype, + device=cpu_device, + ) + ) + for state_value, shape in zip(pos_dim_tensors, unflat_param_shapes) + ] + flat_tensor = handle.flatten_tensors(tensors_to_flatten, handle._aligned_numel) + flat_param_shape = flat_param._unpadded_unsharded_size # type: ignore[attr-defined] + assert flat_tensor.shape == flat_param_shape, ( + f"tensor optim state: {flat_tensor.shape} " + f"flat parameter: {flat_param_shape}" + ) + return flat_tensor + + +def _flatten_zero_dim_tensor_optim_state( + state_name: str, + zero_dim_tensors: List[torch.Tensor], + unflat_param_names: List[str], +) -> torch.Tensor: + """ + Flattens the 
zero-dimension tensor optimizer state given by the values + ``zero_dim_tensors`` for the state ``state_name`` for a single flat + parameter corresponding to the unflattened parameter names + ``unflat_param_names`` by enforcing that all tensors are the same and using + that common value. + + NOTE: The requirement that the tensors are the same across all unflattened + parameters comprising the flat parameter is needed to maintain the + invariant that FSDP performs the same computation as its non-sharded + equivalent. This means that none of the unflattened parameters can be + missing this state since imposing a value may differ from having no value. + For example, for Adam's "step", no value means maximum bias correction, + while having some positive value means less bias correction. + + Args: + state_name (str): Optimizer state name. + zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state + for the unflattened parameters corresponding to the single + flat parameter. + unflat_param_names (List[str]): A :class:`list` of unflattened + parameter names corresponding to the single flat parameter. + + Returns: + torch.Tensor: A zero-dimensional tensor giving the value of the state + ``state_name`` for all unflattened parameters corresponding to the + names ``unflat_param_names``. 
+ """ + non_none_tensors = [t for t in zero_dim_tensors if t is not None] + # Enforce that all have the same value and dtype + values_set = {t.item() if t is not None else None for t in zero_dim_tensors} + dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors} + if ( + len(non_none_tensors) != len(zero_dim_tensors) + or len(values_set) != 1 + or len(dtypes) != 1 + ): + raise ValueError( + "All unflattened parameters comprising a single flat " + "parameter must have scalar state with the same value and dtype " + f"but got values {values_set} and dtypes {dtypes} for state " + f"{state_name} and unflattened parameter names " + f"{unflat_param_names}" + ) + value = next(iter(values_set)) + dtype = next(iter(dtypes)) + return torch.tensor(value, dtype=dtype, device=torch.device("cpu")) + + +def _flatten_non_tensor_optim_state( + state_name: str, + non_tensors: List[Any], + unflat_param_names: List[str], +) -> Any: + """ + Flattens the non-tensor optimizer state given by the values ``non_tensors`` + for the state ``state_name`` for a single flat parameter corresponding + to the unflattened parameter names ``unflat_param_names`` by enforcing that + all values are the same and using that common value. + + See the note in :func:`_flatten_zero_dim_tensor_optim_state`. + + Args: + state_name (str): Optimizer state name. + non_tensors (List[Any]): Non-tensor optimizer state for the unflattened + parameters corresponding to the single flat parameter. + unflat_param_names (List[str]): A :class:`list` of unflattened + parameter names corresponding to the single flat parameter. + + Returns: + Any: A non-tensor giving the value of the state ``state_name`` for all + unflattened parameters corresponding to the names + ``unflat_param_names``. 
+ """ + non_none_non_tensors = [nt for nt in non_tensors if nt is not None] + # Enforce that all have the same value (same type already checked) + non_tensor_set = set(non_tensors) + if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1: + raise ValueError( + "All unflattened parameters comprising a single flat " + "parameter must have scalar state with the same value and dtype " + f"but got values {non_tensor_set} for state {state_name} and " + f"unflattened parameter names {unflat_param_names}" + ) + non_tensor = next(iter(non_tensor_set)) + return non_tensor + + +def _rekey_sharded_optim_state_dict( + sharded_osd: Dict[str, Any], + model: nn.Module, + optim: torch.optim.Optimizer, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ], + using_optim_input: bool, + is_named_optimizer: bool = False, +) -> Dict[str, Any]: + """ + Rekeys the optimizer state dict from unflattened parameter names to flat + parameter IDs according to the calling rank's ``optim``, which may be + different across ranks. In particular, the unflattened parameter names are + represented as :class:`_OptimStateKey` s. 
def _rekey_sharded_optim_state_dict(
    sharded_osd: Dict[str, Any],
    model: nn.Module,
    optim: torch.optim.Optimizer,
    optim_input: Optional[
        Union[
            List[Dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ],
    using_optim_input: bool,
    is_named_optimizer: bool = False,
) -> Dict[str, Any]:
    """
    Rekeys the optimizer state dict from unflattened parameter names to flat
    parameter IDs according to the calling rank's ``optim``, which may be
    different across ranks. In particular, the unflattened parameter names are
    represented as :class:`_OptimStateKey` s.
    """
    param_to_fqns = _get_param_to_fqns(model)
    flat_param_to_fqn = _get_flat_param_to_fqn(model)
    # Resolve parameter -> optimizer key via the legacy `optim_input` path or
    # via the optimizer itself, depending on the caller.
    param_to_param_key: Dict[nn.Parameter, Union[int, str]] = cast(
        Dict[nn.Parameter, Union[int, str]],
        (
            _get_param_to_param_id_from_optim_input(model, optim_input)
            if using_optim_input
            else _get_param_to_param_key(
                optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
            )
        ),
    )
    # All parameter keys in `param_to_param_key` should be in
    # `param_to_fqns` -- strict inequality follows when not all parameters are
    # passed to the optimizer
    assert len(param_to_param_key) <= len(param_to_fqns)

    unflat_param_names_to_flat_param_key: Dict[
        Tuple[str, ...], Union[int, str]
    ] = {}  # for "state"
    unflat_param_name_to_flat_param_key: Dict[
        str, Union[int, str]
    ] = {}  # for "param_groups"
    for param, unflat_param_names in param_to_fqns.items():
        if param not in param_to_param_key:
            # This parameter was not passed to the optimizer
            continue
        flat_param_key = param_to_param_key[param]
        unflat_param_names_to_flat_param_key[tuple(unflat_param_names)] = flat_param_key
        for unflat_param_name in unflat_param_names:
            unflat_param_name_to_flat_param_key[unflat_param_name] = flat_param_key

    sharded_osd_state = sharded_osd["state"]
    rekeyed_osd_state: Dict[Union[str, int], Any] = {}
    for key, param_state in sharded_osd_state.items():
        if isinstance(key, str):
            # User-defined (non-parameter) state keeps its string key.
            rekeyed_osd_state[key] = param_state
            continue
        # Fall back to the original name tuple when the parameter was not
        # passed to this rank's optimizer.
        flat_param_key = unflat_param_names_to_flat_param_key.get(
            key.unflat_param_names, key.unflat_param_names
        )
        rekeyed_osd_state[flat_param_key] = param_state

    # Only process param_groups if it exists in sharded_osd
    if "param_groups" in sharded_osd:
        rekeyed_osd_param_groups: List[Dict[str, Any]] = []
        for unflat_param_group in sharded_osd["param_groups"]:
            flat_param_group = copy.deepcopy(unflat_param_group)
            flat_param_keys = sorted(
                {
                    unflat_param_name_to_flat_param_key[unflat_param_name]
                    for unflat_param_name in unflat_param_group["params"]
                }
            )
            flat_param_group["params"] = flat_param_keys
            rekeyed_osd_param_groups.append(flat_param_group)
        return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups}
    else:
        return {"state": rekeyed_osd_state}
+ """ + # Assume the standard case of passing `model.parameters()` to the optimizer + # if `optim_input` is not specified + if optim_input is None: + return dict(enumerate(model.parameters())) + try: + params = cast(List[nn.Parameter], list(optim_input)) + except TypeError as e: + raise TypeError( + "Optimizer input should be an iterable of Tensors or dicts, " + f"but got {optim_input}" + ) from e + if len(params) == 0: + raise ValueError("Optimizer input should not be empty") + + # Check if the optimizer input represents tensors or parameter groups + all_tensors = True + all_dicts = True + for param in params: + all_tensors &= isinstance(param, torch.Tensor) + all_dicts &= isinstance(param, dict) + if not all_tensors and not all_dicts: + raise TypeError("Optimizer input should be an iterable of Tensors or dicts") + if all_tensors: + return dict(enumerate(params)) + assert all_dicts + param_id_to_param: List[nn.Parameter] = [] + for param_group in params: + has_params_key = "params" in param_group # type: ignore[operator] + assert has_params_key, ( + 'A parameter group should map "params" to a list of the ' + "parameters in the group" + ) + # Implicitly map `flat_param_id` (current length of the list) to + # `param` + param_id_to_param.extend(param_group["params"]) # type: ignore[index] + return dict(enumerate(param_id_to_param)) + + +def _get_flat_param_to_fqn(model: torch.nn.Module) -> Dict[FlatParameter, str]: + """ + Constructs a mapping from ``FlatParameter`` to a cleaned (devoid of prefixes + from wrappers) fully qualified name (FQN). Note that this FQN is "non-canonical" + because ``FlatParameter`` s do not come from the original module but are + registered only after FSDP has been applied. This function returns the FSDP-given + name for the ``FlatParameter`` (usually module._flat_param) as opposed to the + canonical FQNs returned for ``FlatParameter`` s in ``_common_utils._get_param_to_fqns(...)``). 
+ + Consequently, this function will only return a non-empty mapping if FSDP was + applied with ``use_orig_params=False`` as, otherwise, the original parameters + are used within the module and there would be no ``FlatParameter`` s in the module. + + """ + + def module_fn(module, prefix, tree_level, flat_param_to_fqn): + for param_name, param in _named_parameters_with_duplicates( + module, recurse=False + ): + if not isinstance(param, FlatParameter): + continue + fqn = clean_tensor_name(prefix + param_name) + flat_param_to_fqn[param] = fqn + + def return_fn(flat_param_to_fqn): + return flat_param_to_fqn + + flat_param_to_fqn_ret: Dict[FlatParameter, str] = {} + return _apply_to_modules( + model, + module_fn, + return_fn, + [fqn for fqn, _ in _named_parameters_with_duplicates(model)], + flat_param_to_fqn_ret, + ) + + +def _get_param_key_to_param( + optim: torch.optim.Optimizer, + model: Optional[nn.Module] = None, + is_named_optimizer: bool = False, + param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None, + flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None, +) -> Dict[Union[int, str], nn.Parameter]: + """ + Constructs a mapping from parameter keys to parameters. For the regular + optimizers, the keys are parameter IDs. For NamedOptimizer, the keys + are FQNs. This API may be used both for models with ``FlatParameter`` s and + without. + """ + clean_fqn_to_curr_fqn: Dict[str, str] = {} + if is_named_optimizer: + assert ( + param_to_fqns is not None and flat_param_to_fqn is not None + ), "The optimizer is a NamedOptimizer, `param_to_fqns` must not be None." 
+ assert model is not None + for key, _ in _named_parameters_with_duplicates(model): + clean_fqn_to_curr_fqn[clean_tensor_name(key)] = key + + param_key_to_param: Dict[Union[str, int], nn.Parameter] = {} + pid = 0 + for param_group in optim.param_groups: + if is_named_optimizer: + for param in param_group["params"]: + assert flat_param_to_fqn is not None + if param in flat_param_to_fqn: + # FlatParameter case + key = flat_param_to_fqn[param] + else: + assert param_to_fqns is not None + # use_orig_params case + assert len(param_to_fqns[param]) == 1 + key = param_to_fqns[param][0] + try: + key = clean_fqn_to_curr_fqn[key] + except KeyError as e: + raise KeyError( + f"Can't find {key} from {list(clean_fqn_to_curr_fqn.keys())}." + ) from e + param_key_to_param[key] = param + else: + for param in param_group["params"]: + param_key_to_param[pid] = param + pid += 1 + + return param_key_to_param + + +def _get_param_to_param_key( + optim: torch.optim.Optimizer, + model: Optional[nn.Module] = None, + is_named_optimizer: bool = False, + param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None, + flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None, +) -> Dict[nn.Parameter, Union[int, str]]: + """ + Constructs the inverse mapping of :func:`_get_param_key_to_param`. This API + only supports the case where `optim` is a regular optimizer, not NamedOptimizer. + So the parameter keys will be parameter ids. 
+ """ + param_id_to_param = _get_param_key_to_param( + optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn + ) + return {param: param_id for param_id, param in param_id_to_param.items()} + + +def _get_param_to_param_id_from_optim_input( + model: nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ] = None, +) -> Dict[nn.Parameter, int]: + """Constructs the inverse mapping of :func:`_get_param_id_to_param_from_optim_input`.""" + param_id_to_param = _get_param_id_to_param_from_optim_input(model, optim_input) + return {param: param_id for param_id, param in param_id_to_param.items()} + + +def _check_missing_keys_on_rank( + r0_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[str, int]], + param_key_to_param: Dict[Union[str, int], nn.Parameter], + group: Optional[dist.ProcessGroup], +) -> None: + # Ensure that all ranks have at least the optimizer states needed by + # rank 0's optimizer + missing_keys: List[_OptimStateKey] = [] + for r0_optim_state_key in r0_optim_state_keys: + if r0_optim_state_key not in optim_state_key_to_param_key: + # A parameter from rank 0's optimizer does not exist for this + # rank's optimizer + missing_keys.append(r0_optim_state_key) + continue + param_key = optim_state_key_to_param_key[r0_optim_state_key] + if isinstance(param_key, int): + assert param_key >= 0 and param_key < len( + param_key_to_param + ), "Check the `param_key_to_param` construction" + # We cannot use FSDPState.compute_device as this API is a global view. 
+ device = _get_pg_default_device(group) + num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device) + dist.all_reduce(num_missing, group=group) + if num_missing.item() > 0: + obj_list = [None for _ in range(dist.get_world_size(group))] + dist.all_gather_object(obj_list, missing_keys, group=group) + error_msg = ( + "FSDP currently requires each rank to have at least the " + "optimizer states needed by rank 0's optimizer but some ranks " + "are missing some of those states" + ) + for rank, keys in enumerate(obj_list): + keys = cast(List[_OptimStateKey], keys) + if len(keys) > 0: + error_msg += ( + f"\nRank {rank} is missing states for the parameters: " + f"{[key.unflat_param_names for key in keys]}" + ) + raise RuntimeError(error_msg) + + +def _map_param_key_to_optim_keys( + optim_state_dict: Dict[str, Any], + group: Optional[dist.ProcessGroup], + param_key_to_param: Dict[Union[int, str], nn.Parameter], + param_to_fqns: Dict[nn.Parameter, List[str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + merge_keys: bool = False, +) -> Tuple[List[_OptimStateKey], Dict[_OptimStateKey, Union[int, str]]]: + """ + Construct the local mapping between the ``_OptimStateKey`` and parameter keys + and all the ``_OptimStateKey`` across ranks. If ``merge_keys`` is False, rank0 + must contain all the ``_OptimStateKey``, an exception will be raised otherwise. + Note that ``merge_keys`` should equal to ``use_orig_params``. 
+ """ + rank = dist.get_rank(group) + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]] = {} # local + all_optim_state_keys: List[_OptimStateKey] = [] + + for param_key, param in param_key_to_param.items(): + # Do not include parameters without state to avoid empty mappings + # just like in normal `torch.optim.Optimizer.state_dict()` + if param_key not in optim_state_dict["state"]: + continue + fqns = param_to_fqns[param] + is_fsdp_managed = isinstance(param, FlatParameter) + if is_fsdp_managed: + assert fqns[0] in fqn_to_fsdp_param_info, ( + fqns[0], + list(fqn_to_fsdp_param_info.keys()), + ) + is_fsdp_managed = fqns[0] in fqn_to_fsdp_param_info + optim_state_key = _OptimStateKey( + unflat_param_names=tuple(fqns), + is_fsdp_managed=is_fsdp_managed, + ) + if rank == 0 or merge_keys: + all_optim_state_keys.append(optim_state_key) + optim_state_key_to_param_key[optim_state_key] = param_key + + if merge_keys: + all_keys: List[List[_OptimStateKey]] = [ + [] for _ in range(dist.get_world_size(group)) + ] + dist.all_gather_object(all_keys, all_optim_state_keys, group=group) + merge_all_optim_state_keys = [ + key for local_keys in all_keys for key in local_keys + ] + all_optim_state_keys = sorted(set(merge_all_optim_state_keys)) + else: + key_obj_list: List[Optional[List[_OptimStateKey]]] = ( + [all_optim_state_keys] if rank == 0 else [None] + ) + dist.broadcast_object_list(key_obj_list, src=0, group=group) + assert key_obj_list[0] is not None + all_optim_state_keys = key_obj_list[0] + _check_missing_keys_on_rank( + all_optim_state_keys, + optim_state_key_to_param_key, + param_key_to_param, + group, + ) + + return all_optim_state_keys, optim_state_key_to_param_key + + +def _unflatten_param_groups( + state_dict: Dict[str, Any], + param_key_to_param: Dict[Union[int, str], nn.Parameter], + param_to_fqns: Dict[nn.Parameter, List[str]], +) -> List[Dict[str, Any]]: + param_groups: List[Dict[str, Any]] = [] + for flat_param_group in state_dict["param_groups"]: 
+ unflat_param_group = copy.deepcopy(flat_param_group) + param_group_params = [ + param_key_to_param[flat_param_key] + for flat_param_key in flat_param_group["params"] + ] + nested_unflat_param_names = [ + param_to_fqns[param] for param in param_group_params + ] + unflat_param_group["params"] = [ + unflat_param_name + for unflat_param_names in nested_unflat_param_names + for unflat_param_name in unflat_param_names + ] # flatten the list of lists + param_groups.append(unflat_param_group) + return param_groups + + +def _is_named_optimizer(optim_state_dict: Dict[str, Any]) -> bool: + """ + Returns whether the state_dict is from a NamedOptimizer. + This function checks that the keys in the state_dict['state'] are strings + (which usually are FQNs) versus integers (which usually refer to param_ids + from a vanilla torch.optim.Optimizer). + """ + state = optim_state_dict.get("state", None) + if not state: + # If we cannot find a state, assume it is not NamedOptimizer as + # NamedOptimizer has eager initialization. + return False + try: + key = next(iter(state.keys())) + except Exception as e: + raise Exception(optim_state_dict) from e # noqa: TRY002 + return isinstance(key, str) + + +@dataclass +class StateInfo: + # The key of these dictionaries are the state name, e.g., `exp_avg`. + tensors: Dict[str, _PosDimTensorInfo] + scalar_tensors: Dict[str, torch.Tensor] + non_tensors: Dict[str, Any] + + +def _allgather_state_info( + fsdp_state: _FSDPState, + input_states: Dict[str, Any], +) -> List[Dict[str, StateInfo]]: + """ + Given the ``input_states``, allgather StateInfo for each state. The function + uses all_gather_object to gather StateInfo so no GPU tensors are sent. + """ + + processed_state_dict: Dict[str, StateInfo] = {} + gathered_state_info: List[Dict[str, StateInfo]] = [ + {} for _ in range(fsdp_state.world_size) + ] + + for fqn, optim_state in input_states.items(): + # Allgather the scalar tensor state, non-tensor states and tensors metadata. 
+ processed_state = StateInfo({}, {}, {}) + for state_name, value in sorted_items(optim_state): + if torch.is_tensor(value): + if value.dim() == 0: + # Ensure that `step` is on CPU. + processed_state.scalar_tensors[state_name] = value.cpu() + else: + processed_state.tensors[state_name] = _PosDimTensorInfo( + value.shape, value.dtype + ) + else: + processed_state.non_tensors[state_name] = value + processed_state_dict[fqn] = processed_state + dist.all_gather_object( + gathered_state_info, + processed_state_dict, + group=fsdp_state.process_group, + ) + return gathered_state_info + + +def _convert_all_state_info( + fsdp_param_info: FSDPParamInfo, + gathered_state_info: List[Dict[str, StateInfo]], + input_states: Dict[str, Any], + output_states: Dict[str, Dict[str, Any]], +) -> Tuple[Optional[torch.dtype], Dict[str, List[Optional[torch.Tensor]]]]: + """ + Given the ``gathered_state_info`` and ``input_states``, the API converted + the StateInfo into the original state if the state is not a non-scalar + tensor. For a multi-dimensional tensor, the local state will be stored in + ``state_buffer`` in a correct order for later allgather purpose. + """ + + state_buffers: Dict[str, List[Optional[torch.Tensor]]] = {} + + for fqn, gathered_state in output_states.items(): + state_info = [s[fqn] for s in gathered_state_info] + all_tensor_states = sorted( + {n for state in state_info for n in state.tensors.keys()} + ) + empty_ranks: Set[int] = set() + dtype: Optional[torch.dtype] = None + # First check all the non-scalar states and get the information of + # states on each rank. 
+ for state_name in all_tensor_states: + numels = [] + _empty_ranks: Set[int] = set() + for rank, object_state in enumerate(state_info): + numels.append(0) + info = object_state.tensors.get(state_name, None) + if info is not None: + numels[-1] = info.shape.numel() + if not dtype: + dtype = info.dtype + else: + assert dtype == info.dtype + if numels[-1] == 0: + _empty_ranks.add(rank) + + assert not empty_ranks or empty_ranks == _empty_ranks + empty_ranks = _empty_ranks + if state_name not in state_buffers: + state_buffers[state_name] = [ + None for _ in fsdp_param_info.param_indices + ] + local_state = input_states[fqn].get(state_name, None) + # N.B. We need to move the state to compute_device. The reason is + # not yet clear and we need to figure out why the state may be on a + # different device. + if local_state is not None: + local_state = local_state.to(fsdp_param_info.state.compute_device) + state_buffers[state_name][fsdp_param_info.param_indices[fqn]] = local_state + + # Restoring the scalar and non-tensor states. If the corresponding + # non-scalar states do not exist on the rank, we also skip the scalar + # non-tensor states on that rank. + for rank, object_state in enumerate(state_info): + if rank in empty_ranks: + continue + for name, non_tensor_value in object_state.non_tensors.items(): + curr_non_tensor_value = gathered_state.get(name, None) + assert ( + curr_non_tensor_value is None + or curr_non_tensor_value == non_tensor_value + ), ( + f"Rank {rank} has different values for {name}: {non_tensor_value}." + + f" Other ranks: {curr_non_tensor_value}" + ) + gathered_state[name] = non_tensor_value + + for name, scalar_tensor_value in object_state.scalar_tensors.items(): + curr_scalar_tensor_value = gathered_state.get(name, None) + assert curr_scalar_tensor_value is None or torch.equal( + scalar_tensor_value, curr_scalar_tensor_value + ), ( + f"Rank {rank} has different values for {name}: {scalar_tensor_value}." 
+ + f" Other ranks: {curr_scalar_tensor_value}" + ) + gathered_state[name] = scalar_tensor_value + + return dtype, state_buffers # type: ignore[possibly-undefined] + + +def _unflatten_orig_param_states( + fsdp_param_info: FSDPParamInfo, + output_states: Dict[str, Dict[str, Any]], + state_name: str, + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> None: + """ + Given a output state dict, ``output_states``, which the keys are FQNs to the + original parameters (not FlatParameters nor parmeter ID), and the values + are gathered states, unflatten the states to the original dimensions. + + This function performs the unflattening process in-place. + """ + if not to_save: + return + flat_param = fsdp_param_info.handle.flat_param + fsdp_state = fsdp_param_info.state + for fqn, gathered_state in output_states.items(): + value = gathered_state[state_name] + param_idx = fsdp_param_info.param_indices[fqn] + + # TODO: This solution is not general and only apply to PTD TP solution. + if isinstance(value, DTensor): + placement = value.placements[0] + # If gathered state is a DTensor and its TP placement is not Replicate(), we need to + # gather the tensor on its TP dimension before chunking them into DTensor again. + if placement != Replicate(): + placement_dim = placement.dim # type: ignore[attr-defined] + value_local = value.redistribute(placements=(Replicate(),)) + reshape_size = list(flat_param._shapes[param_idx]) + reshape_size[placement_dim] *= value.device_mesh.size(0) + reshape_size = torch.Size(reshape_size) + value = value.reshape(reshape_size) + # If gathered state is a replicate DTensor, we directly reshape it. + else: + value = value.reshape(flat_param._shapes[param_idx]) + else: + # If gathered state is a tensor, we directly reshape it into unflatten state. 
+ value = value.reshape(flat_param._shapes[param_idx]) + + if shard_state: + osd_config = fsdp_state._optim_state_dict_config + if getattr(osd_config, "_use_dtensor", False): + assert fsdp_state._device_mesh is not None + value = _ext_chunk_dtensor( + value, + fsdp_state.rank, + fsdp_state._device_mesh, + fsdp_state._fsdp_extension, + ) + else: + assert fsdp_state.process_group is not None + value = _ext_chunk_tensor( + value, + fsdp_state.rank, + fsdp_state.world_size, + fsdp_state._device_handle.device_count(), + fsdp_state.process_group, + fsdp_state._fsdp_extension, + ) + elif not cpu_offload: + with SimpleProfiler.profile("clone"): + value = value.detach().clone() + + if cpu_offload: + with SimpleProfiler.profile(SimpleProfiler.Type.D2H): + value = value.cpu() + gathered_state[state_name] = value + + +def _allgather_orig_param_states( + fsdp_param_info: FSDPParamInfo, + gathered_state_info: List[Dict[str, StateInfo]], + input_states: Dict[str, Any], + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> Dict[str, Dict[str, Any]]: + """ + Given the ``gathered_state_info`` and ``input_states``, the API allgathers + all tensor states and restore non-tensor states from ``gathered_state_info``. + """ + fsdp_state = fsdp_param_info.state + if fsdp_state.rank == 0 and dist.get_debug_level() == dist.DebugLevel.DETAIL: + logger.info( + "Memory Summary before calling to _allgather_orig_param_states %s", + fsdp_state._device_handle.memory_summary(), + ) + + output_states: Dict[str, Dict[str, Any]] = {fqn: {} for fqn in input_states.keys()} + + dtype, state_buffers = _convert_all_state_info( + fsdp_param_info, gathered_state_info, input_states, output_states + ) + + if len(state_buffers) == 0: + return output_states + + has_state_params: List[bool] = [ + True if fqn in output_states else False + for fqn, idx in fsdp_param_info.param_indices.items() + ] + + # Loop through the ``state_buffers`` and construct the flattened, concatenated, + # sharded states. 
The size of the constructed state will be the same size as + # flat_param (also sharded). + # Then we perform an allgather_into_tensor to get the full flat_param state. + # The full flat_param state is the result of concatenation of multiple states + # the order of of flat_param._fqns. + # The final step is to split the flat_param state into original param states + # and return the result. + flat_param = fsdp_param_info.handle.flat_param + empty_func = functools.partial( + torch.empty, dtype=dtype, device=fsdp_state.compute_device + ) + gathered_tensor = empty_func(flat_param._padded_unsharded_size) + # Synchronize can be slow but this will be easier for us to debug. + fsdp_state._device_handle.synchronize() + for state_name, buffers in state_buffers.items(): + local_buffers: List[torch.Tensor] = [] + begin = fsdp_state.rank * flat_param._sharded_size.numel() + # End is inclusive. + end = begin + flat_param._sharded_size.numel() - 1 + # param_idx corresponds to the parameter index in the FlatParameter. + mem_offset, param_idx = 0, 0 + for numel, is_padding in zip( + flat_param._numels_with_padding, flat_param._is_padding_mask + ): + frozen_and_no_state = not is_padding and ( + not fsdp_param_info.param_requires_grad[param_idx] + and not has_state_params[param_idx] + ) + + if is_padding or frozen_and_no_state: + # This memory range is a padding or the param is frozen and does + # not require gradient. For the later case, we treat it as a + # padding and add empty values to the local_buffers. + + padding_begin, padding_end = mem_offset, mem_offset + numel - 1 + if padding_begin <= begin <= padding_end: + # The range is an align padding before the first parameter in + # the shard. The shard includes parts of this align padding. + padding_len = ( + padding_end - begin + 1 + if end >= padding_end + else end - begin + 1 + ) + elif padding_begin <= end <= padding_end: + # The range is an align padding after the last parameter in + # the shard. 
The shard includes parts of this align padding. + padding_len = ( + end - padding_begin + 1 + if begin <= padding_begin + else end - begin + 1 + ) + elif begin < padding_begin <= padding_end < end: + # The range is an align padding that is completely in the + # shard. + padding_len = numel + else: + padding_len = 0 + if padding_len: + local_buffers.append(empty_func(padding_len)) + + if not is_padding: + # This memory range is a parameter in FlatParameter. So there + # should be an corresponding state in the optimizer unless the + # parameter is frozen, which we treat it as a padding above. + + # We need to check if this rank owns the buffer. If this is None: + # 1.) the rank does not own any part of the original parameter. + # As a result, there is no corresponding optimizer state on + # the rank as well. + # 2.) the parameter is frozen AND no optimizer state for the + # parameter. If a parameter is frozen, there can still be + # optimizer state if the parameter is not frozen in the + # previous steps. + if buffers[param_idx] is not None: + local_buffers.append(cast(torch.Tensor, buffers[param_idx])) + param_idx += 1 + + mem_offset += numel + + shard_numel_padded = flat_param._sharded_size.numel() - ( + sum(t.numel() for t in local_buffers) + ) + + assert flat_param._shard_numel_padded == shard_numel_padded, ( + "Manually calculated _sharded_numel_padded is incorrect. " + f"_shard_numel_padded={flat_param._shard_numel_padded}, " + f"shard_numel_padded={shard_numel_padded}, " + f"_sharded_size.numel={flat_param._sharded_size.numel()}, " + f"_numels_with_padding={flat_param._numels_with_padding}, " + f"begin={begin}, end={end}," + ) + if shard_numel_padded > 0: + # Add right-handed padding. + local_buffers.append(empty_func(shard_numel_padded)) + local_shard = torch.cat(local_buffers) + assert local_shard.numel() * fsdp_state.world_size == gathered_tensor.numel(), ( + "The size of local shard times the world size should equal to the " + "gathered tensor size. 
The inconsistency may be from a bug of " + "FlatParameter's metadata or the reconstruction logic in optimizer " + "state dict." + ) + fsdp_state._device_handle.synchronize() + with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER): + dist.all_gather_into_tensor( + gathered_tensor, local_shard, group=fsdp_state.process_group + ) + # Synchronize can be slow but this will be easier for us to debug. + fsdp_state._device_handle.synchronize() + + unpadded_tensor = gathered_tensor[: flat_param._unpadded_unsharded_size.numel()] + flat_param_handle = fsdp_param_info.handle + orig_states = flat_param_handle._get_unflat_views_aligned(unpadded_tensor) + assert len(orig_states) == len(fsdp_param_info.param_indices), ( + "The number of parameters from FlatParameter is not consistent to " + "the number of states used by optimizer state dict reconstruction " + "logic." + ) + for fqn, idx in fsdp_param_info.param_indices.items(): + if fsdp_param_info.param_requires_grad[idx] or fqn in output_states: + output_states[fqn][state_name] = orig_states[idx] + + _unflatten_orig_param_states( + fsdp_param_info, + output_states, + state_name, + shard_state, + to_save, + cpu_offload, + ) + + del gathered_tensor + return output_states + + +def _gather_all_orig_param_state( + fsdp_param_info: FSDPParamInfo, + input_states: Dict[str, Any], + shard_state: bool, + to_save: bool, + cpu_offload: bool, +) -> Dict[str, Any]: + """ + Given a optimizer state dict, ``input_states``, which the keys are FQNs to the + original parameters (not FlatParameters nor parmeter ID), gather all the + states and unflatten them to the original dimensions. Note that all the + params referred by the ``input_states`` must be managed by FSDP. 
+ """ + fsdp_state = fsdp_param_info.state + if ( + fsdp_state.world_size == 1 + or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD + ): + return input_states if to_save else {} + + with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING): + with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ): + gathered_state_info = _allgather_state_info(fsdp_state, input_states) + output_states = _allgather_orig_param_states( + fsdp_param_info, + gathered_state_info, + input_states, + shard_state, + to_save, + cpu_offload, + ) + if to_save: + for key, idx in fsdp_param_info.param_indices.items(): + if key in output_states: + continue + if not fsdp_param_info.param_requires_grad[idx]: + continue + + raise RuntimeError( + f"{key} is not in the output state. " + "The FSDPParamInfo has the param keys " + f"{sorted(fsdp_param_info.param_indices.keys())} while " + "the output_states has the param keys " + f"{sorted(output_states.keys())}." + ) + return output_states + else: + return {} + + +def _convert_state_with_orig_params( + all_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + optim_state_dict: Dict[Union[str, int], Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool = True, +) -> Dict[str, Any]: + fsdp_osd_state: Dict[str, Any] = {} + # This variable is used to deduplicate the FSDPParamInfo as one FSDPParamInfo + # usually corresponds to multiple parameters. We could not use FSDPParamInfo + # as the key because FSDPParamInfo is not hashable. As a result, we fall back + # to `id(FSDPParamInfo)`, which the type is an integer. 
+ all_states: Dict[int, Dict[str, Any]] = {} + # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers + # across ranks + for optim_state_key in all_optim_state_keys: + param_key: Union[str, int, None] = optim_state_key_to_param_key.get( + optim_state_key, None + ) + + if param_key is None and not optim_state_key.is_fsdp_managed: + continue + + if optim_state_key.is_fsdp_managed: + fqn = optim_state_key.unflat_param_names[0] + fsdp_param_info = fqn_to_fsdp_param_info.get(fqn, None) + if fsdp_param_info is None: + # This can happen if the not all FSDP instances have all the + # parameters. This can happen with FSDP + some MPMD style + # parallelism. + + # TODO: it is unclear if we need to do the same check with + # non-FSDP managed keys. + continue + state = {} if param_key is None else optim_state_dict[param_key] + if id(fsdp_param_info) not in all_states: + all_states[id(fsdp_param_info)] = {} + all_states[id(fsdp_param_info)][fqn] = state + + elif to_save: + assert len(optim_state_key.unflat_param_names) == 1 + unflat_param_name = optim_state_key.unflat_param_names[0] + with SimpleProfiler.profile("none_fsdp_managed_copy"): + param_key = cast(Union[str, int], param_key) + fsdp_osd_state[unflat_param_name] = copy.copy( + optim_state_dict[param_key] + ) + if cpu_offload: + for state_name, value in sorted_items( + fsdp_osd_state[unflat_param_name] + ): + if not torch.is_tensor(value): + continue + fsdp_osd_state[unflat_param_name][state_name] = value.cpu() + + # Instead of gathering the state of each parameter individually, we perform + # the gathering all at once to speed up the process. + for _all_states in all_states.values(): + fqn = next(iter(_all_states.keys())) + fsdp_param_info = fqn_to_fsdp_param_info[fqn] + assert len(fsdp_param_info.param_requires_grad) > 0, ( + "With use_orig_params, FSDPParamInfo should have requires_grad " + "information. However, the length is zero." 
+ ) + for key, idx in fsdp_param_info.param_indices.items(): + if key in _all_states: + continue + if not fsdp_param_info.param_requires_grad[idx]: + continue + raise RuntimeError( + f"{key} is not in the optimizer state. " + "The FSDPParamInfo has the param keys " + f"{sorted(fsdp_param_info.param_indices.keys())} while " + "the optimizer has the param keys " + f"{sorted(_all_states.keys())}." + ) + fsdp_osd_state.update( + _gather_all_orig_param_state( + fsdp_param_info, + _all_states, + shard_state, + to_save, + cpu_offload, + ) + ) + + return fsdp_osd_state + + +def _convert_state_with_flat_params( + all_optim_state_keys: List[_OptimStateKey], + optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]], + fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo], + optim_state_dict: Dict[Union[str, int], Any], + to_save: bool, + shard_state: bool, + cpu_offload: bool = True, +) -> Dict[str, Any]: + fsdp_osd_state: Dict[str, Any] = {} + # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers + # across ranks + for optim_state_key in all_optim_state_keys: + param_key: Union[str, int, None] = optim_state_key_to_param_key.get( + optim_state_key, None + ) + + assert param_key is not None, ( + "If use_orig_params is False, we must be able to find the " + f"corresponding param id. {optim_state_key} {param_key}" + ) + + if optim_state_key.is_fsdp_managed: + # If there are multiple unflat_param_names (not use_orig_params), + # they share the same FSDPParamInfo. So the first unflat_param_name + # is sufficient to fetch the FSDPParamInfo. 
+ fqn = optim_state_key.unflat_param_names[0] + fsdp_param_info = fqn_to_fsdp_param_info[fqn] + unflat_state = _unflatten_optim_state( + fsdp_param_info, + optim_state_dict[param_key], + to_save, + shard_state, + cpu_offload, + ) + if to_save: + assert len(unflat_state) == len(optim_state_key.unflat_param_names) + for unflat_param_name, unflat_param_state in zip( + optim_state_key.unflat_param_names, + unflat_state, + ): + fsdp_osd_state[unflat_param_name] = unflat_param_state + elif to_save: + assert len(optim_state_key.unflat_param_names) == 1 + unflat_param_name = optim_state_key.unflat_param_names[0] + fsdp_osd_state[unflat_param_name] = copy.copy(optim_state_dict[param_key]) + if cpu_offload: + for state_name, value in sorted_items( + fsdp_osd_state[unflat_param_name] + ): + if not torch.is_tensor(value): + continue + fsdp_osd_state[unflat_param_name][state_name] = value.cpu() + + return fsdp_osd_state + + +@torch.no_grad() +def _optim_state_dict( + model: nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Dict[str, Any], + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[nn.Parameter], + ] + ], + rank0_only: bool, + shard_state: bool, + group: Optional[dist.ProcessGroup], + using_optim_input: bool, + use_orig_params: bool = False, + cpu_offload: bool = True, +) -> Dict[str, Any]: + """ + Consolidates the optimizer state and returns it as a :class:`dict` + following the convention of :meth:`torch.optim.Optimizer.state_dict`, + i.e. with keys ``"state"`` and ``"param_groups"``. + The flat parameters in ``FSDP`` modules contained in ``model`` are mapped + back to their unflattened parameters. + + Parameter keys are not well-defined. For a regular optimizer, the optimizer + state_dict contains a mapping from parameter IDs to parameter states. + Parameter IDs are the order of parameters in ``optim.param_groups()`` across + all the groups. 
This API also allows user to pass ``optim_input`` for the + mapping between parameters and parameter IDs. Using ``optim_input`` is being + deprecated. + + If the optimizer is a ``NamedOptimizer``, the optimizer state_dict does not + contain parameter IDs mapping but a mapping from parameter FQNs to parameter + states. This API finds the mapping from FQNs to parameters if the optimizer + is a ``NamedOptimizer``. + + If ``use_orig_params`` is True, each rank will have all FSDP-managed + parameters but some of these parameters may be empty due to the sharding. + For a regular optim.Optimizer, states for those empty parameters will + not be initialized. So, when aggregating the FQNs across ranks, no assert + will be raised on a rank even if it does not have all the states -- it is + valid and FSDP knows how to aggregate them. However, FSDP has to ignore + handling those parameters that are not managed by FSDP and do not exist on + the local rank -- those are managed by other parallelisms and FSDP does not + know how to handle/aggregate them. + + Args: + model (nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + rank0_only (bool): If ``True``, saves the populated :class:`dict` + only on rank 0; if ``False``, saves it on all ranks. (Default: + ``True``) + shard_state (bool): If ``True``, shard and distribute all + non-zero-dimension states. + + Returns: + Dict[str, Any]: A :class:`dict` containing the optimizer state for + ``model`` 's original unflattened parameters and including keys + "state" and "param_groups" following the convention of + :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=False``, + then nonzero ranks return an empty :class:`dict`. 
+ """ + SimpleProfiler.reset() + cm = ExitStack() + cm.enter_context(SimpleProfiler.profile(SimpleProfiler.Type.ALL)) + _reset_flat_param_grad_info_if_needed(traversal_utils._get_fsdp_handles(model)) + to_save = not rank0_only or dist.get_rank(group) == 0 or shard_state + + with SimpleProfiler.profile("preprocessing"): + param_to_fqns = _get_param_to_fqns(model) + flat_param_to_fqn = _get_flat_param_to_fqn(model) + is_named_optimizer = _is_named_optimizer(optim_state_dict) + + param_key_to_param = cast( + Dict[Union[int, str], nn.Parameter], + ( + _get_param_id_to_param_from_optim_input(model, optim_input) + if using_optim_input + else _get_param_key_to_param( + optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn + ) + ), + ) + fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model) + + with SimpleProfiler.profile("preprocessing_with_comm"): + ( + all_optim_state_keys, + optim_state_key_to_param_key, + ) = _map_param_key_to_optim_keys( + optim_state_dict, + group, + param_key_to_param, + param_to_fqns, + fqn_to_fsdp_param_info, + merge_keys=use_orig_params, + ) + + with SimpleProfiler.profile("state_converting"): + convert_fn = ( + _convert_state_with_orig_params + if use_orig_params + else _convert_state_with_flat_params + ) + fsdp_osd_state = convert_fn( + all_optim_state_keys, + optim_state_key_to_param_key, + fqn_to_fsdp_param_info, + optim_state_dict["state"], + to_save, + shard_state, + cpu_offload, + ) + + # At this point, communication is complete and ranks can return early if nothing + # will be saved on that rank. + if not to_save: + return {} + + fsdp_osd: Dict[str, Any] = {"state": fsdp_osd_state} + + flat_param_fqns = set(flat_param_to_fqn.values()) + for key, value in optim_state_dict["state"].items(): + if key in fsdp_osd_state: + continue + if key in flat_param_fqns: + continue + if key in param_key_to_param: + continue + # This key is not recognized by FSDP. 
It may be a user-defined state + # or some parameters state that FSDP is unable to map from + # ``optim.param_groups``. + warnings.warn( + f"Found a optim state, {key}, that FSDP cannot process. FSDP " + "will directly copy everything to the returned state_dict. In " + "most cases, this is a user-defined state that is not " + "associated with any particular parameter. Another possible " + "case is this state is managed by TorchRec. Otherwise, there may " + " be a mismatched assumption of optim_state_dict of this mode." + ) + fsdp_osd_state[key] = value + + if "param_groups" in optim_state_dict: + fsdp_osd["param_groups"] = _unflatten_param_groups( + optim_state_dict, param_key_to_param, param_to_fqns + ) + + cm.close() + SimpleProfiler.dump_and_reset("FSDP _optim_state_dict() profiling: ") + + return fsdp_osd + + +def _get_fqn_to_fsdp_param_info(model: nn.Module) -> Dict[str, FSDPParamInfo]: + """ + Construct the mapping from a param's fqn to its corresponding ``FSDPParamInfo`` + if the param is managed by FSDP. Shared parameters, or original parameters that + are shared across multiple nn.Modules, are required to belong to one and only + one FSDP instance and thus correspond to one ``FlatParameter``. Within the one + ``FlatParameter``, ``FlatParameter._fqns`` only stores the first FQN of a shared + parameter. Thus, the keys in the mapping are guaranteed to map to unique parameters. 
+ """ + + def module_fn(module, prefix, tree_level, fqn_to_param_info): + fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) + if fsdp_state is None: + return + _lazy_init(fsdp_state, module) + handle = _module_handle(fsdp_state, module) + if not handle: + return + flat_param = handle.flat_param + fsdp_param_info = FSDPParamInfo(fsdp_state, handle, {}, []) + # NOTE: `idx` indexes into the data structures *without* padding + # elements + for idx, local_fqn in enumerate(flat_param._fqns): + fqn = clean_tensor_name(prefix + local_fqn) + if fqn in fqn_to_param_info: + assert fqn_to_param_info[fqn].handle.flat_param is flat_param, fqn + fqn_to_param_info[fqn] = fsdp_param_info + fsdp_param_info.param_indices[fqn] = idx + if flat_param._params is not None: + fsdp_param_info.param_requires_grad.append( + flat_param._params[idx].requires_grad + ) + + def return_fn(fqn_to_param_info): + return fqn_to_param_info + + fqn_to_param_info: Dict[str, FSDPParamInfo] = {} + # FlatParameter._fqns stores the local fqn, starting from the root of the + # FSDP. Using _apply_to_modules() with model (may not be the FSDP root + # module) allows us to construct the global fqn. + return _apply_to_modules( + model, + module_fn, + return_fn, + [fqn for fqn, _ in _named_parameters_with_duplicates(model)], + fqn_to_param_info, + ) + + +@no_type_check +def _set_optim_use_dtensor( + fsdp_state: _FSDPState, + state_dict_settings: StateDictSettings, +) -> None: + # If device_mesh is passed in when initalizing FSDP, we automatically turn the + # _use_dtensor flag to be true for ShardedOptimStateDictConfig() if state_dict_type + # has to be set to SHARDED_STATE_DICT. 
+ if getattr(fsdp_state, "_device_mesh", None): + state_dict_type = state_dict_settings.state_dict_type + if state_dict_type == StateDictType.LOCAL_STATE_DICT: + raise RuntimeError( + "Found state_dict_type LOCAL_STATE_DICT.", + "DeviceMesh is not compatible with LOCAL_STATE_DICT.", + "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.", + ) + else: + state_dict_settings.optim_state_dict_config._use_dtensor = True diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..68c38d15daf1f97b54fa3366c2b668c934c7a17d --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py @@ -0,0 +1,113 @@ +""" +NOTE: This file must be imported like +``import torch.distributed.fsdp._traversal_utils`` and not like +``from torch.distirbuted.fsdp._traversal_utils import ...`` to avoid circular +imports. For brevity, we may import the file as ``traversal_utils``. +""" + +import collections +from typing import Deque, List, Set, Tuple + +import torch.nn as nn +from torch.distributed._composable.contract import _get_registry +from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state + + +""" +[Note: FSDP State Traversal] +For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel`` +module wrapping a fully sharded module, and for the non-wrapper code path, +``_FSDPState`` is an object that gets embedded on a fully sharded module. +See [Note: Fully Sharded Module] for the definition. + +There are three common traversal idioms: Given a root module, +- ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree. +- ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the +tree (i.e. those with ``_is_root == True``). +- ``_get_fsdp_handles()``returns all ``FlatParamHandle`` s in the tree. 
+ +All of these methods must take in the root module (i.e. an ``nn.Module``) and +not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph +traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal. +""" + + +def _composable(module: nn.Module) -> bool: + """ + Returns if ``module`` can compose with ``fully_shard``. + """ + # TODO: Add any other composable APIs that are mutually exclusive. + registry = _get_registry(module) + if registry is None: + return True + return "replicate" not in registry + + +# TODO (awgu): We may be able to remove this function if we retired the +# `use_orig_params=False` code path since so far we only need the module for +# `FlatParameter` registration, which is not needed for `use_orig_params=True`. +def _get_fsdp_states_with_modules( + module: nn.Module, +) -> Tuple[List[_FSDPState], List[nn.Module]]: + """ + Returns a tuple containing: + 1. A list of the ``_FSDPState`` instances in the module tree rooted at + ``module`` without any duplicates and following the ``module.modules()`` + traversal order (which is assumed to be depth-first). + 2. A corresponding list of the modules owning the states in the first list. + + For the wrapper code path, both returned lists are the same, each + containing all ``FullyShardedDataParallel`` instances. For the composable + code path, this returns a list of all composable state instances and a list + of the corresponding fully sharded modules. See [Note: Fully Sharded + Module]. + + NOTE: The traversal does not proceed into any module annotated by an + incompatible API (e.g. ``replicate``). 
+ """ + fsdp_states: List[_FSDPState] = [] + fsdp_modules: List[nn.Module] = [] + # Track the visited FSDP states since multiple modules may share the same + # one and we want to return a de-duplicated list + visited_fsdp_states: Set[_FSDPState] = set() + # Track the visited modules in case of shared modules, which implies the + # module graph is no longer a tree + visited_modules: Set[nn.Module] = set() + + # Perform depth-first search from `module` to ensure that we do not + # traverse into an incompatible API's subtree (use DFS instead of BFS to + # match `.modules()` order) + deque: Deque[nn.Module] = collections.deque([module]) + while deque: + submodule = deque.popleft() + visited_modules.add(submodule) + if not _composable(submodule): + continue + for child_module in reversed(list(submodule.children())): + if child_module not in visited_modules: + deque.appendleft(child_module) + optional_state = _get_module_fsdp_state(submodule) + if optional_state is not None and optional_state not in visited_fsdp_states: + visited_fsdp_states.add(optional_state) + fsdp_states.append(optional_state) + fsdp_modules.append(submodule) + return fsdp_states, fsdp_modules + + +def _get_fsdp_states(module: nn.Module) -> List[_FSDPState]: + """See :func:`_get_fsdp_states_with_modules`.""" + fsdp_states, _ = _get_fsdp_states_with_modules(module) + return fsdp_states + + +def _get_fsdp_handles(module: nn.Module) -> List: + """ + Returns all ``FlatParamHandle`` s in the module tree rooted at ``module`` + following the rules in :func:`_get_fsdp_state`. 
+ """ + handles = [ + fsdp_state._handle + for fsdp_state in _get_fsdp_states(module) + if fsdp_state._handle is not None + ] + return handles diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4143d2928c8b83a1714813960c668490ac9ca50e --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py @@ -0,0 +1,336 @@ +# mypy: allow-untyped-defs +import contextlib +import warnings +from typing import cast, Generator + +import torch +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +from torch.distributed.fsdp._common_utils import ( + _FSDPState, + _get_module_fsdp_state, + _has_fsdp_params, + _module_handle, + HandleTrainingState, + TrainingState, +) +from torch.distributed.fsdp._runtime_utils import ( + _lazy_init, + _reset_flat_param_grad_info_if_needed, + _reshard, + _reshard_grads, + _unshard, + _unshard_grads, +) +from torch.distributed.utils import _p_assert + +from ._flat_param import FlatParamHandle + + +FLAT_PARAM = "_flat_param" + + +@torch.no_grad() +def _writeback_to_local_shard( + handle: FlatParamHandle, + writeback_grad: bool, +): + """ + For the handle, writes back the this rank's shard of the unsharded + flattened parameter to the sharded flattened parameter. If + ``writeback_grad=True``, then writes back to the sharded gradient as + well. + + Precondition: The handle's ``FlatParameter`` 's data points to the + padded unsharded flattened parameter. 
+ """ + + def _get_shard(flat_param_or_grad: torch.Tensor) -> torch.Tensor: + if handle.uses_sharded_strategy: + # For sharded strategies, get the *unpadded* shard instead of + # the *padded* shard to persist user changes to the padding + # (though FSDP does not explicitly support this) + shard, _ = FlatParamHandle._get_unpadded_shard( + flat_param_or_grad, + handle.rank, + handle.world_size, + ) + return shard + # For `NO_SHARD`, the `flat_param` or its gradient may be modified, + # so we write it back directly + return flat_param_or_grad + + param_shard = _get_shard(handle.flat_param) + handle.flat_param._local_shard[: param_shard.numel()].copy_(param_shard) # type: ignore[attr-defined] + if writeback_grad: + existing_grad = handle.sharded_grad + if existing_grad is not None: + assert handle.flat_param.grad is not None + grad_shard = _get_shard(handle.flat_param.grad) + existing_grad[: grad_shard.numel()].copy_(grad_shard) + + +def _deregister_flat_param(state: _FSDPState, module: nn.Module) -> None: + """ + De-registers the flattened parameter from the wrapped module, hiding it + from ``nn.Module`` methods. + + We do not use ``del`` because we want ``FLAT_PARAM`` to always be an + attribute but dynamically change whether it is visible to ``nn.Module`` + methods. + """ + if _has_fsdp_params(state, module): + # TODO: figure out the case for the composable APIs. + cast(nn.Module, module.module)._parameters.pop(FLAT_PARAM, None) + + +def _register_flat_param(state: _FSDPState, module: nn.Module) -> None: + """ + Registers the flattened parameter to the wrapped module, making it + visible to ``nn.Module`` methods. + + We do not use :meth:`nn.Module.register_parameter` because we want + ``FLAT_PARAM`` to always be an attribute but dynamically change whether + it is visible to ``nn.Module`` methods. + """ + handle = _module_handle(state, module) + if _has_fsdp_params(state, module): + # TODO: figure out the case for the composable APIs. 
+ cast(nn.Module, module.module)._parameters[FLAT_PARAM] = handle.flat_param + + +@contextlib.contextmanager +def _unflatten_as_params(state: _FSDPState, module: nn.Module) -> Generator: + """ + Assumes that the flattened parameter is unsharded. When in the context, + de-registers the flattened parameter and unflattens the original + parameters as ``nn.Parameter`` views into the flattened parameter. + After the context, re-registers the flattened parameter and restores + the original parameters as ``Tensor`` views into the flattened + parameter. + """ + handle = _module_handle(state, module) + if not handle: + yield + else: + _deregister_flat_param(state, module) + try: + with handle.unflatten_as_params(): + yield + finally: + if not handle._use_orig_params: + _register_flat_param(state, module) + + +def _validate_unshard_params_args( + state: _FSDPState, + writeback: bool, + rank0_only: bool, + offload_to_cpu: bool, + with_grads: bool, +) -> None: + if with_grads and (offload_to_cpu or not state._use_orig_params): + raise NotImplementedError( + f"with_grads={with_grads}, " + f"use_orig_params={state._use_orig_params}, " + f"offload_to_cpu={offload_to_cpu} " + f"is not supported yet" + ) + if offload_to_cpu and state._handle and (not state._handle.uses_sharded_strategy): + raise NotImplementedError( + "offload_to_cpu=True and NO_SHARD is not supported yet" + ) + if writeback and rank0_only: + # TODO: Rank 0 can broadcast the `FlatParameter` to allow all ranks to + # persist the changes. + raise NotImplementedError( + "writeback=True and rank0_only=True is not supported yet" + ) + if offload_to_cpu and not rank0_only: + warnings.warn( + "offload_to_cpu=True and rank0_only=False may result in the" + "unsharded parameters being redundantly copied to CPU memory for " + "GPUs sharing the same CPU memory, which risks CPU OOM. We " + "recommend using offload_to_cpu=True with rank0_only=True." 
+ ) + + +@contextlib.contextmanager +def _unshard_fsdp_state_params( + module: nn.Module, + state: _FSDPState, + writeback: bool, + rank0_only: bool, + offload_to_cpu: bool, + with_grads: bool, +): + """ + This unshards the parameters for a single FSDP state ``state`` that + corresponds to ``module``. + """ + _validate_unshard_params_args( + state, writeback, rank0_only, offload_to_cpu, with_grads + ) + state._device_handle.synchronize() + # If handles are shared by other module(s), the handle may be already unsharded. + maybe_handle = _module_handle(state, module) + handle = None + if ( + maybe_handle + and maybe_handle._training_state != HandleTrainingState.SUMMON_FULL_PARAMS + ): + handle = maybe_handle + if not handle: + yield + return + + assert ( + handle._training_state == HandleTrainingState.IDLE + ), f"Expects the handle training to be IDLE but got {handle._training_state}" + + handle._training_state = HandleTrainingState.SUMMON_FULL_PARAMS + + _reset_flat_param_grad_info_if_needed(handle) + free_unsharded_flat_param = handle.needs_unshard() + # No need to call `wait_stream()` since we unshard in the computation + # stream directly + computation_stream = state._device_handle.current_stream() + _unshard(state, handle, computation_stream, computation_stream) + if with_grads: + _unshard_grads(handle) + + if rank0_only and state.rank != 0: + # Free the unsharded flattened parameter early + _reshard(state, handle, free_unsharded_flat_param) + if with_grads: + _reshard_grads(handle) + try: + yield + finally: + handle._training_state = HandleTrainingState.IDLE + else: + # Unflatten the unsharded flattened parameters + with contextlib.ExitStack() as stack: + # Invariant: rank == 0 or !rank0_only + if offload_to_cpu and handle.uses_sharded_strategy: + stack.enter_context(handle.to_cpu()) + # NOTE: Since PyTorch enforces that a parameter and its + # gradients need to match metadata (e.g. device), we must + # move gradients to CPU *after* we move parameters. 
+ # NOTE: This assumes 1 `FlatParameter` + if not state._use_orig_params: + stack.enter_context(_unflatten_as_params(state, module)) + try: + yield + finally: + stack.close() + if writeback: + _writeback_to_local_shard(handle, with_grads) + _reshard(state, handle, free_unsharded_flat_param) + if with_grads: + _reshard_grads(handle) + handle._training_state = HandleTrainingState.IDLE + + +@contextlib.contextmanager +def _unshard_params_for_summon( + module: nn.Module, + state: _FSDPState, + writeback: bool, + rank0_only: bool, + offload_to_cpu: bool, + with_grads: bool, +): + _validate_unshard_params_args( + state, writeback, rank0_only, offload_to_cpu, with_grads + ) + _lazy_init(state, module) + if state.training_state == TrainingState.FORWARD_BACKWARD: + raise AssertionError( + "Cannot manually unshard parameters during forward/backward" + ) + elif state.training_state == TrainingState.SUMMON_FULL_PARAMS: + raise AssertionError( + "Cannot manually unshard parameters when already unsharding parameters" + ) + with _unshard_fsdp_state_params( + module=module, + state=state, + writeback=writeback, + rank0_only=rank0_only, + offload_to_cpu=offload_to_cpu, + with_grads=with_grads, + ): + try: + state.training_state = TrainingState.SUMMON_FULL_PARAMS + yield + finally: + state.training_state = TrainingState.IDLE + + +@contextlib.contextmanager +def _unshard_params( + module: nn.Module, + recurse: bool, + writeback: bool, + rank0_only: bool, + offload_to_cpu: bool, + with_grads: bool, +): + """ + This unshards FSDP-managed parameters for all modules with FSDP applied in + the module tree rooted at ``module``. 
+ """ + if not recurse: + optional_state = _get_module_fsdp_state(module) + if optional_state is None: + with contextlib.nullcontext(): + yield + return + states_and_modules = ([optional_state], [module]) + else: + states_and_modules = traversal_utils._get_fsdp_states_with_modules(module) + with contextlib.ExitStack() as stack: + for state, module in zip(*states_and_modules): + stack.enter_context( + _unshard_params_for_summon( + module=module, + state=state, + writeback=writeback, + rank0_only=rank0_only, + offload_to_cpu=offload_to_cpu, + with_grads=with_grads, + ) + ) + yield + + +def _deregister_orig_params(state: _FSDPState, module: nn.Module) -> None: + """ + Deregisters the original parameters; registers the ``FlatParameter``. + """ + handle = _module_handle(state, module) + if not handle: + return + _p_assert( + handle._use_orig_params, + f"Inconsistent `_use_orig_params` -- FSDP: {state._use_orig_params} " + f"handle: {handle._use_orig_params}", + ) + handle._deregister_orig_params() + _register_flat_param(state, module) + + +def _register_orig_params(state: _FSDPState, module: nn.Module) -> None: + """ + Deregisters the ``FlatParameter``; registers the original parameters. 
+ """ + handle = _module_handle(state, module) + if not handle: + return + _deregister_flat_param(state, module) + if handle.is_sharded(handle.flat_param): + handle._use_sharded_views() + handle._use_sharded_grad_views() + else: + handle._use_unsharded_views(as_params=True) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..895bcbd8e967b467b7aeab48db14094457eef2bb --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/_wrap_utils.py @@ -0,0 +1,262 @@ +# mypy: allow-untyped-defs +import collections +import functools +import inspect +import warnings +from functools import partial +from typing import Any, Callable, Dict, List, Set, Tuple, Type, Union + +import torch.nn as nn +from torch.distributed.fsdp._common_utils import ( + _get_module_fsdp_state, + _override_module_mixed_precision, +) +from torch.distributed.fsdp.wrap import ( + _construct_wrap_fn, + _or_policy, + _Policy, + _post_order_apply, + _recursive_wrap, + _run_mixed_precision_override_policy, + _wrap_module_cls_individually, +) + + +def _auto_wrap( + root_module: nn.Module, + policy: Union[Callable, _Policy], + ignored_modules: Set[nn.Module], + ignored_params: Set[nn.Parameter], + root_kwargs: Dict[str, Any], + fsdp_fn: Callable, # e.g. `FullyShardedDataParallel` or `fully_shard` +): + """ + Auto wraps modules in ``root_module`` 's tree according to ``policy`` + following a post-order traversal. + + Precondition: ``root_kwargs`` should contain all arguments except + ``module``. This function accepts the kwargs dict directly since it gets + forwarded into the post-order traversal function. + """ + mixed_precision = root_kwargs["mixed_precision"] + is_wrapper = inspect.isclass(fsdp_fn) + # TODO: We may relax this no-nested-wrapping constraint to support manual + # wrapping followed by auto wrapping. 
+ _check_nested_wrapping(root_module) + + if isinstance(policy, _Policy): + root_kwargs["auto_wrap_policy" if is_wrapper else "policy"] = None + target_module_to_kwargs = policy._run_policy( + root_module, ignored_modules, root_kwargs + ) + if mixed_precision is not None: + target_module_to_kwargs = _run_mixed_precision_override_policy( + root_module, + mixed_precision._module_classes_to_ignore, + ignored_modules, + root_kwargs, + target_module_to_kwargs, + ) + overridden_module_classes = _override_module_mixed_precision( + root_module, mixed_precision._module_classes_to_ignore + ) + _warn_on_overridden_mixed_precision(overridden_module_classes) + use_orig_params = root_kwargs.get("use_orig_params", False) + _validate_frozen_params( + root_module, + set(target_module_to_kwargs.keys()), + ignored_params, + use_orig_params, + ) + wrap_fn = _construct_wrap_fn(root_module, target_module_to_kwargs, fsdp_fn) + _post_order_apply(root_module, wrap_fn) + return + + recursive_wrap_kwargs = { + "module": root_module, + "auto_wrap_policy": policy, + "wrapper_cls": fsdp_fn, + "ignored_modules": ignored_modules, + "ignored_params": ignored_params, + "only_wrap_children": True, + } + if mixed_precision is not None: + # Wrap modules of the ignored types separately and register forward + # hooks to cast to fp32 and back to the original dtype, respectively + overridden_module_classes = _override_module_mixed_precision( + root_module, mixed_precision._module_classes_to_ignore + ) + policy = functools.partial( + _or_policy, + policies=[ + policy, + partial( + _wrap_module_cls_individually, + module_classes=mixed_precision._module_classes_to_ignore, + ), + ], + ) + recursive_wrap_kwargs["auto_wrap_policy"] = policy + _warn_on_overridden_mixed_precision(overridden_module_classes) + _recursive_wrap(**recursive_wrap_kwargs, **root_kwargs) # type: ignore[arg-type] + + +def _check_nested_wrapping(root_module: nn.Module): + for module_name, module in root_module.named_modules(): + if 
_get_module_fsdp_state(module) is not None: + raise ValueError( + "FSDP auto wrapping requires modules to not already have " + f"FSDP applied but found {module_name} in\n{root_module}" + ) + + +def _warn_on_overridden_mixed_precision( + overridden_module_classes: Set[Type[nn.Module]], +): + if len(overridden_module_classes) == 0: + return + warnings.warn( + "Both mixed precision and an auto_wrap_policy were specified to FSDP, " + f"where the wrapped module has submodules of type:\n{overridden_module_classes}\n" + "These modules will be wrapped as separate FSDP instacnes with mixed " + "precision disabled." + ) + + +def _validate_frozen_params( + root_module: nn.Module, + modules_to_wrap: Set[nn.Module], + ignored_params: Set[nn.Parameter], + use_orig_params: bool, +): + """ + This checks that, given ``modules_to_wrap``, each module would manage + parameters that are uniformly frozen or non-frozen. This uniformity + requirement is strict for ``use_orig_params=False`` (hard error) and highly + recommended for ``use_orig_params=True`` (user warning). + """ + post_order_named_modules = _get_post_order_named_modules(root_module) + visited_modules: Set[nn.Module] = set() + for module_name, module in post_order_named_modules: + if module in modules_to_wrap: + param_to_fqn = _get_managed_param_to_fqn( + module, ignored_params, visited_modules, module_name + ) + frozen_param_fqns: List[str] = [] + frozen_param_numel = 0 + nonfrozen_param_fqns: List[str] = [] + nonfrozen_param_numel = 0 + for param, fqn in param_to_fqn.items(): + if param.requires_grad: + nonfrozen_param_fqns.append(fqn) + nonfrozen_param_numel += param.numel() + else: + frozen_param_fqns.append(fqn) + frozen_param_numel += param.numel() + if len(frozen_param_fqns) > 0 and len(nonfrozen_param_fqns) > 0: + msg = f"{module_name} has both parameters with requires_grad=True and False." 
+ if use_orig_params: + total_param_numel = frozen_param_numel + nonfrozen_param_numel + msg += ( + " We do not recommend wrapping such modules since " + "the gradient memory usage will be higher than expected " + f"({total_param_numel} numel instead of {nonfrozen_param_numel} numel " + "before sharding via reduce-scatter). " + ) + else: + msg += " FSDP does not support wrapping such modules when use_orig_params=False. " + msg += "If possible, wrap the frozen parameters with FSDP separately.\n" + msg += ( + f"The following parameters have requires_grad=True:\n{nonfrozen_param_fqns}\n" + f"The following parameters have requires_grad=False:\n{frozen_param_fqns}" + ) + if use_orig_params: + warnings.warn(msg) + else: + raise ValueError(msg) + + +def _get_post_order_named_modules( + root_module: nn.Module, +) -> List[Tuple[str, nn.Module]]: + """ + This returns the named modules following a post-order traversal, which is a + valid reverse topological sort. We achieve this using the reverse of a + stack-based DFS order instead of reversing ``root_module.named_modules()`` + since the former gives the modules in registration order at each level in + the module tree (as opposed to the reverse), which allows us to error/warn + on the first registered module that violates the condition. + + For example, consider the following module structure: + M( + S1(), + S2( + SS1(), + SS2(), + ), + S3(), + ) + The reverse DFS order is [S1, SS1, SS2, S2, S3, M], while the reverse + ``named_modules()`` order is [S3, SS2, SS1, S2, S1, M]. 
+ """ + visited_modules = {root_module} + stack = [("", root_module)] + # Append and reverse at the end for linear-time algorithm + reverse_post_order_named_modules: List[Tuple[str, nn.Module]] = [] + while stack: + module_name, module = stack.pop() + reverse_post_order_named_modules.append((module_name, module)) + for child_module_name, child_module in module.named_children(): + if child_module is None: # only for overrides of `named_children()` + continue + if child_module not in visited_modules: + visited_modules.add(child_module) + if module_name != "": + child_module_name = module_name + "." + child_module_name + stack.append((child_module_name, child_module)) + post_order_named_modules = list(reversed(reverse_post_order_named_modules)) + return post_order_named_modules + + +def _get_managed_param_to_fqn( + module_to_wrap: nn.Module, + ignored_params: Set[nn.Parameter], + visited_modules: Set[nn.Module], + root_prefix: str, +) -> Dict[nn.Parameter, str]: + """ + This returns a dict that maps managed parameter to its FQN for the given + ``module_to_wrap``. The dict's keys are exactly the parameters that would + be managed by the module, where this is achieved by calling this function + on the modules to wrap in reverse topological order, destructively updating + ``visited_modules``, and not traversing into those modules. The FQNs are + prefixed from the root (via ``root_prefix``) to be more informative. + + NOTE: This function is meant to be called pre-wrapping and iteratively in + reverse topological order to cover the full module tree. This differs from + the ``_get_param_to_fqn()`` function meant to be called post-wrapping and + on the full module tree in one shot. Given those differences, we do not try + to unify the two. 
+ """ + param_to_fqn: Dict[nn.Parameter, str] = {} + # Run BFS (or any tree traversal works) + queue = collections.deque([(module_to_wrap, root_prefix)]) + visited_modules.add(module_to_wrap) + while queue: + module, prefix = queue.popleft() + for param_name, param in module.named_parameters(recurse=False): + if param not in ignored_params: + fqn = param_name if prefix == "" else prefix + "." + param_name + param_to_fqn[param] = fqn + for child_module_name, child_module in module.named_children(): + if child_module is None: # only for overrides of `named_children()` + continue + if child_module not in visited_modules: + visited_modules.add(child_module) + child_prefix = ( + child_module_name + if prefix == "" + else prefix + "." + child_module_name + ) + queue.append((child_module, child_prefix)) + return param_to_fqn diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..3ff9e80ca12e3cfab4ccff455748d693be11a79a --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py @@ -0,0 +1,2185 @@ +# mypy: ignore-errors + +import contextlib +import copy +import functools +import math +import traceback +import warnings +from contextlib import contextmanager +from enum import auto, Enum +from typing import ( + Any, + Callable, + Dict, + Generator, + Iterable, + Iterator, + List, + Optional, + Tuple, + Union, +) + +import torch +import torch.distributed as dist +import torch.distributed.fsdp._traversal_utils as traversal_utils +import torch.nn as nn +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + _CHECKPOINT_WRAPPED_MODULE, + ActivationWrapper, +) +from torch.distributed.algorithms._comm_hooks import LOW_PRECISION_HOOKS +from torch.distributed.fsdp._common_utils import ( + _FSDPState, + 
_get_param_to_fqns, + FSDP_PREFIX, + FSDP_WRAPPED_MODULE, + HandleTrainingState, + TrainingState, +) +from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo +from torch.distributed.fsdp._init_utils import ( + _check_orig_params_flattened, + _init_buffer_state, + _init_core_state, + _init_device_handle, + _init_extension, + _init_ignored_module_states, + _init_param_handle_from_module, + _init_prefetching_state, + _init_process_group_state, + _init_runtime_state, + _init_state_dict_state, + HYBRID_SHARDING_STRATEGIES, + ProcessGroupType, +) +from torch.distributed.fsdp._runtime_utils import ( + _get_fsdp_root_states, + _is_fsdp_root, + _lazy_init, + _post_forward, + _post_forward_reshard, + _pre_forward, + _pre_forward_unshard, + _root_pre_forward, + _unshard, + _wait_for_computation_stream, +) +from torch.distributed.fsdp._wrap_utils import _auto_wrap +from torch.distributed.fsdp.api import ( + BackwardPrefetch, + CPUOffload, + FullOptimStateDictConfig, + FullStateDictConfig, + LocalOptimStateDictConfig, + LocalStateDictConfig, + MixedPrecision, + OptimStateDictConfig, + ShardedOptimStateDictConfig, + ShardedStateDictConfig, + ShardingStrategy, + StateDictConfig, + StateDictSettings, + StateDictType, +) +from torch.distributed.tensor import DeviceMesh +from torch.distributed.utils import _p_assert + +from ._flat_param import FlatParameter, FlatParamHandle +from ._optim_utils import ( + _flatten_optim_state_dict, + _get_param_id_to_param_from_optim_input, + _get_param_key_to_param, + _get_param_to_param_id_from_optim_input, + _get_param_to_param_key, + _optim_state_dict, + _rekey_sharded_optim_state_dict, + _set_optim_use_dtensor, +) +from ._state_dict_utils import _register_all_state_dict_hooks +from ._unshard_param_utils import ( + _deregister_orig_params, + _register_flat_param, + _register_orig_params, + _unshard_params, + _unshard_params_for_summon, +) +from .wrap import CustomPolicy, ModuleWrapPolicy + + +__all__ = [ + 
"FullyShardedDataParallel", + "OptimStateKeyType", +] + + +FLAT_PARAM = "_flat_param" + + +class OptimStateKeyType(Enum): + """Represents the type of key in an optimizer state-dict.""" + + PARAM_NAME = auto() + PARAM_ID = auto() + + +class FullyShardedDataParallel(nn.Module, _FSDPState): + """A wrapper for sharding module parameters across data parallel workers. + + This is inspired by `Xu et al.`_ as well as the ZeRO Stage 3 from DeepSpeed_. + FullyShardedDataParallel is commonly shortened to FSDP. + + .. _`Xu et al.`: https://arxiv.org/abs/2004.13336 + .. _DeepSpeed: https://www.deepspeed.ai/ + + To understand FSDP internals, refer to the + :ref:`fsdp_notes`. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> import torch + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> torch.cuda.set_device(device_id) + >>> sharded_module = FSDP(my_module) + >>> optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001) + >>> x = sharded_module(x, y=3, z=torch.Tensor([1])) + >>> loss = x.sum() + >>> loss.backward() + >>> optim.step() + + Using FSDP involves wrapping your module and then initializing your + optimizer after. This is required since FSDP changes the parameter + variables. + + When setting up FSDP, you need to consider the destination CUDA + device. If the device has an ID (``dev_id``), you have three options: + + * Place the module on that device + * Set the device using ``torch.cuda.set_device(dev_id)`` + * Pass ``dev_id`` into the ``device_id`` constructor argument. + + This ensures that the FSDP instance's compute device is the + destination device. For option 1 and 3, the FSDP initialization + always occurs on GPU. For option 2, the FSDP initialization + happens on module's current device, which may be a CPU. 
+ + If you're using the ``sync_module_states=True`` flag, you need to + ensure that the module is on a GPU or use the ``device_id`` + argument to specify a CUDA device that FSDP will move the module + to in the FSDP constructor. This is necessary because + ``sync_module_states=True`` requires GPU communication. + + FSDP also takes care of moving input tensors to the forward method + to the GPU compute device, so you don't need to manually move them + from CPU. + + For ``use_orig_params=True``, + ``ShardingStrategy.SHARD_GRAD_OP`` exposes the unsharded + parameters, not the sharded parameters after forward, unlike + ``ShardingStrategy.FULL_SHARD``. If you want + to inspect the gradients, you can use the ``summon_full_params`` + method with ``with_grads=True``. + + With ``limit_all_gathers=True``, you may see a gap in the FSDP + pre-forward where the CPU thread is not issuing any kernels. This is + intentional and shows the rate limiter in effect. Synchronizing the CPU + thread in that way prevents over-allocating memory for subsequent + all-gathers, and it should not actually delay GPU kernel execution. + + FSDP replaces managed modules' parameters with ``torch.Tensor`` + views during forward and backward computation for autograd-related + reasons. If your module's forward relies on saved references to + the parameters instead of reacquiring the references each + iteration, then it will not see FSDP's newly created views, + and autograd will not work correctly. + + Finally, when using ``sharding_strategy=ShardingStrategy.HYBRID_SHARD`` + with the sharding process group being intra-node and the + replication process group being inter-node, setting + ``NCCL_CROSS_NIC=1`` can help improve the all-reduce times over + the replication process group for some cluster setups. + + **Limitations** + + There are several limitations to be aware of when using FSDP: + + * FSDP currently does not support gradient accumulation outside + ``no_sync()`` when using CPU offloading. 
This is because FSDP + uses the newly-reduced gradient instead of accumulating with any + existing gradient, which can lead to incorrect results. + + * FSDP does not support running the forward pass of a submodule + that is contained in an FSDP instance. This is because the + submodule's parameters will be sharded, but the submodule itself + is not an FSDP instance, so its forward pass will not all-gather + the full parameters appropriately. + + * FSDP does not work with double backwards due to the way it + registers backward hooks. + + * FSDP has some constraints when freezing parameters. + For ``use_orig_params=False``, each FSDP instance must manage + parameters that are all frozen or all non-frozen. For + ``use_orig_params=True``, FSDP supports mixing frozen and + non-frozen parameters, but it's recommended to avoid doing so to + prevent higher than expected gradient memory usage. + + * As of PyTorch 1.12, FSDP offers limited support for shared + parameters. If enhanced shared parameter support is needed for + your use case, please post in + `this issue `__. + + * You should avoid modifying the parameters between forward and + backward without using the ``summon_full_params`` context, as + the modifications may not persist. + + Args: + module (nn.Module): + This is the module to be wrapped with FSDP. + process_group (Optional[Union[ProcessGroup, Tuple[ProcessGroup, ProcessGroup]]]): + This is the process group over which the model is sharded and thus + the one used for FSDP's all-gather and reduce-scatter collective + communications. If ``None``, then FSDP uses the default process + group. For hybrid sharding strategies such as + ``ShardingStrategy.HYBRID_SHARD``, users can pass in a tuple of + process groups, representing the groups over which to shard and + replicate, respectively. If ``None``, then FSDP constructs process + groups for the user to shard intra-node and replicate inter-node. 
+ (Default: ``None``) + sharding_strategy (Optional[ShardingStrategy]): + This configures the sharding strategy, which may trade off memory + saving and communication overhead. See :class:`ShardingStrategy` + for details. (Default: ``FULL_SHARD``) + cpu_offload (Optional[CPUOffload]): + This configures CPU offloading. If this is set to ``None``, then + no CPU offloading happens. See :class:`CPUOffload` for details. + (Default: ``None``) + auto_wrap_policy (Optional[Union[Callable[[nn.Module, bool, int], bool], ModuleWrapPolicy, CustomPolicy]]): + This specifies a policy to apply FSDP to submodules of ``module``, + which is needed for communication and computation overlap and thus + affects performance. If ``None``, then FSDP only applies to + ``module``, and users should manually apply FSDP to parent modules + themselves (proceeding bottom-up). For convenience, this accepts + ``ModuleWrapPolicy`` directly, which allows users to specify the + module classes to wrap (e.g. the transformer block). Otherwise, + this should be a callable that takes in three arguments + ``module: nn.Module``, ``recurse: bool``, and + ``nonwrapped_numel: int`` and should return a ``bool`` specifying + whether the passed-in ``module`` should have FSDP applied if + ``recurse=False`` or if the traversal should continue into the + module's subtree if ``recurse=True``. Users may add additional + arguments to the callable. The ``size_based_auto_wrap_policy`` in + ``torch.distributed.fsdp.wrap.py`` gives an example callable that + applies FSDP to a module if the parameters in its subtree exceed + 100M numel. We recommend printing the model after applying FSDP + and adjusting as needed. 
+ + Example:: + + >>> def custom_auto_wrap_policy( + >>> module: nn.Module, + >>> recurse: bool, + >>> nonwrapped_numel: int, + >>> # Additional custom arguments + >>> min_num_params: int = int(1e8), + >>> ) -> bool: + >>> return nonwrapped_numel >= min_num_params + >>> # Configure a custom `min_num_params` + >>> my_auto_wrap_policy = functools.partial(custom_auto_wrap_policy, min_num_params=int(1e5)) + + backward_prefetch (Optional[BackwardPrefetch]): + This configures explicit backward prefetching of all-gathers. If + ``None``, then FSDP does not backward prefetch, and there is no + communication and computation overlap in the backward pass. See + :class:`BackwardPrefetch` for details. (Default: ``BACKWARD_PRE``) + mixed_precision (Optional[MixedPrecision]): + This configures native mixed precision for FSDP. If this is set to + ``None``, then no mixed precision is used. Otherwise, parameter, + buffer, and gradient reduction dtypes can be set. See + :class:`MixedPrecision` for details. (Default: ``None``) + ignored_modules (Optional[Iterable[torch.nn.Module]]): Modules whose + own parameters and child modules' parameters and buffers are + ignored by this instance. None of the modules directly in + ``ignored_modules`` should be :class:`FullyShardedDataParallel` + instances, and any child modules that are already-constructed + :class:`FullyShardedDataParallel` instances will not be ignored if + they are nested under this instance. This argument may be used to + avoid sharding specific parameters at module granularity when using an + ``auto_wrap_policy`` or if parameters' sharding is not managed by + FSDP. (Default: ``None``) + param_init_fn (Optional[Callable[[nn.Module], None]]): + A ``Callable[torch.nn.Module] -> None`` that + specifies how modules that are currently on the meta device should + be initialized onto an actual device. 
As of v1.12, FSDP detects + modules with parameters or buffers on meta device via ``is_meta`` + and either applies ``param_init_fn`` if specified or calls + ``nn.Module.reset_parameters()`` otherwise. For both cases, the + implementation should *only* initialize the parameters/buffers of + the module, not those of its submodules. This is to avoid + re-initialization. In addition, FSDP also supports deferred + initialization via torchdistX's (https://github.com/pytorch/torchdistX) + ``deferred_init()`` API, where the deferred modules are initialized + by calling ``param_init_fn`` if specified or torchdistX's default + ``materialize_module()`` otherwise. If ``param_init_fn`` is + specified, then it is applied to all meta-device modules, meaning + that it should probably case on the module type. FSDP calls the + initialization function before parameter flattening and sharding. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> module = MyModule(device="meta") + >>> def my_init_fn(module: nn.Module): + >>> # E.g. initialize depending on the module type + >>> ... + >>> fsdp_model = FSDP(module, param_init_fn=my_init_fn, auto_wrap_policy=size_based_auto_wrap_policy) + >>> print(next(fsdp_model.parameters()).device) # current CUDA device + >>> # With torchdistX + >>> module = deferred_init.deferred_init(MyModule, device="cuda") + >>> # Will initialize via deferred_init.materialize_module(). + >>> fsdp_model = FSDP(module, auto_wrap_policy=size_based_auto_wrap_policy) + + device_id (Optional[Union[int, torch.device]]): An ``int`` or + ``torch.device`` giving the CUDA device on which FSDP + initialization takes place, including the module initialization + if needed and the parameter sharding. This should be specified to + improve initialization speed if ``module`` is on CPU. If the + default CUDA device was set (e.g. via ``torch.cuda.set_device``), + then the user may pass ``torch.cuda.current_device`` to this. 
+ (Default: ``None``) + sync_module_states (bool): If ``True``, then each FSDP module will + broadcast module parameters and buffers from rank 0 to ensure that + they are replicated across ranks (adding communication overhead to + this constructor). This can help load ``state_dict`` checkpoints + via ``load_state_dict`` in a memory efficient way. See + :class:`FullStateDictConfig` for an example of this. (Default: + ``False``) + forward_prefetch (bool): If ``True``, then FSDP *explicitly* prefetches + the next forward-pass all-gather before the current forward + computation. This is only useful for CPU-bound workloads, in which + case issuing the next all-gather earlier may improve overlap. This + should only be used for static-graph models since the prefetching + follows the first iteration's execution order. (Default: ``False``) + limit_all_gathers (bool): If ``True``, then FSDP explicitly + synchronizes the CPU thread to ensure GPU memory usage from only + *two* consecutive FSDP instances (the current instance running + computation and the next instance whose all-gather is prefetched). + If ``False``, then FSDP allows the CPU thread to issue all-gathers + without any extra synchronization. (Default: ``True``) We often + refer to this feature as the "rate limiter". This flag should only + be set to ``False`` for specific CPU-bound workloads with low + memory pressure in which case the CPU thread can aggressively issue + all kernels without concern for the GPU memory usage. + use_orig_params (bool): Setting this to ``True`` has FSDP use + ``module`` 's original parameters. FSDP exposes those original + parameters to the user via :meth:`nn.Module.named_parameters` + instead of FSDP's internal :class:`FlatParameter` s. This means + that the optimizer step runs on the original parameters, enabling + per-original-parameter hyperparameters. 
FSDP preserves the original + parameter variables and manipulates their data between unsharded + and sharded forms, where they are always views into the underlying + unsharded or sharded :class:`FlatParameter`, respectively. With the + current algorithm, the sharded form is always 1D, losing the + original tensor structure. An original parameter may have all, + some, or none of its data present for a given rank. In the none + case, its data will be like a size-0 empty tensor. Users should not + author programs relying on what data is present for a given + original parameter in its sharded form. ``True`` is required to + use ``torch.compile()``. Setting this to ``False`` exposes FSDP's + internal :class:`FlatParameter` s to the user via + :meth:`nn.Module.named_parameters`. (Default: ``False``) + ignored_states (Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]): + Ignored parameters or modules that will not be managed by this FSDP + instance, meaning that the parameters are not sharded and their + gradients are not reduced across ranks. This argument unifies with + the existing ``ignored_modules`` argument, and we may deprecate + ``ignored_modules`` soon. For backward compatibility, we keep both + ``ignored_states`` and ``ignored_modules``, but FSDP only allows one + of them to be specified as not ``None``. + device_mesh (Optional[DeviceMesh]): DeviceMesh can be used as an alternative to + process_group. When device_mesh is passed, FSDP will use the underlying process + groups for all-gather and reduce-scatter collective communications. Therefore, + these two args need to be mutually exclusive. For hybrid sharding strategies such as + ``ShardingStrategy.HYBRID_SHARD``, users can pass in a 2D DeviceMesh instead + of a tuple of process groups. For 2D FSDP + TP, users are required to pass in + device_mesh instead of process_group. 
For more DeviceMesh info, please visit: + https://pytorch.org/tutorials/recipes/distributed_device_mesh.html + """ + + def __init__( + self, + module: nn.Module, + process_group: ProcessGroupType = None, + sharding_strategy: Optional[ShardingStrategy] = None, + cpu_offload: Optional[CPUOffload] = None, + auto_wrap_policy: Optional[ + Union[Callable, ModuleWrapPolicy, CustomPolicy] + ] = None, + backward_prefetch: Optional[BackwardPrefetch] = BackwardPrefetch.BACKWARD_PRE, + mixed_precision: Optional[MixedPrecision] = None, + ignored_modules: Optional[Iterable[torch.nn.Module]] = None, + param_init_fn: Optional[Callable[[nn.Module], None]] = None, + device_id: Optional[Union[int, torch.device]] = None, + sync_module_states: bool = False, + forward_prefetch: bool = False, + limit_all_gathers: bool = True, + use_orig_params: bool = False, + ignored_states: Union[ + Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]] + ] = None, + device_mesh: Optional[DeviceMesh] = None, + ): + torch._C._log_api_usage_once("torch.distributed.fsdp") + super().__init__() + if isinstance(module, (nn.ModuleList, nn.ModuleDict)): + warnings.warn( + "FSDP will not all-gather parameters for containers that do " + f"not implement forward: {module}", + stacklevel=2, + ) + _init_ignored_module_states(self, module, ignored_modules, ignored_states) + _init_device_handle(self, module, self._ignored_params, device_id) + + # Add module annotations for Dynamo support (see function for details) + _annotate_modules_for_dynamo(module, self._ignored_modules, use_orig_params) + + # Initializes self.process_group, along with rank and world size. This will + # also set another attribute, _inter_node_pg, to control the process group + # over which sharding occurs, if sharding_strategy is {HYBRID_SHARD, _HYBRID_SHARD_ZERO2}. + # Note that this is done before auto_wrapping, so that child FSDP modules simply pick up + # the same process group state as the root FSDP module. 
+ self._device_mesh = device_mesh + _init_process_group_state( + self, + process_group, + sharding_strategy, + auto_wrap_policy, + device_mesh, + ) + if auto_wrap_policy is not None: + root_kwargs = { + "process_group": process_group, + "sharding_strategy": sharding_strategy, + "cpu_offload": cpu_offload, + "backward_prefetch": backward_prefetch, + "mixed_precision": mixed_precision, + "param_init_fn": param_init_fn, + "device_id": device_id, + "sync_module_states": sync_module_states, + "forward_prefetch": forward_prefetch, + "limit_all_gathers": limit_all_gathers, + "use_orig_params": use_orig_params, + "ignored_states": self._ignored_params, + "device_mesh": device_mesh, + } + if sharding_strategy in HYBRID_SHARDING_STRATEGIES and device_mesh is None: + # Share root process groups with children to maintain + # the invariant that all FSDP modules will have the same + # process groups. + root_kwargs["process_group"] = (self.process_group, self._inter_node_pg) + + _auto_wrap( + module, + auto_wrap_policy, + self._ignored_modules, + self._ignored_params, + root_kwargs, + FullyShardedDataParallel, + ) + + backward_prefetch_limit = 1 + forward_prefetch_limit = 1 + _init_core_state( + self, + sharding_strategy, + mixed_precision, + cpu_offload, + limit_all_gathers, + use_orig_params, + backward_prefetch_limit, + forward_prefetch_limit, + ) + _init_runtime_state(self) + _init_prefetching_state(self, backward_prefetch, forward_prefetch) + _init_buffer_state(self, module) + # extension needs to be set before `_init_param_handle_from_module()` + _init_extension(self, device_mesh) + _init_param_handle_from_module( + self, + module, + device_id, + param_init_fn, + sync_module_states, + ) + self._fsdp_wrapped_module = module + if not use_orig_params: + _check_orig_params_flattened(self, self._ignored_params) + _register_flat_param(self, self) + + # `_state_dict_type` controls the `state_dict()` behavior, which is + # implemented using post-save and pre-load hooks + 
_init_state_dict_state(self) + _register_all_state_dict_hooks(self) + self._zero_scalar = None + + @property + def module(self) -> nn.Module: + """Return the wrapped module.""" + # FSDP's `.module` must refer to the innermost wrapped module when + # composing with other module wrappers in order for state dict to work + if isinstance(self._fsdp_wrapped_module, ActivationWrapper): + return getattr(self._fsdp_wrapped_module, _CHECKPOINT_WRAPPED_MODULE) + return self._fsdp_wrapped_module + + @property + def _has_params(self) -> bool: + """Returns whether this FSDP instance manages any parameters.""" + return hasattr(self, "_handle") and self._handle is not None + + @property + def _flat_param(self) -> Optional[FlatParameter]: + return self._handle.flat_param if self._handle else None + + def __getattr__(self, name: str) -> Any: + """Forward missing attributes to the wrapped module.""" + try: + return super().__getattr__(name) # defer to nn.Module's logic + except AttributeError: + return getattr(self._fsdp_wrapped_module, name) + + def __getitem__(self, key: int) -> Any: + """Forward indexing calls in case the module is an ``nn.Sequential``.""" + if hasattr(self, FSDP_WRAPPED_MODULE): + return self._fsdp_wrapped_module.__getitem__(key) # type: ignore[operator] + return super().__getitem__(key) + + def check_is_root(self) -> bool: + """Check if this instance is a root FSDP module.""" + return _is_fsdp_root(self, self) + + @staticmethod + def fsdp_modules( + module: nn.Module, + root_only: bool = False, + ) -> List["FullyShardedDataParallel"]: + """Return all nested FSDP instances. + + This possibly includes ``module`` itself and only includes FSDP root modules if ``root_only=True``. + + Args: + module (torch.nn.Module): Root module, which may or may not be an + ``FSDP`` module. + root_only (bool): Whether to return only FSDP root modules. + (Default: ``False``) + + Returns: + List[FullyShardedDataParallel]: FSDP modules that are nested in + the input ``module``. 
+ """ + if root_only: + return _get_fsdp_root_states(module) + return traversal_utils._get_fsdp_states(module) + + def apply(self, fn: Callable[[nn.Module], None]) -> "FullyShardedDataParallel": + r"""Apply ``fn`` recursively to every submodule (as returned by ``.children()``) as well as self. + + Typical use includes initializing the parameters of a model (see also :ref:`nn-init-doc`). + + Compared to ``torch.nn.Module.apply``, this version additionally gathers + the full parameters before applying ``fn``. It should not be called from + within another ``summon_full_params`` context. + + Args: + fn (:class:`Module` -> None): function to be applied to each submodule + + Returns: + Module: self + """ + uninitialized = self._is_root is None + self._assert_state(TrainingState.IDLE) + # Use `_unshard_params_for_summon()` with `recurse=False` instead of + # `_unshard_fsdp_state_params()` directly to perform lazy + # initialization, which is needed to initialize `FlatParameter` + # parameter attributes as required by the unshard logic + with _unshard_params_for_summon( + self, + self, + writeback=True, + rank0_only=False, + offload_to_cpu=False, + with_grads=False, + ): + ret = super().apply(fn) + + # Reset lazy init called in `_unshard_params_for_summon()` since + # `apply()` may have been called on FSDP instance that is not truly a + # root, in which case it will be incorrectly marked as one. + if uninitialized and self._is_root: + for module in traversal_utils._get_fsdp_states(self): + module._reset_lazy_init() + + return ret + + def _mixed_precision_enabled_for_buffers(self) -> bool: + """Return whether the user explicitly enabled buffer mixed precision. + + NOTE: Unlike parameters and gradient reduction, buffer mixed precision + is applied at the FSDP instance level, not the ``FlatParameter`` level, + which may be different for the composable code path. 
+ """ + return self.mixed_precision.buffer_dtype is not None + + def _low_precision_hook_enabled(self) -> bool: + """Whether a low precision hook is registered or not.""" + return self._comm_hook is not None and self._comm_hook in LOW_PRECISION_HOOKS + + def _reset_lazy_init(self) -> None: + """Reset instance so :func:`_lazy_init` will run on the next forward.""" + self._is_root: Optional[bool] = None + + @staticmethod + def set_state_dict_type( + module: nn.Module, + state_dict_type: StateDictType, + state_dict_config: Optional[StateDictConfig] = None, + optim_state_dict_config: Optional[OptimStateDictConfig] = None, + ) -> StateDictSettings: + """Set the ``state_dict_type`` of all the descendant FSDP modules of the target module. + + Also takes (optional) configuration for the model's and optimizer's state dict. + The target module does not have to be a FSDP module. If the target + module is a FSDP module, its ``state_dict_type`` will also be changed. + + .. note:: This API should be called for only the top-level (root) + module. + + .. note:: This API enables users to transparently use the conventional + ``state_dict`` API to take model checkpoints in cases where the + root FSDP module is wrapped by another ``nn.Module``. For example, + the following will ensure ``state_dict`` is called on all non-FSDP + instances, while dispatching into `sharded_state_dict` implementation + for FSDP: + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> model = DDP(FSDP(...)) + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.SHARDED_STATE_DICT, + >>> state_dict_config = ShardedStateDictConfig(offload_to_cpu=True), + >>> optim_state_dict_config = OptimStateDictConfig(offload_to_cpu=True), + >>> ) + >>> param_state_dict = model.state_dict() + >>> optim_state_dict = FSDP.optim_state_dict(model, optim) + + Args: + module (torch.nn.Module): Root module. + state_dict_type (StateDictType): the desired ``state_dict_type`` to set. 
+ state_dict_config (Optional[StateDictConfig]): the configuration for the + target ``state_dict_type``. + optim_state_dict_config (Optional[OptimStateDictConfig]): the configuration + for the optimizer state dict. + + Returns: + A StateDictSettings that include the previous state_dict type and + configuration for the module. + """ + warnings.warn( + "FSDP.state_dict_type() and FSDP.set_state_dict_type() are being " + "deprecated. Please use APIs, get_state_dict() and set_state_dict(), " + "which can support different parallelisms, FSDP1, FSDP2, DDP. " + "API doc: https://pytorch.org/docs/stable/distributed.checkpoint.html" + "#torch.distributed.checkpoint.state_dict.get_state_dict ." + "Tutorial: https://pytorch.org/tutorials/recipes/distributed_checkpoint_recipe.html .", + FutureWarning, + ) + _state_dict_type_to_config = { + StateDictType.FULL_STATE_DICT: FullStateDictConfig, + StateDictType.LOCAL_STATE_DICT: LocalStateDictConfig, + StateDictType.SHARDED_STATE_DICT: ShardedStateDictConfig, + } + _optim_state_dict_type_to_config = { + StateDictType.FULL_STATE_DICT: FullOptimStateDictConfig, + StateDictType.LOCAL_STATE_DICT: LocalOptimStateDictConfig, + StateDictType.SHARDED_STATE_DICT: ShardedOptimStateDictConfig, + } + + # Use the default config if a state_dict config is not set. 
+ state_dict_config_type = _state_dict_type_to_config[state_dict_type] + optim_state_dict_config_type = _optim_state_dict_type_to_config[state_dict_type] + if state_dict_config is None: + state_dict_config = state_dict_config_type() + if optim_state_dict_config is None: + optim_state_dict_config = optim_state_dict_config_type() + if state_dict_config_type != type(state_dict_config): + raise RuntimeError( + f"Expected state_dict_config of type {state_dict_config_type} " + f"but got {type(state_dict_config)}" + ) + if optim_state_dict_config_type != type(optim_state_dict_config): + raise RuntimeError( + f"Expected optim_state_dict_config of type {optim_state_dict_config_type} " + f"but got {type(optim_state_dict_config)}" + ) + + # Set the state_dict type and configurations. + prev_state_dict_type = None + prev_state_dict_config = None + prev_optim_state_dict_config = None + for submodule in traversal_utils._get_fsdp_states(module): + if prev_state_dict_type is None: + prev_state_dict_type = submodule._state_dict_type + else: + assert ( + prev_state_dict_type == submodule._state_dict_type + ), "All FSDP modules should have the same state_dict_type." + if prev_state_dict_config is None: + prev_state_dict_config = submodule._state_dict_config + else: + assert isinstance( + submodule._state_dict_config, type(prev_state_dict_config) + ), "All FSDP modules must have the same type of state_dict_config." + if prev_optim_state_dict_config is None: + prev_optim_state_dict_config = submodule._optim_state_dict_config + else: + assert isinstance( + submodule._optim_state_dict_config, + type(prev_optim_state_dict_config), + ), "All FSDP modules must have the same type of optim_state_dict_config." 
+ + submodule._state_dict_type = state_dict_type + submodule._state_dict_config = state_dict_config + submodule._optim_state_dict_config = optim_state_dict_config + + return StateDictSettings( + prev_state_dict_type, prev_state_dict_config, prev_optim_state_dict_config + ) + + @staticmethod + def get_state_dict_type(module: nn.Module) -> StateDictSettings: + """Get the state_dict_type and the corresponding configurations for the FSDP modules rooted at ``module``. + + The target module does not have to be an FSDP module. + + Returns: + A ``StateDictSettings`` containing the state_dict_type and + state_dict / optim_state_dict configs that are currently set. + + Raises: + ``AssertionError`` if the ``StateDictSettings`` for different + FSDP submodules differ. + """ + state_dict_settings: Optional[StateDictSettings] = None + for submodule in FullyShardedDataParallel.fsdp_modules(module): + if state_dict_settings is None: + state_dict_settings = StateDictSettings( + state_dict_type=submodule._state_dict_type, + state_dict_config=submodule._state_dict_config, + optim_state_dict_config=submodule._optim_state_dict_config, + ) + _set_optim_use_dtensor(submodule, state_dict_settings) + else: + submodule_settings = StateDictSettings( + submodule._state_dict_type, + submodule._state_dict_config, + submodule._optim_state_dict_config, + ) + assert state_dict_settings == submodule_settings, ( + "All FSDP modules must have the same state dict settings." + f"Got {submodule_settings} and {state_dict_settings}." + ) + _set_optim_use_dtensor(submodule, submodule_settings) + return state_dict_settings + + @staticmethod + @contextlib.contextmanager + def state_dict_type( + module: nn.Module, + state_dict_type: StateDictType, + state_dict_config: Optional[StateDictConfig] = None, + optim_state_dict_config: Optional[OptimStateDictConfig] = None, + ) -> Generator: + """Set the ``state_dict_type`` of all the descendant FSDP modules of the target module. 
+ + This context manager has the same functions as :meth:`set_state_dict_type`. Read the document of + :meth:`set_state_dict_type` for the detail. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> model = DDP(FSDP(...)) + >>> with FSDP.state_dict_type( + >>> model, + >>> StateDictType.SHARDED_STATE_DICT, + >>> ): + >>> checkpoint = model.state_dict() + + Args: + module (torch.nn.Module): Root module. + state_dict_type (StateDictType): the desired ``state_dict_type`` to set. + state_dict_config (Optional[StateDictConfig]): the model ``state_dict`` + configuration for the target ``state_dict_type``. + optim_state_dict_config (Optional[OptimStateDictConfig]): the optimizer + ``state_dict`` configuration for the target ``state_dict_type``. + """ + prev_state_dict_settings = FullyShardedDataParallel.set_state_dict_type( + module, + state_dict_type, + state_dict_config, + optim_state_dict_config, + ) + yield + FullyShardedDataParallel.set_state_dict_type( + module, + prev_state_dict_settings.state_dict_type, + prev_state_dict_settings.state_dict_config, + prev_state_dict_settings.optim_state_dict_config, + ) + + def forward(self, *args: Any, **kwargs: Any) -> Any: + """Run the forward pass for the wrapped module, inserting FSDP-specific pre- and post-forward sharding logic.""" + handle = self._handle + with torch.autograd.profiler.record_function( + "FullyShardedDataParallel.forward" + ): + args, kwargs = _root_pre_forward(self, self, args, kwargs) + unused = None + args, kwargs = _pre_forward( + self, + handle, + _pre_forward_unshard, + self._fsdp_wrapped_module, + args, + kwargs, + ) + if handle: + _p_assert( + handle.flat_param.device == self.compute_device, + "Expected `FlatParameter` to be on the compute device " + f"{self.compute_device} but got {handle.flat_param.device}", + ) + output = self._fsdp_wrapped_module(*args, **kwargs) + return _post_forward( + self, handle, _post_forward_reshard, self, unused, output + ) + + @staticmethod + 
@contextlib.contextmanager + def summon_full_params( + module: nn.Module, + recurse: bool = True, + writeback: bool = True, + rank0_only: bool = False, + offload_to_cpu: bool = False, + with_grads: bool = False, + ) -> Generator: + r"""Expose full params for FSDP instances with this context manager. + + Can be useful *after* forward/backward for a model to get + the params for additional processing or checking. It can take a non-FSDP + module and will summon full params for all contained FSDP modules as + well as their children, depending on the ``recurse`` argument. + + .. note:: This can be used on inner FSDPs. + .. note:: This can *not* be used within a forward or backward pass. Nor + can forward and backward be started from within this context. + .. note:: Parameters will revert to their local shards after the context + manager exits, storage behavior is the same as forward. + .. note:: The full parameters can be modified, but only the portion + corresponding to the local param shard will persist after the + context manager exits (unless ``writeback=False``, in which case + changes will be discarded). In the case where FSDP does not shard + the parameters, currently only when ``world_size == 1``, or ``NO_SHARD`` + config, the modification is persisted regardless of ``writeback``. + .. note:: This method works on modules which are not FSDP themselves but + may contain multiple independent FSDP units. In that case, the given + arguments will apply to all contained FSDP units. + + .. warning:: Note that ``rank0_only=True`` in conjunction with + ``writeback=True`` is not currently supported and will raise an + error. This is because model parameter shapes would be different + across ranks within the context, and writing to them can lead to + inconsistency across ranks when the context is exited. + + .. 
warning:: Note that ``offload_to_cpu`` and ``rank0_only=False`` will + result in full parameters being redundantly copied to CPU memory for + GPUs that reside on the same machine, which may incur the risk of + CPU OOM. It is recommended to use ``offload_to_cpu`` with + ``rank0_only=True``. + + Args: + recurse (bool, Optional): recursively summon all params for nested + FSDP instances (default: True). + writeback (bool, Optional): if ``False``, modifications to params are + discarded after the context manager exits; + disabling this can be slightly more efficient (default: True) + rank0_only (bool, Optional): if ``True``, full parameters are + materialized on only global rank 0. This means that within the + context, only rank 0 will have full parameters and the other + ranks will have sharded parameters. Note that setting + ``rank0_only=True`` with ``writeback=True`` is not supported, + as model parameter shapes will be different across ranks + within the context, and writing to them can lead to + inconsistency across ranks when the context is exited. + offload_to_cpu (bool, Optional): If ``True``, full parameters are + offloaded to CPU. Note that this offloading currently only + occurs if the parameter is sharded (which is only not the case + for world_size = 1 or ``NO_SHARD`` config). It is recommended + to use ``offload_to_cpu`` with ``rank0_only=True`` to avoid + redundant copies of model parameters being offloaded to the same CPU memory. + with_grads (bool, Optional): If ``True``, gradients are also + unsharded with the parameters. Currently, this is only + supported when passing ``use_orig_params=True`` to the FSDP + constructor and ``offload_to_cpu=False`` to this method. + (Default: ``False``) + """ + with _unshard_params( + module, recurse, writeback, rank0_only, offload_to_cpu, with_grads + ): + yield + + @contextlib.contextmanager + def _deregister_orig_params_ctx(self): + """Deregister the original parameters and expose the :class:`FlatParameter`. 
+ + If a :class:`FlatParameter` is sharded, then + this refreshes the sharded views before exiting. This method should + only be called when using the original parameters. + """ + _p_assert( + self._use_orig_params, + "`_deregister_orig_params_ctx()` should only be called when " + "`_use_orig_params=True`", + ) + for fsdp_module in traversal_utils._get_fsdp_states(self): + _deregister_orig_params(fsdp_module, fsdp_module) + try: + yield + finally: + for fsdp_module in traversal_utils._get_fsdp_states(self): + _register_orig_params(fsdp_module, fsdp_module) + + def _apply(self, *args, **kwargs): + """Deregister the original parameters and expose the :class:`FlatParameter` s before calling ``_apply()``.""" + # When using the original parameters: Since (1) the `FlatParameter`s + # own the storage and (2) `_apply()` is the subroutine underlying the + # most common storage-changing ops like `to()` and `cuda()`, we + # override `_apply()` to have the storage change directly performed on + # the `FlatParameter`s instead of applying to the original parameters + # and then writing back to the `FlatParameter`s. + context = ( + self._deregister_orig_params_ctx() + if self._use_orig_params + else contextlib.nullcontext() + ) + with context: + return super()._apply(*args, **kwargs) + + def named_buffers( + self, + *args, + **kwargs, + ) -> Iterator[Tuple[str, torch.Tensor]]: + """Return an iterator over module buffers, yielding both the name of the buffer and the buffer itself. + + Intercepts buffer names and removes all occurrences of the FSDP-specific flattened buffer prefix + when inside the :meth:`summon_full_params` context manager. 
+ """ + should_clean_name = self.training_state == TrainingState.SUMMON_FULL_PARAMS + for buffer_name, buffer in super().named_buffers(*args, **kwargs): + if should_clean_name: + # Remove any instances of the FSDP-specific prefix; there can + # be multiple in the case of nested FSDP modules + buffer_name = buffer_name.replace(FSDP_PREFIX, "") + yield (buffer_name, buffer) + + def named_parameters( + self, + *args, + **kwargs, + ) -> Iterator[Tuple[str, torch.nn.Parameter]]: + """Return an iterator over module parameters, yielding both the name of the parameter and the parameter itself. + + Intercepts parameter names and removes all occurrences of the FSDP-specific flattened parameter prefix + when inside the :meth:`summon_full_params` context manager. + """ + should_clean_name = self.training_state == TrainingState.SUMMON_FULL_PARAMS + for param_name, param in super().named_parameters(*args, **kwargs): + if should_clean_name: + # Remove any instances of the FSDP-specific prefix; there can + # be multiple in the case of nested FSDP modules + param_name = param_name.replace(FSDP_PREFIX, "") + yield (param_name, param) + + def _assert_state(self, state: Union[TrainingState, List[TrainingState]]) -> None: + """Assert we are in the given state.""" + # Since assert can be turned off and this error checking + # is really important, we use explicit error checking + # and raise a ValueError if needed. + if isinstance(state, TrainingState): + state = [state] + if self.training_state not in state: + msg = ( + f"expected to be in states {state} but current state " + f"is {self.training_state}" + ) + # In case we are failing in the context of autograd hook, asserting + # may not generate useful msg. So, let's print it to be sure. 
+ if self.rank == 0: + print(f"Asserting FSDP instance is: {self}") + print(f"ERROR: {msg}") + traceback.print_stack() + raise ValueError(msg) + + @contextmanager + def no_sync(self) -> Generator: + """Disable gradient synchronizations across FSDP instances. + + Within this context, gradients will be accumulated in module + variables, which will later be synchronized in the first + forward-backward pass after exiting the context. This should only be + used on the root FSDP instance and will recursively apply to all + children FSDP instances. + + .. note:: This likely results in higher memory usage because FSDP will + accumulate the full model gradients (instead of gradient shards) + until the eventual sync. + + .. note:: When used with CPU offloading, the gradients will not be + offloaded to CPU when inside the context manager. Instead, they + will only be offloaded right after the eventual sync. + """ + _lazy_init(self, self) + if not self._is_root: + raise RuntimeError( + "`no_sync()` on inner FSDP instances is not supported. Please call `no_sync()` on root FSDP module." + ) + self._assert_state(TrainingState.IDLE) + old_flags = [] + for m in self.modules(): + if isinstance(m, FullyShardedDataParallel): + old_flags.append((m, m._sync_gradients)) + m._sync_gradients = False + try: + yield + finally: + for m, old_flag in old_flags: + assert not m._sync_gradients, ( + "`_sync_gradients` was incorrectly set to " + "`True` while in the `no_sync()` context manager" + ) + m._sync_gradients = old_flag + + @torch.no_grad() + def clip_grad_norm_( + self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0 + ) -> torch.Tensor: + """Clip the gradient norm of all parameters. + + The norm is computed over all parameters' gradients as viewed as a single vector, and the + gradients are modified in-place. + + Args: + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` + for infinity norm. 
+ + Returns: + Total norm of the parameters (viewed as a single vector). + + If every FSDP instance uses ``NO_SHARD``, meaning that no + gradients are sharded across ranks, then you may directly use + :func:`torch.nn.utils.clip_grad_norm_`. + + If at least some FSDP instance uses a sharded strategy (i.e. + one other than ``NO_SHARD``), then you should use this method + instead of :func:`torch.nn.utils.clip_grad_norm_` since this method + handles the fact that gradients are sharded across ranks. + + The total norm returned will have the "largest" dtype across + all parameters/gradients as defined by PyTorch's type promotion + semantics. For example, if *all* parameters/gradients use a low + precision dtype, then the returned norm's dtype will be that low + precision dtype, but if there exists at least one parameter/ + gradient using FP32, then the returned norm's dtype will be FP32. + + .. warning:: This needs to be called on all ranks since it uses + collective communications. + """ + _lazy_init(self, self) + if not self._is_root: + raise RuntimeError( + "`clip_grad_norm_()` should only be called on the root FSDP instance" + ) + if self._zero_scalar is None: + self._zero_scalar = torch.tensor(0.0, device=self.compute_device) + self._assert_state(TrainingState.IDLE) + # If every FSDP instance uses `NO_SHARD`, then we can directly use + # the normal `nn.utils` one targeting local gradients + all_no_shard = all( + not handle.uses_sharded_strategy for handle in self._all_handles + ) + if all_no_shard: + return torch.nn.utils.clip_grad_norm_( + self.parameters(), max_norm, norm_type + ) + # Otherwise, there exists some FSDP instance using a sharded strategy, + # where sharded and non-sharded parameters must be handled separately + max_norm = float(max_norm) + norm_type = float(norm_type) + sharded_params_set = set() + nonsharded_params_set = set() # `NO_SHARD` or not FSDP-managed + # Make sure to compute the local norm using lists for deterministic + # iteration order 
and hence deterministic total norm computation + sharded_params = [] + nonsharded_params = [] + grads: List[torch.Tensor] = [] + for handle in self._all_handles: + if handle.uses_sharded_strategy: + target_set = sharded_params_set + target_list = sharded_params + else: + target_set = nonsharded_params_set + target_list = nonsharded_params + if handle._use_orig_params: + for param in handle.flat_param._params: + if param not in target_set: + target_set.add(param) + target_list.append(param) + if param.grad is not None: + grads.append(param.grad) + else: + if handle.flat_param not in target_set: + target_set.add(handle.flat_param) + target_list.append(handle.flat_param) + if handle.flat_param.grad is not None: + grads.append(handle.flat_param.grad) + for param in self.parameters(): + not_fsdp_managed = ( + param not in sharded_params_set and param not in nonsharded_params_set + ) + if not_fsdp_managed: + nonsharded_params_set.add(param) + nonsharded_params.append(param) + if param.grad is not None: + grads.append(param.grad) + # Compute local norms (forced to be in FP32) + local_sharded_norm = _get_grad_norm( + sharded_params, norm_type, self._zero_scalar, self.compute_device + ) + local_nonsharded_norm = ( + _get_grad_norm( + nonsharded_params, norm_type, self._zero_scalar, self.compute_device + ) + if nonsharded_params + else None + ) + # Reconstruct the total gradient norm depending on the norm type + if norm_type == math.inf: + total_norm = ( + torch.maximum(local_sharded_norm, local_nonsharded_norm) + if local_nonsharded_norm is not None + else local_sharded_norm + ) + dist.all_reduce( + total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group + ) + else: + total_norm = local_sharded_norm**norm_type + dist.all_reduce(total_norm, group=self.process_group) + # All-reducing the local non-sharded norm would count it an extra + # world-size-many times + if local_nonsharded_norm is not None: + total_norm += local_nonsharded_norm**norm_type + total_norm 
= total_norm ** (1.0 / norm_type) + if self.cpu_offload.offload_params: + total_norm = total_norm.cpu() + + clip_coef = max_norm / (total_norm + 1e-6) + # Multiplying by the clamped coefficient is meaningless when it is + # equal to 1, but it avoids the host-device sync that would result from + # `if clip_coef < 1` + clip_coef_clamped = torch.clamp(clip_coef, max=1.0) + for grad in grads: + grad.mul_(clip_coef_clamped.to(grad.device, grad.dtype)) + # Use the "largest" dtype by type promotion semantics to use the same + # dtype as if we did not force local norm computation to be in FP32 + if len(grads) == 0: + # If this rank has no gradients, then we must default to FP32 + # unless we use additional communication, which we prefer to avoid + # since `clip_grad_norm_()` is called in the training loop + warnings.warn( + f"Called FSDP.clip_grad_norm_() on rank {self.rank} with no " + "gradients -- returning the total norm in the default dtype " + f"{total_norm.dtype}" + ) # warn since this is generally unexpected + return total_norm + total_norm_dtype = functools.reduce( + torch.promote_types, + [grad.dtype for grad in grads], + ) + return total_norm.to(total_norm_dtype) + + @staticmethod + def _warn_optim_input(optim_input, *, stacklevel: int = 1): + if optim_input is not None: + warnings.warn( + "The `optim_input` argument is deprecated and will be removed after PyTorch 1.13. 
" + "You may remove it from your code without changing its functionality.", + FutureWarning, + stacklevel=stacklevel + 1, + ) + + @staticmethod + def _is_using_optim_input(optim_input, optim) -> bool: + if optim_input is None and optim is None: + # Use the default behavior of `optim_input`` + return True + if optim_input is not None: + # Use the `optim_input` code path + return True + # Use the `optim` code path + return False + + @staticmethod + def _warn_legacy_optim_state_dict(curr: str, new: str, *, stacklevel: int = 1): + warnings.warn( + f"``FullyShardedDataParallel.{curr}``is being deprecated and is " + f"replaced by ``FullyShardedDataParallel.{new}``. " + f"``FullyShardedDataParallel.{curr}`` may be removed after PyTorch 2.2.", + FutureWarning, + stacklevel=stacklevel + 1, + ) + + @staticmethod + def _optim_state_dict_impl( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Dict[str, Any], + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + rank0_only: bool = True, + full_state_dict: bool = True, + group: Optional[dist.ProcessGroup] = None, + cpu_offload: bool = True, + *, + _stacklevel: int = 1, + ) -> Dict[str, Any]: + """Transform the state-dict of an optimizer corresponding to a sharded model. + + This is the internal API that is used by all the optim_state_dict implementations. + Given model, optim, the original optim_state_dict, this API removes the + FSDP internal information and internal sharding from the optim_state_dict. 
+ """ + if full_state_dict: + FullyShardedDataParallel._warn_optim_input( + optim_input, stacklevel=_stacklevel + 1 + ) + using_optim_input = FullyShardedDataParallel._is_using_optim_input( + optim_input, + optim, + ) + else: + using_optim_input = False + assert optim_input is None and not rank0_only + + use_orig_params = FullyShardedDataParallel.fsdp_modules(model)[ + 0 + ]._use_orig_params + assert all( + use_orig_params == m._use_orig_params + for m in FullyShardedDataParallel.fsdp_modules(model) + ), "Not all FSDP modules have the same _use_orig_params value" + + return _optim_state_dict( + model=model, + optim=optim, + optim_state_dict=optim_state_dict, + optim_input=optim_input, + rank0_only=rank0_only, + shard_state=not full_state_dict, + group=group, + using_optim_input=using_optim_input, + use_orig_params=use_orig_params, + cpu_offload=cpu_offload, + ) + + @staticmethod + def _optim_state_dict_to_load_impl( + optim_state_dict: Dict[str, Any], + model: torch.nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + optim: Optional[torch.optim.Optimizer] = None, + full_state_dict: bool = True, + rank0_only: bool = False, + is_named_optimizer: bool = False, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """ + Convert an optimizer state-dict so that it can be loaded into the optimizer associated with the FSDP model. + + This is the internal API that is used by all the load optim_state_dict implementations. + Given model, optim, and the saved optim_state_dict, this API adds the FSDP + internal information and internal sharding to the optim_state_dict. 
+ """ + if full_state_dict: + FullyShardedDataParallel._warn_optim_input(optim_input) + using_optim_input = FullyShardedDataParallel._is_using_optim_input( + optim_input, + optim, + ) + else: + using_optim_input = False + assert optim_input is None and not rank0_only + + use_orig_params = FullyShardedDataParallel.fsdp_modules(model)[ + 0 + ]._use_orig_params + assert all( + use_orig_params == m._use_orig_params + for m in FullyShardedDataParallel.fsdp_modules(model) + ), "Not all FSDP modules have the same _use_orig_params value" + + if rank0_only and dist.get_rank(group) > 0: + optim_state_dict = {} + sharded_osd = _flatten_optim_state_dict( + optim_state_dict, + model=model, + use_orig_params=use_orig_params, + optim=(optim if is_named_optimizer else None), + rank0_only=rank0_only, + group=group, + ) + return _rekey_sharded_optim_state_dict( + sharded_osd, + model=model, + optim=optim, + optim_input=optim_input, + using_optim_input=using_optim_input, + is_named_optimizer=is_named_optimizer, + ) + + @staticmethod + def full_optim_state_dict( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + rank0_only: bool = True, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """Return the full optimizer state-dict. + + Consolidates the full optimizer state on rank 0 and returns it + as a :class:`dict` following the convention of + :meth:`torch.optim.Optimizer.state_dict`, i.e. with keys ``"state"`` + and ``"param_groups"``. The flattened parameters in ``FSDP`` modules + contained in ``model`` are mapped back to their unflattened parameters. + + This needs to be called on all ranks since it uses + collective communications. However, if ``rank0_only=True``, then + the state dict is only populated on rank 0, and all other ranks + return an empty :class:`dict`. 
+ + Unlike ``torch.optim.Optimizer.state_dict()``, this method + uses full parameter names as keys instead of parameter IDs. + + Like in :meth:`torch.optim.Optimizer.state_dict`, the tensors + contained in the optimizer state dict are not cloned, so there may + be aliasing surprises. For best practices, consider saving the + returned optimizer state dict immediately, e.g. using + ``torch.save()``. + + Args: + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]): + Input passed into the optimizer ``optim`` representing either a + :class:`list` of parameter groups or an iterable of parameters; + if ``None``, then this method assumes the input was + ``model.parameters()``. This argument is deprecated, and there + is no need to pass it in anymore. (Default: ``None``) + rank0_only (bool): If ``True``, saves the populated :class:`dict` + only on rank 0; if ``False``, saves it on all ranks. (Default: + ``True``) + group (dist.ProcessGroup): Model's process group or ``None`` if using + the default process group. (Default: ``None``) + + Returns: + Dict[str, Any]: A :class:`dict` containing the optimizer state for + ``model`` 's original unflattened parameters and including keys + "state" and "param_groups" following the convention of + :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=True``, + then nonzero ranks return an empty :class:`dict`. 
+ """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "full_optim_state_dict", + "optim_state_dict", + stacklevel=2, + ) + return FullyShardedDataParallel._optim_state_dict_impl( + model=model, + optim=optim, + optim_state_dict=optim.state_dict(), + optim_input=optim_input, + rank0_only=rank0_only, + group=group, + full_state_dict=True, + _stacklevel=2, + ) + + @staticmethod + def sharded_optim_state_dict( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """Return the optimizer state-dict in its sharded form. + + The API is similar to :meth:`full_optim_state_dict` but this API chunks + all non-zero-dimension states to :class:`ShardedTensor` to save memory. + This API should only be used when the model ``state_dict`` is derived + with the context manager ``with state_dict_type(SHARDED_STATE_DICT):``. + + For the detailed usage, refer to :meth:`full_optim_state_dict`. + + .. warning:: The returned state dict contains ``ShardedTensor`` and + cannot be directly used by the regular ``optim.load_state_dict``. + """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "sharded_optim_state_dict", + "optim_state_dict", + stacklevel=2, + ) + return FullyShardedDataParallel._optim_state_dict_impl( + model=model, + optim=optim, + optim_state_dict=optim.state_dict(), + optim_input=None, + rank0_only=False, + full_state_dict=False, + group=group, + _stacklevel=2, + ) + + @staticmethod + def shard_full_optim_state_dict( + full_optim_state_dict: Dict[str, Any], + model: torch.nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + optim: Optional[torch.optim.Optimizer] = None, + ) -> Dict[str, Any]: + """Shard a full optimizer state-dict. + + Remaps the state in ``full_optim_state_dict`` to flattened parameters instead of unflattened + parameters and restricts to only this rank's part of the optimizer state. 
+ The first argument should be the return value of :meth:`full_optim_state_dict`. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> model, optim = ... + >>> full_osd = FSDP.full_optim_state_dict(model, optim) + >>> torch.save(full_osd, PATH) + >>> # Define new model with possibly different world size + >>> new_model, new_optim = ... + >>> full_osd = torch.load(PATH) + >>> sharded_osd = FSDP.shard_full_optim_state_dict(full_osd, new_model) + >>> new_optim.load_state_dict(sharded_osd) + + .. note:: Both :meth:`shard_full_optim_state_dict` and + :meth:`scatter_full_optim_state_dict` may be used to get the + sharded optimizer state dict to load. Assuming that the full + optimizer state dict resides in CPU memory, the former requires + each rank to have the full dict in CPU memory, where each rank + individually shards the dict without any communication, while the + latter requires only rank 0 to have the full dict in CPU memory, + where rank 0 moves each shard to GPU memory (for NCCL) and + communicates it to ranks appropriately. Hence, the former has + higher aggregate CPU memory cost, while the latter has higher + communication cost. + + Args: + full_optim_state_dict (Dict[str, Any]): Optimizer state dict + corresponding to the unflattened parameters and holding the + full non-sharded optimizer state. + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + correspond to the optimizer state in ``full_optim_state_dict``. + optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]): + Input passed into the optimizer representing either a + :class:`list` of parameter groups or an iterable of parameters; + if ``None``, then this method assumes the input was + ``model.parameters()``. This argument is deprecated, and there + is no need to pass it in anymore. 
(Default: ``None``) + optim (Optional[torch.optim.Optimizer]): Optimizer that will load + the state dict returned by this method. This is the preferred + argument to use over ``optim_input``. (Default: ``None``) + + Returns: + Dict[str, Any]: The full optimizer state dict now remapped to + flattened parameters instead of unflattened parameters and + restricted to only include this rank's part of the optimizer state. + """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "shard_full_optim_state_dict", + "optim_state_dict_to_load", + stacklevel=2, + ) + return FullyShardedDataParallel._optim_state_dict_to_load_impl( + optim_state_dict=full_optim_state_dict, + model=model, + optim_input=optim_input, + optim=optim, + full_state_dict=True, + is_named_optimizer=False, + ) + + @staticmethod + def flatten_sharded_optim_state_dict( + sharded_optim_state_dict: Dict[str, Any], + model: torch.nn.Module, + optim: torch.optim.Optimizer, + ) -> Dict[str, Any]: + """Flatten a sharded optimizer state-dict. + + The API is similar to :meth:`shard_full_optim_state_dict`. The only + difference is that the input ``sharded_optim_state_dict`` should be + returned from :meth:`sharded_optim_state_dict`. Therefore, there will + be all-gather calls on each rank to gather ``ShardedTensor`` s. + + Args: + sharded_optim_state_dict (Dict[str, Any]): Optimizer state dict + corresponding to the unflattened parameters and holding the + sharded optimizer state. + model (torch.nn.Module): + Refer to :meth:`shard_full_optim_state_dict`. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + + Returns: + Refer to :meth:`shard_full_optim_state_dict`. 
+ """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "flatten_sharded_optim_state_dict", + "optim_state_dict_to_load", + stacklevel=2, + ) + return FullyShardedDataParallel._optim_state_dict_to_load_impl( + optim_state_dict=sharded_optim_state_dict, + model=model, + optim_input=None, + optim=optim, + full_state_dict=False, + is_named_optimizer=False, + ) + + @staticmethod + def scatter_full_optim_state_dict( + full_optim_state_dict: Optional[Dict[str, Any]], + model: torch.nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + optim: Optional[torch.optim.Optimizer] = None, + group: Optional[Any] = None, + ) -> Dict[str, Any]: + """Scatter the full optimizer state dict from rank 0 to all other ranks. + + Returns the sharded optimizer state dict on each rank. + The return value is the same as :meth:`shard_full_optim_state_dict`, and on rank + 0, the first argument should be the return value of + :meth:`full_optim_state_dict`. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> model, optim = ... + >>> full_osd = FSDP.full_optim_state_dict(model, optim) # only non-empty on rank 0 + >>> # Define new model with possibly different world size + >>> new_model, new_optim, new_group = ... + >>> sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, new_model, group=new_group) + >>> new_optim.load_state_dict(sharded_osd) + + .. note:: Both :meth:`shard_full_optim_state_dict` and + :meth:`scatter_full_optim_state_dict` may be used to get the + sharded optimizer state dict to load. 
Assuming that the full + optimizer state dict resides in CPU memory, the former requires + each rank to have the full dict in CPU memory, where each rank + individually shards the dict without any communication, while the + latter requires only rank 0 to have the full dict in CPU memory, + where rank 0 moves each shard to GPU memory (for NCCL) and + communicates it to ranks appropriately. Hence, the former has + higher aggregate CPU memory cost, while the latter has higher + communication cost. + + Args: + full_optim_state_dict (Optional[Dict[str, Any]]): Optimizer state + dict corresponding to the unflattened parameters and holding + the full non-sharded optimizer state if on rank 0; the argument + is ignored on nonzero ranks. + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + correspond to the optimizer state in ``full_optim_state_dict``. + optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]): + Input passed into the optimizer representing either a + :class:`list` of parameter groups or an iterable of parameters; + if ``None``, then this method assumes the input was + ``model.parameters()``. This argument is deprecated, and there + is no need to pass it in anymore. (Default: ``None``) + optim (Optional[torch.optim.Optimizer]): Optimizer that will load + the state dict returned by this method. This is the preferred + argument to use over ``optim_input``. (Default: ``None``) + group (dist.ProcessGroup): Model's process group or ``None`` if + using the default process group. (Default: ``None``) + + Returns: + Dict[str, Any]: The full optimizer state dict now remapped to + flattened parameters instead of unflattened parameters and + restricted to only include this rank's part of the optimizer state. 
+ """ + FullyShardedDataParallel._warn_legacy_optim_state_dict( + "scatter_full_optim_state_dict", + "optim_state_dict_to_load", + stacklevel=2, + ) + return FullyShardedDataParallel._optim_state_dict_to_load_impl( + optim_state_dict=full_optim_state_dict, + model=model, + optim_input=optim_input, + optim=optim, + full_state_dict=True, + rank0_only=True, + is_named_optimizer=False, + group=group, + ) + + @staticmethod + def rekey_optim_state_dict( + optim_state_dict: Dict[str, Any], + optim_state_key_type: OptimStateKeyType, + model: torch.nn.Module, + optim_input: Optional[ + Union[ + List[Dict[str, Any]], + Iterable[torch.nn.Parameter], + ] + ] = None, + optim: Optional[torch.optim.Optimizer] = None, + ) -> Dict[str, Any]: + """Re-keys the optimizer state dict ``optim_state_dict`` to use the key type ``optim_state_key_type``. + + This can be used to achieve compatibility between optimizer state dicts from models with FSDP + instances and ones without. + + To re-key an FSDP full optimizer state dict (i.e. from + :meth:`full_optim_state_dict`) to use parameter IDs and be loadable to + a non-wrapped model:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> wrapped_model, wrapped_optim = ... + >>> full_osd = FSDP.full_optim_state_dict(wrapped_model, wrapped_optim) + >>> nonwrapped_model, nonwrapped_optim = ... + >>> rekeyed_osd = FSDP.rekey_optim_state_dict(full_osd, OptimStateKeyType.PARAM_ID, nonwrapped_model) + >>> nonwrapped_optim.load_state_dict(rekeyed_osd) + + To re-key a normal optimizer state dict from a non-wrapped model to be + loadable to a wrapped model:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> nonwrapped_model, nonwrapped_optim = ... + >>> osd = nonwrapped_optim.state_dict() + >>> rekeyed_osd = FSDP.rekey_optim_state_dict(osd, OptimStateKeyType.PARAM_NAME, nonwrapped_model) + >>> wrapped_model, wrapped_optim = ... 
+ >>> sharded_osd = FSDP.shard_full_optim_state_dict(rekeyed_osd, wrapped_model) + >>> wrapped_optim.load_state_dict(sharded_osd) + + Returns: + Dict[str, Any]: The optimizer state dict re-keyed using the + parameter keys specified by ``optim_state_key_type``. + """ + FullyShardedDataParallel._warn_optim_input(optim_input) + using_optim_input = FullyShardedDataParallel._is_using_optim_input( + optim_input, + optim, + ) + assert optim_state_key_type in ( + OptimStateKeyType.PARAM_NAME, + OptimStateKeyType.PARAM_ID, + ) + osd = optim_state_dict # alias + # Validate that the existing parameter keys are uniformly typed + uses_param_name_mask = [type(param_key) is str for param_key in osd["state"]] + uses_param_id_mask = [type(param_key) is int for param_key in osd["state"]] + if (any(uses_param_name_mask) and not all(uses_param_name_mask)) or ( + any(uses_param_id_mask) and not all(uses_param_id_mask) + ): + error_msg = f"Invalid parameter keys: {osd['state'].keys()}" + raise ValueError(error_msg) + # Return directly if the existing key type matches the target key type + if ( + optim_state_key_type == OptimStateKeyType.PARAM_NAME + and all(uses_param_name_mask) + ) or ( + optim_state_key_type == OptimStateKeyType.PARAM_ID + and all(uses_param_id_mask) + ): + return osd + # Otherwise, actually perform the re-keying + new_osd = {} + if optim_state_key_type == OptimStateKeyType.PARAM_NAME: # ID -> name + param_id_to_param = ( + _get_param_id_to_param_from_optim_input(model, optim_input) + if using_optim_input + else _get_param_key_to_param(optim) + ) + param_to_param_name = _get_param_to_fqn(model) + param_id_to_param_name: List[str] = [ + param_to_param_name[param] for param in param_id_to_param.values() + ] + new_osd["state"] = { + param_id_to_param_name[param_id]: param_state + for param_id, param_state in osd["state"].items() + } + new_osd["param_groups"] = copy.deepcopy(osd["param_groups"]) + for param_group in new_osd["param_groups"]: + param_group["params"] = 
sorted( + [ + param_id_to_param_name[param_id] + for param_id in param_group["params"] + ] + ) + return new_osd + elif optim_state_key_type == OptimStateKeyType.PARAM_ID: # name -> ID + param_name_to_param = _get_fqn_to_param(model) + param_to_param_id = ( + _get_param_to_param_id_from_optim_input(model, optim_input) + if using_optim_input + else _get_param_to_param_key(optim) + ) + # Because not all model parameters may be passed as the optimizer + # input, we may need to drop some parameters from this mapping + param_name_to_param_id = { + param_name: param_to_param_id[param] + for param_name, param in param_name_to_param.items() + if param in param_to_param_id + } + new_osd["state"] = { + param_name_to_param_id[param_name]: param_state + for param_name, param_state in osd["state"].items() + } + new_osd["param_groups"] = copy.deepcopy(osd["param_groups"]) + for param_group in new_osd["param_groups"]: + param_group["params"] = sorted( + [ + param_name_to_param_id[param_name] + for param_name in param_group["params"] + ] + ) + return new_osd + return new_osd # should never reach here + + @staticmethod + def optim_state_dict( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Optional[Dict[str, Any]] = None, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """ + Transform the state-dict of an optimizer corresponding to a sharded model. + + The given state-dict can be transformed to one of three types: + 1) full optimizer state_dict, 2) sharded optimizer state_dict, 3) local optimizer state_dict. + + For full optimizer state_dict, all states are unflattened and not sharded. + Rank0 only and CPU only can be specified via :meth:`state_dict_type` to + avoid OOM. + + For sharded optimizer state_dict, all states are unflattened but sharded. + CPU only can be specified via :meth:`state_dict_type` to further save + memory. + + For local state_dict, no transformation will be performed. 
But a state + will be converted from nn.Tensor to ShardedTensor to represent its sharding + nature (this is not supported yet). + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> from torch.distributed.fsdp import StateDictType + >>> from torch.distributed.fsdp import FullStateDictConfig + >>> from torch.distributed.fsdp import FullOptimStateDictConfig + >>> # Save a checkpoint + >>> model, optim = ... + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.FULL_STATE_DICT, + >>> FullStateDictConfig(rank0_only=False), + >>> FullOptimStateDictConfig(rank0_only=False), + >>> ) + >>> state_dict = model.state_dict() + >>> optim_state_dict = FSDP.optim_state_dict(model, optim) + >>> save_a_checkpoint(state_dict, optim_state_dict) + >>> # Load a checkpoint + >>> model, optim = ... + >>> state_dict, optim_state_dict = load_a_checkpoint() + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.FULL_STATE_DICT, + >>> FullStateDictConfig(rank0_only=False), + >>> FullOptimStateDictConfig(rank0_only=False), + >>> ) + >>> model.load_state_dict(state_dict) + >>> optim_state_dict = FSDP.optim_state_dict_to_load( + >>> model, optim, optim_state_dict + >>> ) + >>> optim.load_state_dict(optim_state_dict) + + Args: + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + optim_state_dict (Dict[str, Any]): the target optimizer state_dict to + transform. If the value is None, optim.state_dict() will be used. ( + Default: ``None``) + group (dist.ProcessGroup): Model's process group across which parameters + are sharded or ``None`` if using the default process group. ( + Default: ``None``) + + Returns: + Dict[str, Any]: A :class:`dict` containing the optimizer state for + ``model``. 
The sharding of the optimizer state is based on + ``state_dict_type``. + """ + state_dict_settings = FullyShardedDataParallel.get_state_dict_type(model) + if optim_state_dict is None: + optim_state_dict = optim.state_dict() + return FullyShardedDataParallel._optim_state_dict_impl( + model=model, + optim=optim, + optim_state_dict=optim_state_dict, + optim_input=None, + rank0_only=getattr( + state_dict_settings.optim_state_dict_config, "rank0_only", False + ), + full_state_dict=state_dict_settings.state_dict_type + == StateDictType.FULL_STATE_DICT, + group=group, + cpu_offload=getattr( + state_dict_settings.optim_state_dict_config, "offload_to_cpu", True + ), + _stacklevel=2, + ) + + @staticmethod + def optim_state_dict_to_load( + model: torch.nn.Module, + optim: torch.optim.Optimizer, + optim_state_dict: Dict[str, Any], + is_named_optimizer: bool = False, + load_directly: bool = False, + group: Optional[dist.ProcessGroup] = None, + ) -> Dict[str, Any]: + """ + Convert an optimizer state-dict so that it can be loaded into the optimizer associated with the FSDP model. + + Given a ``optim_state_dict`` that is transformed through + :meth:`optim_state_dict`, it gets converted to the flattened optimizer + state_dict that can be loaded to ``optim`` which is the optimizer for + ``model``. ``model`` must be sharded by FullyShardedDataParallel. + + >>> # xdoctest: +SKIP("undefined variables") + >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + >>> from torch.distributed.fsdp import StateDictType + >>> from torch.distributed.fsdp import FullStateDictConfig + >>> from torch.distributed.fsdp import FullOptimStateDictConfig + >>> # Save a checkpoint + >>> model, optim = ... 
+ >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.FULL_STATE_DICT, + >>> FullStateDictConfig(rank0_only=False), + >>> FullOptimStateDictConfig(rank0_only=False), + >>> ) + >>> state_dict = model.state_dict() + >>> original_osd = optim.state_dict() + >>> optim_state_dict = FSDP.optim_state_dict( + >>> model, + >>> optim, + >>> optim_state_dict=original_osd + >>> ) + >>> save_a_checkpoint(state_dict, optim_state_dict) + >>> # Load a checkpoint + >>> model, optim = ... + >>> state_dict, optim_state_dict = load_a_checkpoint() + >>> FSDP.set_state_dict_type( + >>> model, + >>> StateDictType.FULL_STATE_DICT, + >>> FullStateDictConfig(rank0_only=False), + >>> FullOptimStateDictConfig(rank0_only=False), + >>> ) + >>> model.load_state_dict(state_dict) + >>> optim_state_dict = FSDP.optim_state_dict_to_load( + >>> model, optim, optim_state_dict + >>> ) + >>> optim.load_state_dict(optim_state_dict) + + Args: + model (torch.nn.Module): Root module (which may or may not be a + :class:`FullyShardedDataParallel` instance) whose parameters + were passed into the optimizer ``optim``. + optim (torch.optim.Optimizer): Optimizer for ``model`` 's + parameters. + optim_state_dict (Dict[str, Any]): The optimizer states to be loaded. + is_named_optimizer (bool): Is this optimizer a NamedOptimizer or + KeyedOptimizer. Only set to True if ``optim`` is TorchRec's + KeyedOptimizer or torch.distributed's NamedOptimizer. + load_directly (bool): If this is set to True, this API will also + call optim.load_state_dict(result) before returning the result. + Otherwise, users are responsible to call ``optim.load_state_dict()`` + (Default: ``False``) + group (dist.ProcessGroup): Model's process group across which parameters + are sharded or ``None`` if using the default process group. 
( + Default: ``None``) + """ + state_dict_settings = FullyShardedDataParallel.get_state_dict_type(model) + result = FullyShardedDataParallel._optim_state_dict_to_load_impl( + optim_state_dict=optim_state_dict, + model=model, + optim_input=None, + optim=optim, + full_state_dict=( + state_dict_settings.state_dict_type == StateDictType.FULL_STATE_DICT + ), + rank0_only=getattr( + state_dict_settings.optim_state_dict_config, "rank0_only", False + ), + is_named_optimizer=is_named_optimizer, + group=group, + ) + if load_directly: + optim.load_state_dict(result) + return result + + def register_comm_hook(self, state: object, hook: callable): + """Register a communication hook. + + This is an enhancement that provides a flexible hook to users where they can specify how FSDP aggregates + gradients across multiple workers. + This hook can be used to implement several algorithms like + `GossipGrad `_ and gradient compression + which involve different communication strategies for + parameter syncs while training with :class:`FullyShardedDataParallel`. + + .. warning :: + FSDP communication hook should be registered before running an initial forward pass + and only once. + + Args: + state (object): Passed to the hook to maintain any state information during the training process. + Examples include error feedback in gradient compression, + peers to communicate with next in `GossipGrad `_, etc. + It is locally stored by each worker + and shared by all the gradient tensors on the worker. + hook (Callable): Callable, which has one of the following signatures: + 1) ``hook: Callable[torch.Tensor] -> None``: + This function takes in a Python tensor, which represents + the full, flattened, unsharded gradient with respect to all variables + corresponding to the model this FSDP unit is wrapping + (that are not wrapped by other FSDP sub-units). 
+ It then performs all necessary processing and returns ``None``; + 2) ``hook: Callable[torch.Tensor, torch.Tensor] -> None``: + This function takes in two Python tensors, the first one represents + the full, flattened, unsharded gradient with respect to all variables + corresponding to the model this FSDP unit is wrapping + (that are not wrapped by other FSDP sub-units). The latter + represents a pre-sized tensor to store a chunk of a sharded gradient after + reduction. + In both cases, callable performs all necessary processing and returns ``None``. + Callables with signature 1 are expected to handle gradient communication for a `NO_SHARD` case. + Callables with signature 2 are expected to handle gradient communication for sharded cases. + + """ + if not self.check_is_root(): + raise AssertionError( + "register_comm_hook can only be called on a root instance." + ) + for fsdp_state in traversal_utils._get_fsdp_states(self): + if fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES: + raise AssertionError( + f"Communication hook is not supported for hybrid strategies: {fsdp_state.sharding_strategy}" + ) + if fsdp_state._comm_hook is not None: + raise AssertionError("A communication hook is already registered") + if not callable(hook): + raise ValueError( + f"The communication hook must be callable but got {hook}" + ) + fsdp_state._comm_hook = hook + fsdp_state._comm_hook_state = state + + def _unshard(self, async_op: bool = False): + class UnshardHandle: + def __init__( + self, + flat_param_handle: Optional[FlatParamHandle], + unshard_event: torch.Event, + ): + self._flat_param_handle = flat_param_handle + self._unshard_event = unshard_event + + def wait(self): + if self._flat_param_handle is not None: + current_stream = ( + self._flat_param_handle._device_handle.current_stream() + ) + current_stream.wait_event(self._unshard_event) + self._flat_param_handle = None + + if self._handle: + with self._use_training_state( + TrainingState.FORWARD_BACKWARD, 
HandleTrainingState.FORWARD + ): + _unshard( + self, self._handle, self._unshard_stream, self._pre_unshard_stream + ) + self._unshard_event = self._unshard_stream.record_event() + self._handle._prefetched = True + unshard_handle = UnshardHandle(self._handle, self._unshard_stream) + if async_op: + return unshard_handle + unshard_handle.wait() + return None + + def _wait_unshard_streams_on_current_stream(self): + _wait_for_computation_stream( + self._device_handle.current_stream(), + self._unshard_stream, + self._pre_unshard_stream, + ) + + @contextlib.contextmanager + def _use_training_state( + self, training_state: TrainingState, handle_training_state: HandleTrainingState + ): + prev_training_state = self.training_state + self.training_state = training_state + if self._handle: + prev_handle_training_state = self._handle._training_state + self._handle._training_state = handle_training_state + try: + yield + finally: + self.training_state = prev_training_state + if self._handle: + self._handle._training_state = prev_handle_training_state + + +def _get_grad_norm( + params: Iterable[nn.Parameter], + norm_type: float, + zero: torch.Tensor, + device: torch.device, +) -> torch.Tensor: + """ + Return the gradient norm of parameters ``param`` s, where the gradients are viewed as a single vector. + + The returned norm is in FP32 even if parameters/gradients are in a low precision. This is because the downstream + use of this return value is a reduction across ranks. 
+ """ + params_with_grad = [param for param in params if param.grad is not None] + if len(params_with_grad) == 0: + # Reuse a tensor for zero to avoid a GPU sync + return zero + grads = [param.grad for param in params_with_grad] + grad_dtypes = {grad.dtype for grad in grads} + if len(grad_dtypes) != 1: + raise ValueError( + f"Requires uniform dtype across all gradients but got {grad_dtypes}" + ) + # Compute the gradient norm in FP32, where we treat the gradients as a + # single vector + grad_norm = torch.linalg.vector_norm( + torch.stack( + [ + torch.linalg.vector_norm(grad.detach(), norm_type, dtype=torch.float32) + for grad in grads + ], + ), + norm_type, + dtype=torch.float32, + ) + return grad_norm.to(device=device) + + +def _get_param_to_fqn( + model: torch.nn.Module, +) -> Dict[torch.nn.Parameter, str]: + """ + Construct a mapping from parameters to their parameter names. + + The ``model`` should not contain any :class:`FullyShardedDataParallel` instances, which + means that none of the parameters should be ``FlatParameter`` s. As a + result, compared to :meth:`_get_param_to_fqns`, the mapped + values may be flattened from singleton :class:`list` s to the contained + names themselves. + + Args: + model (torch.nn.Module): Root module, which should not contain any + :class:`FullyShardedDataParallel` instances. 
+ """ + param_to_param_names = _get_param_to_fqns(model) + for param_names in param_to_param_names.values(): + assert ( + len(param_names) > 0 + ), "`_get_param_to_fqns()` should not construct empty lists" + if len(param_names) > 1: + raise RuntimeError( + "Each parameter should only map to one parameter name but got " + f"{len(param_names)}: {param_names}" + ) + param_to_param_name = { + param: param_names[0] for param, param_names in param_to_param_names.items() + } + return param_to_param_name + + +def _get_fqn_to_param( + model: torch.nn.Module, +) -> Dict[str, torch.nn.Parameter]: + """Construct the inverse mapping of :meth:`_get_param_to_fqn`.""" + param_to_param_name = _get_param_to_fqn(model) + return dict(zip(param_to_param_name.values(), param_to_param_name.keys())) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..c4f1f1dae4e82c6958aea8f7ff68524f981d3640 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py @@ -0,0 +1,396 @@ +# mypy: allow-untyped-defs +import logging +from collections import abc, defaultdict +from typing import Any, Dict, Iterable, List, Optional, overload, Sequence, Tuple, Union + +import torch +import torch.distributed as dist +from torch.amp.grad_scaler import _MultiDeviceReplicator, GradScaler, OptState +from torch.distributed.distributed_c10d import ProcessGroup + + +logger = logging.getLogger(__name__) + + +def _refresh_per_optimizer_state() -> Dict[str, Any]: + return {"stage": OptState.READY, "found_inf_per_device": {}} + + +def _is_supported_device(tensor: torch.Tensor) -> bool: + return tensor.is_cuda or tensor.device.type in ( + "xla", + "cpu", + "hpu", + "mtia", + torch._C._get_privateuse1_backend_name(), + ) + + +class _GeneralMultiDeviceReplicator(_MultiDeviceReplicator): + """ + 
Lazily serves tensor to request device. This class extends + _MultiDeviceReplicator to allow support for "cpu" as a device. + """ + + def __init__(self, master_tensor: torch.Tensor) -> None: + assert _is_supported_device(master_tensor) + self.master = master_tensor + self._per_device_tensors: Dict[torch.device, torch.Tensor] = {} + + +class ShardedGradScaler(GradScaler): + """ + ShardedGradScaler helps perform gradient scaling in a shard aware manner. It extends + functionality from GradScaler: + * Supports Pytorch DDP and FSDP implementations + * Support CPU offloaded tensors (as used in fully sharded data parallel[FSDP]) + * Supports the custom Mixed Precision loss dtype (fp16, bf16) that FSDP returns + * Sync inf/nan for scaled gradient tensors on any torch.device (where tensors are placed) across + nodes + + Example:: + + # Creates a ShardedGradScaler once at the beginning of training. + scaler = ShardedGradScaler() + + for epoch in epochs: + for input, target in data: + optimizer.zero_grad() + output = model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + scaler.scale(loss).backward() + + # scaler.step() first unscales gradients of the optimizer's params. + # If gradients don't contain infs/NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. + scaler.update() + + See :class:`GradScaler` for explanation of scaling/unscaling and more use cases. + + Args: + init_scale (float, optional, default=2.**16): Initial scale factor. + growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during + :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations. + backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during + :meth:`update` if inf/NaN gradients occur in an iteration. 
+ growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients + that must occur for the scale to be multiplied by ``growth_factor``. + enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply + invokes the underlying ``optimizer.step()``, and other methods become no-ops. + Default: ``True`` + process_group (ProcessGroup, optional, default=torch.distributed.group.WORLD): + process group for sharding + """ + + def __init__( + self, + device: str = "cuda", + init_scale: float = 2.0**16, + backoff_factor: float = 0.5, + growth_factor: float = 2.0, + growth_interval: int = 2000, + enabled: bool = True, + process_group: Optional[ProcessGroup] = dist.group.WORLD, + ) -> None: + super().__init__( + device, + init_scale=init_scale, + backoff_factor=backoff_factor, + growth_factor=growth_factor, + growth_interval=growth_interval, + enabled=enabled, + ) + if self._enabled: + self.process_group = process_group + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) + + @overload + def scale(self, outputs: torch.Tensor) -> torch.Tensor: + ... + + @overload + def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]: + ... + + @overload + def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]: + ... + + @overload + def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]: + ... + + def scale( + self, outputs: Union[torch.Tensor, Iterable[torch.Tensor]] + ) -> Union[torch.Tensor, Iterable[torch.Tensor]]: + if not self._enabled: + return outputs + + if isinstance(outputs, torch.Tensor): + assert _is_supported_device(outputs) + if self._scale is None: + self._lazy_init_scale_growth_tracker(outputs.device) + assert self._scale is not None + scaled_output = outputs * self._scale.to( + device=outputs.device, non_blocking=True + ) + # Here we ensure the return dtype is the same as the outputs dtype. 
+ # For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision + # format (fp16, bf16) and so the scaled loss should be of the same dtype. + return scaled_output.type(outputs.dtype) + + stash: List[_GeneralMultiDeviceReplicator] = [] + + def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]): + if isinstance(val, torch.Tensor): + assert _is_supported_device(val) + if len(stash) == 0: + if self._scale is None: + self._lazy_init_scale_growth_tracker(val.device) + assert self._scale is not None + stash.append(_GeneralMultiDeviceReplicator(self._scale)) + scaled_val = val * stash[0].get(val.device) + # Here we ensure the return dtype is the same as the outputs dtype. + # For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision + # format (fp16, bf16) and so the scaled loss should be of the same dtype. + return scaled_val.type(val.dtype) + if isinstance(val, abc.Iterable): + iterator = map(apply_scale, val) + if isinstance(val, (list, tuple)): + return type(val)(iterator) + return iterator + raise ValueError("outputs must be a Tensor or an iterable of Tensors") + + return apply_scale(outputs) + + def _foreach_non_finite_check_and_unscale_cpu_( + self, + grads: Sequence[torch.Tensor], + found_inf: torch.Tensor, + inv_scale: torch.Tensor, + ) -> None: + if len(grads) == 0: + return + assert inv_scale.numel() == 1, "inv_scale must be a 1-element tensor." + assert found_inf.numel() == 1, "found_inf must be a 1-element tensor." + + for grad in grads: + if grad.device.type != "cpu": + logger.error( + "tensor device is %s but was expected to be ``cpu``", + grad.device, + ) + raise ValueError( + "Gradients were found on a non-CPU device when" + " expected to be on CPU." 
+ ) + if ( + torch.isinf(grad).any().item() is True + or torch.isnan(grad).any().item() is True + ): + found_inf.data = torch.tensor([1.0]) + break + else: + grad.data *= inv_scale.item() + + def _unscale_grads_( + self, + optimizer: torch.optim.Optimizer, + inv_scale: torch.Tensor, + found_inf: torch.Tensor, + allow_fp16: bool = True, + ) -> Dict[torch.device, torch.Tensor]: + per_device_inv_scale = _GeneralMultiDeviceReplicator(inv_scale) + per_device_found_inf = _GeneralMultiDeviceReplicator(found_inf) + + # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype. + # There could be thousands of grads, so we'd like to iterate through them just once. + # However, we don't know their devices or dtypes in advance. + + # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict + # Google says mypy struggles with defaultdicts type annotations. + per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated] + with torch.no_grad(): + for group in optimizer.param_groups: + for param in group["params"]: + if param.grad is None: + continue + if (not allow_fp16) and param.grad.dtype == torch.float16: + raise ValueError("Attempting to unscale FP16 gradients.") + if param.grad.is_sparse: + # is_coalesced() == False means the sparse grad has values with duplicate indices. + # coalesce() deduplicates indices and adds all values that have the same index. + # For scaled fp16 values, there's a good chance coalescing will cause overflow, + # so we should check the coalesced _values(). 
+ if param.grad.dtype is torch.float16: + # coalesce is not supported in torch.float16 + param_grad_fp32 = param.grad.type(torch.float32).coalesce() + param.grad = param_grad_fp32.type(torch.float16) + to_unscale = param.grad._values() + else: + to_unscale = param.grad + + per_device_and_dtype_grads[to_unscale.device][ + to_unscale.dtype + ].append(to_unscale) + + for device, per_dtype_grads in per_device_and_dtype_grads.items(): + for grads in per_dtype_grads.values(): + if grads[0].device.type == "cpu": + self._foreach_non_finite_check_and_unscale_cpu_( + grads, + per_device_found_inf.get(device), + per_device_inv_scale.get(device), + ) + else: + torch._amp_foreach_non_finite_check_and_unscale_( + grads, + per_device_found_inf.get(device), + per_device_inv_scale.get(device), + ) + # There exist contexts (e.g. w/ `use_orig_params=True`) wherein some + # ranks may have no (non-zero sized) parameter shards, necessitating the + # initialization of `per_device_found_inf._per_device_tensors` here + if not per_device_found_inf._per_device_tensors: + assert self._scale is not None + per_device_found_inf.get(self._scale.device) + return per_device_found_inf._per_device_tensors + + def unscale_(self, optimizer: torch.optim.Optimizer) -> None: + if not self._enabled: + return + + self._check_scale_growth_tracker("unscale_") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.UNSCALED: + raise RuntimeError( + "unscale_() has already been called on this optimizer since the last update()." + ) + elif optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError("unscale_() is being called after step().") + + # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. 
+ assert self._scale is not None + inv_scale = self._scale.double().reciprocal().float() + found_inf = torch.full( + (1,), 0.0, dtype=torch.float32, device=self._scale.device + ) + + optimizer_state["found_inf_per_device"] = self._unscale_grads_( + optimizer, inv_scale, found_inf, True + ) + optimizer_state["stage"] = OptState.UNSCALED + + # Synchronize the detected inf across the ranks + optimizer_state = self._per_optimizer_states[id(optimizer)] + works = [] + found_inf_on_cpus = [] + found_inf_on_devices = [] + + for found_inf in optimizer_state["found_inf_per_device"].values(): + if self._device != "cpu" and found_inf.device.type == "cpu": + found_inf_on_cpus.append(found_inf) + found_inf_on_device = found_inf.to(self._device) + found_inf_on_devices.append(found_inf_on_device) + works.append( + dist.all_reduce( + found_inf_on_device, async_op=True, group=self.process_group + ) + ) + else: + works.append( + dist.all_reduce(found_inf, async_op=True, group=self.process_group) + ) + for work in works: + work.wait() + if found_inf_on_cpus: + torch._foreach_copy_(found_inf_on_cpus, found_inf_on_devices) + + def _amp_update_scale_cpu_(self, found_inf: torch.Tensor) -> None: + """ + If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero. + Otherwise, scale is multiplied by the growth factor when the growth interval is reached. + """ + assert self._scale is not None and self._growth_tracker is not None + + if found_inf.item() >= 1.0: + self._scale *= self._backoff_factor + self._growth_tracker.fill_(0) + else: + successful = self._growth_tracker + 1 + if successful == self._growth_interval: + self._scale *= self._growth_factor + self._growth_tracker.fill_(0) + else: + self._growth_tracker = successful + + def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None: + """ + Updates the scale factor. + If any optimizer steps were skipped the scale is multiplied by ``backoff_factor`` + to reduce it. 
If ``growth_interval`` unskipped iterations occurred consecutively, + the scale is multiplied by ``growth_factor`` to increase it. + Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not + used directly, it's used to fill GradScaler's internal scale tensor. So if + ``new_scale`` was a tensor, later in-place changes to that tensor will not further + affect the scale GradScaler uses internally.) + Args: + new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor. + .. warning:: + :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has + been invoked for all optimizers used this iteration. + """ + + if not self._enabled: + return + + _scale, _growth_tracker = self._check_scale_growth_tracker("update") # type: ignore[var-annotated] + + if new_scale is not None: + # Accept a new user-defined scale. + if isinstance(new_scale, float): + self._scale.fill_(new_scale) # type: ignore[union-attr] + else: + reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor or \ + torch.FloatTensor with requires_grad=False." + assert new_scale.device.type == self._device, reason + assert new_scale.numel() == 1, reason + assert new_scale.requires_grad is False, reason + self._scale.copy_(new_scale) # type: ignore[union-attr] + else: + # Consume shared inf/nan data collected from optimizers to update the scale. + # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous. + found_infs = [ + found_inf.to(device=_scale.device, non_blocking=True) + for state in self._per_optimizer_states.values() + for found_inf in state["found_inf_per_device"].values() + ] + + assert len(found_infs) > 0, "No inf checks were recorded prior to update." 
+ + found_inf_combined = found_infs[0] + if len(found_infs) > 1: + for i in range(1, len(found_infs)): + found_inf_combined += found_infs[i] + + if _scale.device.type == "cpu": + self._amp_update_scale_cpu_(found_inf_combined) + else: + torch._amp_update_scale_( + self._scale, # type: ignore[arg-type] + self._growth_tracker, # type: ignore[arg-type] + found_inf_combined, + self._growth_factor, # type: ignore[arg-type] + self._backoff_factor, # type: ignore[arg-type] + self._growth_interval, # type: ignore[arg-type] + ) + + # To prepare for next iteration, clear the data collected from optimizers this iteration. + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c6608a2a773f3be7d0166d8f36bb4ce237bed7b --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py @@ -0,0 +1,249 @@ +# mypy: allow-untyped-defs +import logging +import os +import threading +import warnings +from datetime import timedelta +from typing import Generator, Tuple +from urllib.parse import urlparse + +import torch +import torch.distributed as dist + + +__all__ = ["is_available"] + + +logger = logging.getLogger(__name__) + + +_init_counter = 0 +_init_counter_lock = threading.Lock() + + +def is_available() -> bool: + return hasattr(torch._C, "_rpc_init") + + +if is_available() and not torch._C._rpc_init(): + raise RuntimeError("Failed to initialize torch.distributed.rpc") + + +if is_available(): + import numbers + + import torch.distributed.autograd as dist_autograd + from torch._C._distributed_c10d import Store + from torch._C._distributed_rpc import ( # noqa: F401 + _cleanup_python_rpc_handler, + _DEFAULT_INIT_METHOD, + _DEFAULT_NUM_WORKER_THREADS, + _DEFAULT_RPC_TIMEOUT_SEC, + _delete_all_user_and_unforked_owner_rrefs, + 
_destroy_rref_context, + _disable_jit_rref_pickle, + _disable_server_process_global_profiler, + _enable_jit_rref_pickle, + _enable_server_process_global_profiler, + _get_current_rpc_agent, + _invoke_remote_builtin, + _invoke_remote_python_udf, + _invoke_remote_torchscript, + _invoke_rpc_builtin, + _invoke_rpc_python_udf, + _invoke_rpc_torchscript, + _is_current_rpc_agent_set, + _reset_current_rpc_agent, + _rref_context_get_debug_info, + _set_and_start_rpc_agent, + _set_profiler_node_id, + _set_rpc_timeout, + _TensorPipeRpcBackendOptionsBase, + _UNSET_RPC_TIMEOUT, + enable_gil_profiling, + get_rpc_timeout, + PyRRef, + RemoteProfilerManager, + RpcAgent, + RpcBackendOptions, + TensorPipeAgent, + WorkerInfo, + ) + + from . import api, backend_registry, functions + from .api import * # noqa: F401,F403 + from .backend_registry import BackendType + from .options import TensorPipeRpcBackendOptions # noqa: F401 + from .server_process_global_profiler import _server_process_global_profile + + rendezvous_iterator: Generator[Tuple[Store, int, int], None, None] + + __all__ += ["init_rpc", "BackendType", "TensorPipeRpcBackendOptions"] + __all__ = __all__ + api.__all__ + backend_registry.__all__ # noqa: PLE0605 + + def init_rpc( + name, + backend=None, + rank=-1, + world_size=None, + rpc_backend_options=None, + ): + r""" + Initializes RPC primitives such as the local RPC agent + and distributed autograd, which immediately makes the current + process ready to send and receive RPCs. + + Args: + name (str): a globally unique name of this node. (e.g., + ``Trainer3``, ``ParameterServer2``, ``Master``, ``Worker1``) + Name can only contain number, alphabet, underscore, colon, + and/or dash, and must be shorter than 128 characters. + backend (BackendType, optional): The type of RPC backend + implementation. Supported values is + ``BackendType.TENSORPIPE`` (the default). + See :ref:`rpc-backends` for more information. + rank (int): a globally unique id/rank of this node. 
+ world_size (int): The number of workers in the group. + rpc_backend_options (RpcBackendOptions, optional): The options + passed to the RpcAgent constructor. It must be an agent-specific + subclass of :class:`~torch.distributed.rpc.RpcBackendOptions` + and contains agent-specific initialization configurations. By + default, for all agents, it sets the default timeout to 60 + seconds and performs the rendezvous with an underlying process + group initialized using ``init_method = "env://"``, + meaning that environment variables ``MASTER_ADDR`` and + ``MASTER_PORT`` need to be set properly. See + :ref:`rpc-backends` for more information and find which options + are available. + """ + torch._C._log_api_usage_once("torch.distributed.init_rpc") + if backend is not None and not isinstance( + backend, backend_registry.BackendType + ): + raise TypeError("Argument backend must be a member of BackendType") + + if rpc_backend_options is not None and not isinstance( + rpc_backend_options, RpcBackendOptions + ): + raise TypeError( + "Argument rpc_backend_options must be an instance of RpcBackendOptions" + ) + + # Try to detect the backend from the options + if backend is None and rpc_backend_options is not None: + for candidate_backend in BackendType: + if isinstance( + rpc_backend_options, + type( + backend_registry.construct_rpc_backend_options( + candidate_backend + ) + ), + ): + backend = candidate_backend + break + else: + raise TypeError( + f"Could not infer backend for options {rpc_backend_options}" + ) + # Ignore type error because mypy doesn't handle dynamically generated type objects (#4865) + if backend != BackendType.TENSORPIPE: # type: ignore[attr-defined] + logger.warning( + "RPC was initialized with no explicit backend but with options " # type: ignore[attr-defined] + "corresponding to %(backend)s, hence that backend will be used " + "instead of the default BackendType.TENSORPIPE. 
To silence this " + "warning pass `backend=%(backend)s` explicitly.", + {"backend": backend}, + ) + + if backend is None: + backend = BackendType.TENSORPIPE # type: ignore[attr-defined] + + if rpc_backend_options is None: + # default construct a set of RPC backend options. + rpc_backend_options = backend_registry.construct_rpc_backend_options( + backend + ) + + # Create store, performs rendezvous for static RPC group. + if not world_size: + # If world_size is not set in construction and also not set in environment variables + # The store will be created for the dynamic group setting + store = dist._create_store_from_options(rpc_backend_options, rank) + else: + # This rendezvous state sometimes is destroyed before all processes + # finishing handshaking. To avoid that issue, we make it global to + # keep it alive. + global rendezvous_iterator + rendezvous_iterator = dist.rendezvous( + rpc_backend_options.init_method, rank=rank, world_size=world_size + ) + store, _, _ = next(rendezvous_iterator) + # Use same timeout as RPC. + store.set_timeout(timedelta(seconds=rpc_backend_options.rpc_timeout)) + + # Use a PrefixStore to distinguish multiple invocations. + with _init_counter_lock: + global _init_counter + store = dist.PrefixStore(str(f"rpc_prefix_{_init_counter}"), store) + _init_counter += 1 + + # Initialize autograd before RPC since _init_rpc_backend guarantees all + # processes sync via the store. If we initialize autograd after RPC, + # there could be a race where some nodes might have initialized autograd + # and others might not have. As a result, a node calling + # torch.distributed.autograd.backward() would run into errors since + # other nodes might not have been initialized. + dist_autograd._init(rank) + + _set_profiler_node_id(rank) + # Initialize RPC. 
+ _init_rpc_backend(backend, store, name, rank, world_size, rpc_backend_options) + + def _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options): + type_mapping = { + backend: backend_registry.BackendType, + store: dist.Store, + name: str, + rank: numbers.Integral, + # world_size can be None for a dynamic group + world_size: (numbers.Integral, type(None)), + rpc_backend_options: RpcBackendOptions, + } + for arg, arg_type in type_mapping.items(): + if not isinstance(arg, arg_type): # type: ignore[arg-type] + raise RuntimeError( + f"Argument {arg} must be of type {arg_type} but got type {type(arg)}" + ) + + def _init_rpc_backend( + backend=BackendType.TENSORPIPE, # type: ignore[attr-defined] + store=None, + name=None, + rank=-1, + world_size=None, + rpc_backend_options=None, + ): + _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options) + + if _is_current_rpc_agent_set(): + raise RuntimeError("RPC is already initialized") + + # Initialize RPC. 
+ rpc_agent = backend_registry.init_backend( + backend, + store=store, + name=name, + rank=rank, + world_size=world_size, + rpc_backend_options=rpc_backend_options, + ) + + api._init_rpc_states(rpc_agent) + + @api._require_initialized + def _get_debug_info(): + info = _rref_context_get_debug_info() + info.update(api._get_current_rpc_agent().get_debug_info()) + info.update(dist_autograd._get_debug_info()) + return info diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be46d03d6392f9651ddf42ac9e3e7752a53443f2 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e879a1da9dbee680c7b65ffd0907b54e568a042 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4c3b888831bf9ddcd3758d2e582a3d27c388786 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/backend_registry.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/backend_registry.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..054cafc21219c1dc6d128d349e9118cb738467f4 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/backend_registry.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/constants.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84a1b793d8ddf8f4fff4ab44bd2d520795268a0f Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/constants.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/functions.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..690f6a4e658696903251629f3beed91a34e8b828 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/functions.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/internal.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fb93f7a4ae216582a60b8aae578221e63e164b3 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/internal.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ddc2e31fe11a0c09ec5a7f18a2840fb0ed048e4 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc differ 
diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/rref_proxy.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/rref_proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5999a67bd5056e930f2d7eb9325219fde5d256af Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/rref_proxy.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/server_process_global_profiler.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/server_process_global_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..208755711bd080e5ac0630c480edf6fd9b49d147 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/server_process_global_profiler.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__init__.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ac1c02f4cee4c38c896cacd245b9ededf9a8a29 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__init__.py @@ -0,0 +1,20 @@ +# mypy: allow-untyped-defs + +import torch + + +def is_available(): + return hasattr(torch._C, "_faulty_agent_init") + + +if is_available() and not torch._C._faulty_agent_init(): + raise RuntimeError("Failed to initialize torch.distributed.rpc._testing") + +if is_available(): + # Registers FAULTY_TENSORPIPE RPC backend. + from torch._C._distributed_rpc_testing import ( + FaultyTensorPipeAgent, + FaultyTensorPipeRpcBackendOptions, + ) + + from . 
import faulty_agent_backend_registry diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..299785194b9690bcf5c2d9b0f92355f0968b6b08 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/faulty_agent_backend_registry.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/faulty_agent_backend_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bf5afe00b16f1fdbcc4cb9765322eed0cf67b39 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/faulty_agent_backend_registry.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/faulty_agent_backend_registry.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/faulty_agent_backend_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..d04882e16e79a94f74ddc1350e94f547ef625611 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_testing/faulty_agent_backend_registry.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +# mypy: allow-untyped-defs + +import torch.distributed as dist +import torch.distributed.rpc as rpc + + +def _faulty_tensorpipe_construct_rpc_backend_options_handler( + rpc_timeout, + init_method, + num_worker_threads, + messages_to_fail, + messages_to_delay, + num_fail_sends, + **kwargs, +): + from . 
import FaultyTensorPipeRpcBackendOptions + + return FaultyTensorPipeRpcBackendOptions( + num_worker_threads=num_worker_threads, + rpc_timeout=rpc_timeout, + init_method=init_method, + messages_to_fail=messages_to_fail, + messages_to_delay=messages_to_delay, + num_fail_sends=num_fail_sends, + ) + + +def _faulty_tensorpipe_init_backend_handler( + store, name, rank, world_size, rpc_backend_options +): + from torch.distributed.rpc import api + + from . import FaultyTensorPipeAgent, FaultyTensorPipeRpcBackendOptions + + if not isinstance(store, dist.Store): + raise TypeError(f"`store` must be a c10d::Store. {store}") + + if not isinstance(rpc_backend_options, FaultyTensorPipeRpcBackendOptions): + raise TypeError( + f"`rpc_backend_options` must be a `FaultyTensorPipeRpcBackendOptions`. {rpc_backend_options}" + ) + + agent = FaultyTensorPipeAgent( + store, + name, + rank, + world_size, + rpc_backend_options, + {}, # reverse_device_map + [], # devices + ) + api._init_rpc_states(agent) + + return agent + + +rpc.backend_registry.register_backend( + "FAULTY_TENSORPIPE", + _faulty_tensorpipe_construct_rpc_backend_options_handler, + _faulty_tensorpipe_init_backend_handler, +) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/_utils.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8925bc662b5f97555d273d4c641ae5930f50d789 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/_utils.py @@ -0,0 +1,47 @@ +# mypy: allow-untyped-defs +import logging +from contextlib import contextmanager +from typing import cast + +from . 
import api, TensorPipeAgent + + +logger = logging.getLogger(__name__) + + +@contextmanager +def _group_membership_management(store, name, is_join): + token_key = "RpcGroupManagementToken" + join_or_leave = "join" if is_join else "leave" + my_token = f"Token_for_{name}_{join_or_leave}" + while True: + # Retrieve token from store to signal start of rank join/leave critical section + returned = store.compare_set(token_key, "", my_token).decode() + if returned == my_token: + # Yield to the function this context manager wraps + yield + # Finished, now exit and release token + # Update from store to signal end of rank join/leave critical section + store.set(token_key, "") + # Other will wait for this token to be set before they execute + store.set(my_token, "Done") + break + else: + # Store will wait for the token to be released + try: + store.wait([returned]) + except RuntimeError: + logger.error( + "Group membership token %s timed out waiting for %s to be released.", + my_token, + returned, + ) + raise + + +def _update_group_membership(worker_info, my_devices, reverse_device_map, is_join): + agent = cast(TensorPipeAgent, api._get_current_rpc_agent()) + ret = agent._update_group_membership( + worker_info, my_devices, reverse_device_map, is_join + ) + return ret diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/api.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/api.py new file mode 100644 index 0000000000000000000000000000000000000000..fbe77e465740aee9ebdaaebdb2ce4e2d4c03b6ef --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/api.py @@ -0,0 +1,965 @@ +# mypy: allow-untyped-decorators +# mypy: allow-untyped-defs + +import collections +import contextlib +import functools +import inspect +import logging +import threading +from typing import Any, Dict, Generic, Set, TYPE_CHECKING, TypeVar + +import torch +from torch._C._distributed_rpc import ( + _cleanup_python_rpc_handler, + 
_delete_all_user_and_unforked_owner_rrefs, + _destroy_rref_context, + _get_current_rpc_agent, + _invoke_remote_builtin, + _invoke_remote_python_udf, + _invoke_remote_torchscript, + _invoke_rpc_builtin, + _invoke_rpc_python_udf, + _invoke_rpc_torchscript, + _is_current_rpc_agent_set, + _reset_current_rpc_agent, + _set_and_start_rpc_agent, + get_rpc_timeout, + PyRRef, + RemoteProfilerManager, + TensorPipeAgent, + WorkerInfo, +) +from torch.futures import Future + +from ._utils import _group_membership_management, _update_group_membership +from .constants import DEFAULT_SHUTDOWN_TIMEOUT, UNSET_RPC_TIMEOUT +from .internal import ( + _build_rpc_profiling_key, + _internal_rpc_pickler, + PythonUDF, + RPCExecMode, +) + + +__all__ = [ + "shutdown", + "get_worker_info", + "remote", + "rpc_sync", + "rpc_async", + "RRef", + "AllGatherStates", + "method_factory", + "new_method", +] + + +logger = logging.getLogger(__name__) + +# NB: Ignoring RRef leaks during shutdown. Without this, applications have to +# make sure there is no references to any RRef in the application code and +# Python GC has done its job to delete those RRefs. This is could result in bad +# debugging experiences especially when for large applications. Therefore, by +# default, we are going to ignore RRef leaks during shutdown. This is usually +# fine as shutdown means applications have done training and no longer care +# about states. 
+# +# To enable RRef leak checking, set this _ignore_rref_leak to False +_ignore_rref_leak = True +_default_pickler = _internal_rpc_pickler + + +@contextlib.contextmanager +def _use_rpc_pickler(rpc_pickler): + r""" + rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler + """ + global _default_pickler + _default_pickler = rpc_pickler + try: + yield + finally: + _default_pickler = _internal_rpc_pickler + + +def _require_initialized(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not _is_current_rpc_agent_set(): + raise RuntimeError( + "RPC has not been initialized. Call " + "torch.distributed.rpc.init_rpc first." + ) + return func(*args, **kwargs) + + return wrapper + + +class AllGatherStates: + def __init__(self): + # Each `gathered_objects` is an empty dict at beginning. + # The leader worker is elected as the first worker in a sorted worker + # name list. Whenever there is a worker entering `_all_gather()`, it + # runs `_gather_to_leader()` on the leader to add its own name and + # data obj to this dict. The leader also adds itself's name to the dict + # on calling `_all_gather()`. + # Once `set(gathered_objects.keys()) == _ALL_WORKER_NAMES`, the leader + # will broadcast the gathered dict to all follower workers and set their + # `gathered_objects` field and the `proceed_signal` field. + self.gathered_objects = {} + # All workers wait on this signal until it receives all gathered + # objects. + self.proceed_signal = threading.Event() + + +# States used by `def _all_gather()`. +# `_ALL_WORKER_NAMES` is initialized on initializing RPC layer. 
+_ALL_WORKER_NAMES: Set[Any] = set() +_all_gather_dict_lock = threading.RLock() +_all_gather_sequence_id: Dict[str, int] = {} +_all_gather_sequence_id_to_states: collections.defaultdict = collections.defaultdict( + AllGatherStates +) + + +def _init_rpc_states(agent): + worker_infos = agent.get_worker_infos() + global _ALL_WORKER_NAMES + _ALL_WORKER_NAMES = {worker_info.name for worker_info in worker_infos} + + # NB: backend implementation might have already set the rpc_agent. + if not _is_current_rpc_agent_set(): + _set_and_start_rpc_agent(agent) + + +def _gather_to_leader(sequence_id, worker_name, obj, worker_names=None): + with _all_gather_dict_lock: + if not worker_names: + worker_names = _ALL_WORKER_NAMES + assert ( + worker_name in worker_names + ), f"{worker_name} is not expected by leader." + states = _all_gather_sequence_id_to_states[sequence_id] + assert ( + worker_name not in states.gathered_objects + ), f"{worker_name} reported intent sequence id {sequence_id} twice. " + states.gathered_objects[worker_name] = obj + if worker_names == set(states.gathered_objects.keys()): + states.proceed_signal.set() + + +def _broadcast_to_followers(sequence_id, objects_map): + with _all_gather_dict_lock: + states = _all_gather_sequence_id_to_states[sequence_id] + + assert ( + not states.proceed_signal.is_set() + ), f"Termination signal sequence id {sequence_id} got set twice." + states.gathered_objects = objects_map + states.proceed_signal.set() + + +_thread_local_var = threading.local() + + +@contextlib.contextmanager +def _wait_all(): + r""" + A context manager that collects all futures returned by ``rpc_async`` and + waits them on the context manager's exit; relieving the user of needing + to explicitly call wait. 
+ + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> with rpc._wait_all(): + >>> fut_1 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) + >>> fut_2 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) + >>> #fut_1 and fut_2 are waited on + """ + _thread_local_var.future_list = [] + try: + yield + finally: + try: + torch.futures.wait_all(_thread_local_var.future_list) + finally: + del _thread_local_var.future_list + + +@_require_initialized +def _all_gather(obj, worker_names=None, timeout: float = UNSET_RPC_TIMEOUT): + r""" + This is similar to torch.distributed.all_gather(), but is using RPC. It + picks the worker with the smallest name (alphabetic order) as the leader. + Then all followers send their data ``obj`` to the leader. After the leader + has received all, it will broadcast the results back to all followers. This + function blocks until all workers have received the gathered results. + """ + if not worker_names: + assert ( + _ALL_WORKER_NAMES is not None + ), "`_ALL_WORKER_NAMES` is not initialized for `def _all_gather`." 
+ worker_names = _ALL_WORKER_NAMES + leader_name = min(worker_names) + + self_name = _get_current_rpc_agent().get_worker_info().name + + with _all_gather_dict_lock: + concat_names = "".join(sorted(worker_names)) + sequence_num = _all_gather_sequence_id.get(concat_names, 0) + _all_gather_sequence_id[concat_names] = sequence_num + 1 + sequence_id = concat_names + str(sequence_num) + + is_leader = leader_name == self_name + + if timeout == UNSET_RPC_TIMEOUT: + # Timeout is specified by agent for RPC calls + rpc_timeout = get_rpc_timeout() + # No timeout for signal + signal_timeout = None + elif timeout == DEFAULT_SHUTDOWN_TIMEOUT: + # No timeout for RPC + rpc_timeout = timeout + # No timeout for signal + signal_timeout = None + else: + # Signal and RPC timeout use the same timeout + signal_timeout = rpc_timeout = timeout + + # Phase 1: Followers send it's object to the leader + if is_leader: + _gather_to_leader(sequence_id, self_name, obj, worker_names) + else: + rpc_sync( + leader_name, + _gather_to_leader, + args=(sequence_id, self_name, obj, worker_names), + timeout=rpc_timeout, + ) + + with _all_gather_dict_lock: + states = _all_gather_sequence_id_to_states[sequence_id] + + # Timeout is either set by function parameter or None (which is indefinite) + states.proceed_signal.wait(timeout=signal_timeout) + + # Phase 2: Leader broadcast gathered results to all followers + # Leader's signal is the first to be unblocked, after receiving all + # followers' data objects. 
+ if is_leader: + worker_name_to_response_future_dict = {} + for follower_name in worker_names - {leader_name}: + fut = rpc_async( + follower_name, + _broadcast_to_followers, + args=(sequence_id, states.gathered_objects), + timeout=rpc_timeout, + ) + worker_name_to_response_future_dict[follower_name] = fut + + errors = [] + for follower_name, fut in worker_name_to_response_future_dict.items(): + try: + fut.wait() + except RuntimeError as ex: + errors.append((follower_name, ex)) + + if errors: + raise RuntimeError( + f"Followers {[e[0] for e in errors]} timed out in _all_gather " + f"after {rpc_timeout:.2f} seconds. The first exception is {errors[0][1]}" + ) + + # Clean up for the states using the sequence_id + with _all_gather_dict_lock: + states = _all_gather_sequence_id_to_states.pop(sequence_id) + return states.gathered_objects + + +@_require_initialized +def _barrier(worker_names): + r""" + Synchronizes local and remote RPC processes. + + This will block until all local and remote RPC processes specified under worker_names + reach this method to wait for all outstanding work to complete. + + Args: + worker_names (List[str]): The set of workers to synchronize. + + """ + try: + _all_gather(None, set(worker_names)) + except RuntimeError as ex: + logger.error("Failed to complete barrier, got error %s", ex) + + +@_require_initialized +def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT): + r""" + Block until all local and remote RPC processes reach this method and wait + for all outstanding work to complete. Every RPC process must call this + method before exit to perform a graceful shutdown. This should be used to + terminate the RPC framework, and there is no guarantee that the RPC + framework will work after this method returns. 
+ """ + try: + _all_gather(None, timeout=timeout) + except RuntimeError as ex: + logger.error( + "Failed to respond to 'Shutdown Proceed' in time, got error %s", ex + ) + raise ex + + +@_require_initialized +def shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT): + r""" + Perform a shutdown of the RPC agent, and then destroy the RPC agent. This + stops the local agent from accepting outstanding requests, and shuts + down the RPC framework by terminating all RPC threads. If ``graceful=True``, + this will block until all local and remote RPC processes reach this method + and wait for all outstanding work to complete. Otherwise, if + ``graceful=False``, this is a local shutdown, and it does not wait for other + RPC processes to reach this method. + + .. warning:: + For :class:`~torch.futures.Future` objects returned by + :meth:`~torch.distributed.rpc.rpc_async`, ``future.wait()`` should not + be called after ``shutdown()``. + + Args: + graceful (bool): Whether to do a graceful shutdown or not. If True, + this will 1) wait until there is no pending system + messages for ``UserRRefs`` and delete them; 2) block + until all local and remote RPC processes have reached + this method and wait for all outstanding work to + complete. + + Example:: + Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly + on both workers. Refer to :meth:`~torch.distributed.init_process_group` + API for more details. 
For example, + + export MASTER_ADDR=localhost + export MASTER_PORT=5678 + + Then run the following code in two different processes: + + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> # do some work + >>> result = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(1), 1)) + >>> # ready to shutdown + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> # wait for worker 0 to finish work, and then shutdown. + >>> rpc.shutdown() + """ + if graceful: + try: + agent = _get_current_rpc_agent() + if not isinstance(agent, TensorPipeAgent) or agent.is_static_group: + _wait_all_workers(timeout) + _delete_all_user_and_unforked_owner_rrefs() + agent.join(shutdown=True, timeout=timeout) + else: + # This is a dynamic group so we need to grab the token for the operation + my_worker_info = agent.get_worker_info() + my_name = my_worker_info.name + with _group_membership_management(agent.store, my_name, False): + all_worker_infos = agent.get_worker_infos() + for worker in all_worker_infos: + if worker.name != my_name: + rpc_sync( + worker.name, + _update_group_membership, + args=(my_worker_info, [], {}, False), + ) + agent.join(shutdown=True, timeout=timeout) + finally: + # In case of errors, continue to complete the local shutdown. + _finalize_shutdown() + else: + _finalize_shutdown() + + +def _finalize_shutdown(): + try: + # This raises a `TORCH_CHECK()` exception on RRef leak detected. + _destroy_rref_context(_ignore_rref_leak) + finally: + _get_current_rpc_agent().shutdown() + # clean up python rpc handler in shutdown(), see comments in + # PythonRpcHandler::cleanup(), call it in python API because the + # cleanup() function has python dependency, it assumes python + # interpreter exists. 
+ # No matter if RRef leak exception is raised, this clean-up code + # must run to avoid destruction segfault in Python 3.5. + # + # future.wait() should not be called after shutdown(). + # pythonRpcHandler is cleaned up in shutdown(), after + # shutdown(), python objects returned from rpc python call can not be + # resolved. + _cleanup_python_rpc_handler() + _reset_current_rpc_agent() + + +@_require_initialized +def get_worker_info(worker_name=None): + r""" + Get :class:`~torch.distributed.rpc.WorkerInfo` of a given worker name. + Use this :class:`~torch.distributed.rpc.WorkerInfo` to avoid passing an + expensive string on every invocation. + + Args: + worker_name (str): the string name of a worker. If ``None``, return the + the id of the current worker. (default ``None``) + + Returns: + :class:`~torch.distributed.rpc.WorkerInfo` instance for the given + ``worker_name`` or :class:`~torch.distributed.rpc.WorkerInfo` of the + current worker if ``worker_name`` is ``None``. + """ + if worker_name is not None: + return _get_current_rpc_agent().get_worker_info(worker_name) + else: + return _get_current_rpc_agent().get_worker_info() + + +def _to_worker_info(to): + if isinstance(to, WorkerInfo): + return to + elif isinstance(to, (str, int)): + return get_worker_info(to) + else: + raise ValueError(f"Cannot get WorkerInfo from name {to}") + + +def _rref_typeof_on_owner(rref, blocking: bool = True): + rref_type = type(rref.local_value()) + if blocking: + return rref_type + else: + # Wrap result into a completed Future. This is so that if blocking=`False` + # is specified, we return a future regardless of if this call is on user + # or owner. 
+ future = Future[type]() + future.set_result(rref_type) + return future + + +def _rref_typeof_on_user( + rref, timeout: float = UNSET_RPC_TIMEOUT, blocking: bool = True +): + fut = rpc_async(rref.owner(), _rref_typeof_on_owner, args=(rref,), timeout=timeout) + if blocking: + return fut.wait() + else: + return fut + + +T = TypeVar("T") +GenericWithOneTypeVar = Generic[T] + + +if TYPE_CHECKING: + + class RRef(PyRRef[T], Generic[T]): + pass + +else: + try: + # Combine the implementation class and the type class. + class RRef(PyRRef, Generic[T]): + pass + + except TypeError: + # TypeError: metaclass conflict: the metaclass of a derived class + # must be a (non-strict) subclass of the metaclasses of all its bases + # Mypy doesn't understand __class__ (mypy bug #4177) + class RRefMeta(PyRRef.__class__, GenericWithOneTypeVar.__class__): # type: ignore[name-defined, misc, valid-type] + pass + + # Combine the implementation class and the type class. + # Types for classes expecting a certain generic parameter (mypy bug #7791) + class RRef(PyRRef, GenericWithOneTypeVar, metaclass=RRefMeta): # type: ignore[misc, no-redef, valid-type] + pass + + +# Install docstrings from `PyRRef` to `RRef`. +# +# This is for the fact that pybind11 generates the parameter +# `self` as type `rpc.PyRRef`, so a `:inherited-members:` +# under `.. autoclass:: RRef` does not work. +# we have to do the following process to replace `rpc.PyRRef` with `rpc.RRef`. +# +def method_factory(method_name, docstring): + def method(self, *args, **kwargs): + return getattr(super(RRef, self), method_name)(*args, **kwargs) + + if method.__doc__: + method.__doc__ = docstring + return method + + +for method_name, method in inspect.getmembers(PyRRef): + # Ignore magic methods, except "__str__". + if method_name.startswith("_") and method_name != "__str__": + continue + + # Get pybind11 generated docstring. 
+ # It's like, + """ + to_here(self: torch.distributed.rpc.PyRRef, timeout: float=-1.0) -> object + + Blocking call that copies the value of the RRef from the owner + to the local node and returns it. If the current node is the + owner, returns a reference to the local value. + """ + docstring = getattr(method, "__doc__", None) + assert docstring is not None, "RRef user-facing methods should all have docstrings." + + # Do surgery on pybind11 generated docstrings. + docstring = docstring.replace( + "torch.distributed.rpc.PyRRef", "torch.distributed.rpc.RRef" + ) + + # Attach user-facing RRef method with modified docstring. + new_method = method_factory(method_name, docstring) + setattr(RRef, method_name, new_method) + + +@_require_initialized +def remote(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT): + r""" + Make a remote call to run ``func`` on worker ``to`` and return an + :class:`~torch.distributed.rpc.RRef` to the result value immediately. + Worker ``to`` will be the owner of the returned + :class:`~torch.distributed.rpc.RRef`, and the worker calling ``remote`` is + a user. The owner manages the global reference count of its + :class:`~torch.distributed.rpc.RRef`, and the owner + :class:`~torch.distributed.rpc.RRef` is only destructed when globally there + are no living references to it. + + Args: + to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker. + func (Callable): a callable function, such as Python callables, builtin + operators (e.g. :meth:`~torch.add`) and annotated + TorchScript functions. + args (tuple): the argument tuple for the ``func`` invocation. + kwargs (dict): is a dictionary of keyword arguments for the ``func`` + invocation. + + timeout (float, optional): timeout in seconds for this remote call. 
If the + creation of this + :class:`~torch.distributed.rpc.RRef` on worker + ``to`` is not successfully processed on this + worker within this timeout, then the next time + there is an attempt to use the RRef (such as + ``to_here()``), a timeout will be raised + indicating this failure. A value of 0 indicates + an infinite timeout, i.e. a timeout error will + never be raised. If not provided, the default + value set during initialization or with + ``_set_rpc_timeout`` is used. + + Returns: + A user :class:`~torch.distributed.rpc.RRef` instance to the result + value. Use the blocking API :meth:`torch.distributed.rpc.RRef.to_here` + to retrieve the result value locally. + + .. warning :: + The ``remote`` API does not copy storages of argument tensors until + sending them over the wire, which could be done by a different thread + depending on the RPC backend type. The caller should make sure that the + contents of those tensors stay intact until the returned RRef is + confirmed by the owner, which can be checked using the + :meth:`torch.distributed.rpc.RRef.confirmed_by_owner` API. + + .. warning :: + Errors such as timeouts for the ``remote`` API are handled on a + best-effort basis. This means that when remote calls initiated by + ``remote`` fail, such as with a timeout error, we take a best-effort + approach to error handling. This means that errors are handled and set + on the resulting RRef on an asynchronous basis. If the RRef has not been + used by the application before this handling (such as ``to_here`` or + fork call), then future uses of the ``RRef`` will appropriately raise + errors. However, it is possible that the user application will use the + ``RRef`` before the errors are handled. In this case, errors may not be + raised as they have not yet been handled. + + Example:: + + Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly + on both workers. Refer to :meth:`~torch.distributed.init_process_group` + API for more details. 
For example, + + export MASTER_ADDR=localhost + export MASTER_PORT=5678 + + Then run the following code in two different processes: + + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3)) + >>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1)) + >>> x = rref1.to_here() + rref2.to_here() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Below is an example of running a TorchScript function using RPC. + + >>> # On both workers: + >>> @torch.jit.script + >>> def my_script_add(tensor: torch.Tensor, scalar: int): + >>> return torch.add(tensor, scalar) + + >>> # On worker 0: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> rref = rpc.remote("worker1", my_script_add, args=(torch.ones(2), 3)) + >>> rref.to_here() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + """ + torch._C._log_api_usage_once("torch.distributed.rpc_remote") + qualified_name = torch.jit._builtins._find_builtin(func) + dst_worker_info = _to_worker_info(to) + should_profile = _get_should_profile() + + ctx_manager = _enable_rpc_profiler( + should_profile, qualified_name, func, RPCExecMode.REMOTE, dst_worker_info + ) + + with ctx_manager as rf: + args = args if args else () + kwargs = kwargs if kwargs else {} + + is_async_exec = hasattr(func, "_wrapped_async_rpc_function") + + if is_async_exec: + wrapped = func._wrapped_async_rpc_function + if isinstance(wrapped, torch.jit.ScriptFunction): + func = wrapped + + if qualified_name is not None: + rref = _invoke_remote_builtin( + dst_worker_info, qualified_name, timeout, *args, **kwargs + ) + elif 
isinstance(func, torch.jit.ScriptFunction): + rref = _invoke_remote_torchscript( + dst_worker_info.name, + torch._jit_internal._qualified_name(func), + timeout, + is_async_exec, + *args, + **kwargs, + ) + else: + (pickled_python_udf, tensors) = _default_pickler.serialize( + PythonUDF(func, args, kwargs) + ) + rref = _invoke_remote_python_udf( + dst_worker_info, pickled_python_udf, tensors, timeout, is_async_exec + ) + # attach profiling information + if should_profile: + assert torch.autograd._profiler_enabled() + assert rf is not None + fut = rf._call_end_callbacks_on_future(rref._get_future()) + rref._set_profiling_future(fut) + + return rref + + +def _invoke_rpc( + to, func, rpc_type, args=None, kwargs=None, rpc_timeout: float = UNSET_RPC_TIMEOUT +): + if not callable(func): + raise TypeError("function should be callable.") + + qualified_name = torch.jit._builtins._find_builtin(func) + dst_worker_info = _to_worker_info(to) + + should_profile = _get_should_profile() + + ctx_manager = _enable_rpc_profiler( + should_profile, qualified_name, func, rpc_type, dst_worker_info + ) + + with ctx_manager as rf: + args = args if args else () + kwargs = kwargs if kwargs else {} + + is_async_exec = hasattr(func, "_wrapped_async_rpc_function") + + if is_async_exec: + wrapped = func._wrapped_async_rpc_function + if isinstance(wrapped, torch.jit.ScriptFunction): + func = wrapped + + if qualified_name is not None: + fut = _invoke_rpc_builtin( + dst_worker_info, qualified_name, rpc_timeout, *args, **kwargs + ) + elif isinstance(func, torch.jit.ScriptFunction): + fut = _invoke_rpc_torchscript( + dst_worker_info.name, + torch._jit_internal._qualified_name(func), + args, + kwargs, + rpc_timeout, + is_async_exec, + ) + else: + (pickled_python_udf, tensors) = _default_pickler.serialize( + PythonUDF(func, args, kwargs) + ) + fut = _invoke_rpc_python_udf( + dst_worker_info, pickled_python_udf, tensors, rpc_timeout, is_async_exec + ) + if should_profile: + assert 
torch.autograd._profiler_enabled() + assert rf is not None + # Schedule profiling callbacks to run when the future completes. + # This returns a future that is completed when the original future + # completes and the profiling callbacks have been completed as well, + # to guarantee that fut.wait() completes the profiling. This new + # future will contain the same value as the original future. + fut = rf._call_end_callbacks_on_future(fut) + return fut + + +@_require_initialized +def rpc_sync(to, func, args=None, kwargs=None, timeout: float = UNSET_RPC_TIMEOUT): + r""" + Make a blocking RPC call to run function ``func`` on worker ``to``. RPC + messages are sent and received in parallel to execution of Python code. This + method is thread-safe. + + Args: + to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker. + func (Callable): a callable function, such as Python callables, builtin + operators (e.g. :meth:`~torch.add`) and annotated + TorchScript functions. + args (tuple): the argument tuple for the ``func`` invocation. + kwargs (dict): is a dictionary of keyword arguments for the ``func`` + invocation. + timeout (float, optional): timeout in seconds to use for this RPC. If + the RPC does not complete in this amount of + time, an exception indicating it has + timed out will be raised. A value of 0 + indicates an infinite timeout, i.e. a timeout + error will never be raised. If not provided, + the default value set during initialization + or with ``_set_rpc_timeout`` is used. + + Returns: + Returns the result of running ``func`` with ``args`` and ``kwargs``. + + Example:: + Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly + on both workers. Refer to :meth:`~torch.distributed.init_process_group` + API for more details. 
For example, + + export MASTER_ADDR=localhost + export MASTER_PORT=5678 + + Then run the following code in two different processes: + + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> ret = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 3)) + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Below is an example of running a TorchScript function using RPC. + + >>> # On both workers: + >>> @torch.jit.script + >>> def my_script_add(tensor: torch.Tensor, scalar: int): + >>> return torch.add(tensor, scalar) + + >>> # On worker 0: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> ret = rpc.rpc_sync("worker1", my_script_add, args=(torch.ones(2), 3)) + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + """ + torch._C._log_api_usage_once("torch.distributed.rpc_sync") + fut = _invoke_rpc(to, func, RPCExecMode.SYNC, args, kwargs, timeout) + return fut.wait() + + +@_require_initialized +def rpc_async(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT): + r""" + Make a non-blocking RPC call to run function ``func`` on worker ``to``. RPC + messages are sent and received in parallel to execution of Python code. This + method is thread-safe. This method will immediately return a + :class:`~torch.futures.Future` that can be awaited on. + + Args: + to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker. + func (Callable): a callable function, such as Python callables, builtin + operators (e.g. :meth:`~torch.add`) and annotated + TorchScript functions. + args (tuple): the argument tuple for the ``func`` invocation. 
+ kwargs (dict): is a dictionary of keyword arguments for the ``func`` + invocation. + timeout (float, optional): timeout in seconds to use for this RPC. If + the RPC does not complete in this amount of + time, an exception indicating it has + timed out will be raised. A value of 0 + indicates an infinite timeout, i.e. a timeout + error will never be raised. If not provided, + the default value set during initialization + or with ``_set_rpc_timeout`` is used. + + + Returns: + Returns a :class:`~torch.futures.Future` object that can be waited + on. When completed, the return value of ``func`` on ``args`` and + ``kwargs`` can be retrieved from the :class:`~torch.futures.Future` + object. + + .. warning :: + Using GPU tensors as arguments or return values of ``func`` is not + supported since we don't support sending GPU tensors over the wire. You + need to explicitly copy GPU tensors to CPU before using them as + arguments or return values of ``func``. + + .. warning :: + The ``rpc_async`` API does not copy storages of argument tensors until + sending them over the wire, which could be done by a different thread + depending on the RPC backend type. The caller should make sure that the + contents of those tensors stay intact until the returned + :class:`~torch.futures.Future` completes. + + Example:: + Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly + on both workers. Refer to :meth:`~torch.distributed.init_process_group` + API for more details. 
For example, + + export MASTER_ADDR=localhost + export MASTER_PORT=5678 + + Then run the following code in two different processes: + + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> fut1 = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 3)) + >>> fut2 = rpc.rpc_async("worker1", min, args=(1, 2)) + >>> result = fut1.wait() + fut2.wait() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Below is an example of running a TorchScript function using RPC. + + >>> # On both workers: + >>> @torch.jit.script + >>> def my_script_add(tensor: torch.Tensor, scalar: int): + >>> return torch.add(tensor, scalar) + + >>> # On worker 0: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> fut = rpc.rpc_async("worker1", my_script_add, args=(torch.ones(2), 3)) + >>> ret = fut.wait() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + """ + torch._C._log_api_usage_once("torch.distributed.rpc_async") + fut = _invoke_rpc(to, func, RPCExecMode.ASYNC, args, kwargs, timeout) + if hasattr(_thread_local_var, "future_list"): + _thread_local_var.future_list.append(fut) + return fut + + +def _get_should_profile(): + # Legacy profiler should be enabled. RPC profiling is not supported with + # Kineto profiler. 
+ ActiveProfilerType = torch._C._profiler.ActiveProfilerType + return ( + torch.autograd._profiler_enabled() + and torch._C._autograd._profiler_type() + == ActiveProfilerType.LEGACY # type: ignore[attr-defined] + ) + + +def _enable_rpc_profiler( + should_profile, qualified_name, func, rpc_type, dst_worker_info +): + ctx_manager = contextlib.nullcontext() + + if should_profile: + # Create appropriate string representation based on type of func + # (builtin, script, python) + if qualified_name is None: + func_name = ( + torch._jit_internal._qualified_name(func) + if isinstance(func, torch.jit.ScriptFunction) + else func.__qualname__ + ) + else: + func_name = qualified_name + # Build RPC profiling key. + rpc_profiling_key = _build_rpc_profiling_key( + rpc_type, + func_name, + get_worker_info().name, + dst_worker_info.name, + ) + RemoteProfilerManager.set_current_profiling_key(rpc_profiling_key) + # Mypy doesn't support re-def of a variable not in the same block (#1174) + ctx_manager = torch.autograd.profiler.record_function(rpc_profiling_key) # type: ignore[assignment] + + return ctx_manager diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/backend_registry.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/backend_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..a8feba8a52d33712f563aebe659f1b89770f7722 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/backend_registry.py @@ -0,0 +1,432 @@ +# mypy: allow-untyped-defs + + +import collections +import enum +from typing import cast, Dict, List, Set, Tuple + +import torch +import torch.distributed as dist + +from . 
import api, constants as rpc_constants +from ._utils import _group_membership_management, _update_group_membership + + +__all__ = [ + "backend_registered", + "register_backend", + "construct_rpc_backend_options", + "init_backend", + "BackendValue", + "BackendType", +] + +BackendValue = collections.namedtuple( + "BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"] +) + + +def _backend_type_repr(self): + return "BackendType." + self.name + + +_backend_type_doc = """ + An enum class of available backends. + + PyTorch ships with a builtin ``BackendType.TENSORPIPE`` backend. + Additional ones can be registered using the + :func:`~torch.distributed.rpc.backend_registry.register_backend` function. +""" + +# Create an enum type, `BackendType`, with empty members. +# Can't handle Function Enum API (mypy bug #9079) +BackendType = enum.Enum(value="BackendType", names={}) # type: ignore[misc] +# Unable to assign a function a method (mypy bug #2427) +BackendType.__repr__ = _backend_type_repr # type: ignore[assignment] + +if BackendType.__doc__: + BackendType.__doc__ = _backend_type_doc + + +def backend_registered(backend_name): + """ + Checks if backend_name is registered as an RPC backend. + + Args: + backend_name (str): string to identify the RPC backend. + Returns: + True if the backend has been registered with ``register_backend``, else + False. + """ + return backend_name in BackendType.__members__.keys() + + +def register_backend( + backend_name, construct_rpc_backend_options_handler, init_backend_handler +): + """Registers a new RPC backend. + + Args: + backend_name (str): backend string to identify the handler. + construct_rpc_backend_options_handler (function): + Handler that is invoked when + rpc_backend.construct_rpc_backend_options(**dict) is called. + init_backend_handler (function): Handler that is invoked when the + `_init_rpc_backend()` function is called with a backend. + This returns the agent. 
+ """ + global BackendType + if backend_registered(backend_name): + raise RuntimeError(f"RPC backend {backend_name}: already registered") + # Create a new enum type, `BackendType`, with extended members. + existing_enum_dict = {member.name: member.value for member in BackendType} + extended_enum_dict = dict( + { + backend_name: BackendValue( + construct_rpc_backend_options_handler=construct_rpc_backend_options_handler, + init_backend_handler=init_backend_handler, + ) + }, + **existing_enum_dict, + ) + # Can't handle Function Enum API (mypy bug #9079) + BackendType = enum.Enum(value="BackendType", names=extended_enum_dict) # type: ignore[misc] + # Unable to assign a function a method (mypy bug #2427) + BackendType.__repr__ = _backend_type_repr # type: ignore[assignment] + if BackendType.__doc__: + BackendType.__doc__ = _backend_type_doc + return BackendType[backend_name] + + +def construct_rpc_backend_options( + backend, + rpc_timeout=rpc_constants.DEFAULT_RPC_TIMEOUT_SEC, + init_method=rpc_constants.DEFAULT_INIT_METHOD, + **kwargs, +): + return backend.value.construct_rpc_backend_options_handler( + rpc_timeout, init_method, **kwargs + ) + + +def init_backend(backend, *args, **kwargs): + return backend.value.init_backend_handler(*args, **kwargs) + + +def _init_process_group(store, rank, world_size): + # Initialize ProcessGroup. + process_group_timeout = rpc_constants.DEFAULT_PROCESS_GROUP_TIMEOUT + + # We're using a bunch of private APIs here since `new_group` requires the + # default group to be initialized. + group = dist.ProcessGroupGloo(store, rank, world_size, process_group_timeout) + + assert group is not None, "Failed to initialize default ProcessGroup." 
+ + if (rank != -1) and (rank != group.rank()): + raise RuntimeError(f"rank argument {rank} doesn't match pg rank {group.rank()}") + if (world_size != -1) and (world_size != group.size()): + raise RuntimeError( + f"world_size argument {world_size} doesn't match pg size {group.size()}" + ) + return group + + +def _tensorpipe_construct_rpc_backend_options_handler( + rpc_timeout, + init_method, + num_worker_threads=rpc_constants.DEFAULT_NUM_WORKER_THREADS, + _transports=None, + _channels=None, + **kwargs, +): + from . import TensorPipeRpcBackendOptions + + return TensorPipeRpcBackendOptions( + rpc_timeout=rpc_timeout, + init_method=init_method, + num_worker_threads=num_worker_threads, + _transports=_transports, + _channels=_channels, + ) + + +def _tensorpipe_validate_devices(devices, device_count): + return all( + d.type == "cpu" or (d.type == "cuda" and 0 <= d.index < device_count) + for d in devices + ) + + +# detect if any worker has invalid device_map configurations, and return +# reverse device maps +def _tensorpipe_exchange_and_check_all_device_maps( + my_name, my_device_count, my_device_maps, my_devices, group +): + gathered: List[ + Tuple[str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device]] + ] = [("", 0, {}, []) for _ in range(group.size())] + dist.all_gather_object( + gathered, (my_name, my_device_count, my_device_maps, my_devices), group + ) + all_names = [name for name, _, _, _ in gathered] + all_device_counts = {name: count for name, count, _, _ in gathered} + all_device_maps = {name: map_ for name, _, map_, _ in gathered} + all_devices = {name: devices for name, _, _, devices in gathered} + + _validate_device_maps(all_names, all_device_counts, all_device_maps, all_devices) + + # passed all checked, construct reverse mapping and get list of devices handled by this agent + reverse_device_maps = _create_reverse_mapping(my_name, all_names, all_device_maps) + my_devices = _create_device_list(my_devices, my_device_maps, 
reverse_device_maps) + return reverse_device_maps, my_devices + + +def _validate_device_maps( + all_names, all_device_counts, all_device_maps, all_devices, is_static_group=True +): + for node in all_names: + devices = all_devices[node] + if len(set(devices)) != len(devices): + raise ValueError( + f"Node {node} has duplicated devices\n" f"devices = {devices}" + ) + if not _tensorpipe_validate_devices(devices, all_device_counts[node]): + raise ValueError( + f"Node {node} has devices with invalid indices\n" + f"devices = {devices}\n" + f"device count = {all_device_counts[node]}" + ) + + for source_node in all_names: + # For dynamic group (non-static) do not check the target node name since it may not have joined yet + if is_static_group and not set(all_device_maps[source_node].keys()).issubset( + all_names + ): + raise ValueError( + f"Node {source_node} has invalid target node names in its device maps\n" + f"device maps = {all_device_maps[source_node].keys()}\n" + f"node names = {all_names}" + ) + for target_node, map_ in all_device_maps[source_node].items(): + if len(set(map_.values())) != len(map_): + raise ValueError( + f"Node {source_node} has duplicated target devices " + f"in its device map for {target_node}\n" + f"device map = {map_}" + ) + if all_devices[source_node]: + if not set(map_.keys()).issubset(all_devices[source_node]): + raise ValueError( + f"Node {source_node} has unexpected source devices " + f"in its device map for {target_node}\n" + f"device map = {map_}\n" + f"devices = {all_devices[source_node]}" + ) + elif not _tensorpipe_validate_devices( + map_.keys(), all_device_counts[source_node] + ): + raise ValueError( + f"Node {source_node} has source devices with invalid indices " + f"in its device map for {target_node}\n" + f"device map = {map_}\n" + f"device count = {all_device_counts[source_node]}" + ) + if all_devices.get(target_node, []): + if not set(map_.values()).issubset(all_devices[target_node]): + raise ValueError( + f"Node {source_node} 
has unexpected target devices " + f"in its device map for {target_node}\n" + f"device map = {map_}\n" + f"devices = {all_devices[target_node]}" + ) + elif target_node in all_device_counts and not _tensorpipe_validate_devices( + map_.values(), all_device_counts[target_node] + ): + raise ValueError( + f"Node {source_node} has target devices with invalid indices " + f"in its device map for {target_node}\n" + f"device map = {map_}\n" + f"device count = {all_device_counts[target_node]}" + ) + + +def _create_device_list(my_devices, my_device_maps, reverse_device_maps): + if not my_devices: + devices_set: Set[torch.device] = set() + for map_ in my_device_maps.values(): + devices_set.update(map_.keys()) + for map_ in reverse_device_maps.values(): + devices_set.update(map_.keys()) + devices_set.discard(torch.device("cpu")) + my_devices = list(devices_set) + my_devices = sorted(my_devices, key=lambda d: d.index) + return my_devices + + +def _create_reverse_mapping(my_name, all_names, all_device_maps): + reverse_device_maps: Dict[str, Dict[torch.device, torch.device]] = {} + for node in all_names: + if my_name in all_device_maps[node]: + reverse_device_maps[node] = { + v: k for k, v in all_device_maps[node][my_name].items() + } + return reverse_device_maps + + +def _get_device_infos(): + from . import TensorPipeAgent + + agent = cast(TensorPipeAgent, api._get_current_rpc_agent()) + opts = agent._get_backend_options() + device_count = torch.cuda.device_count() + if torch.cuda.is_available() and opts.devices: + torch.cuda.init() + return device_count, opts.device_maps, opts.devices + + +def _set_devices_and_reverse_device_map(agent): + from . 
import TensorPipeAgent + + agent = cast(TensorPipeAgent, agent) + # Group state is retrieved from local agent + # On initialization, tensorpipe agent retrieves information from all existing workers, so group state is valid + my_worker_info = agent.get_worker_info() + my_name = my_worker_info.name + all_worker_infos = agent.get_worker_infos() + # One round to get device_maps of all workers and construct reverse device maps + all_device_counts, all_device_maps, all_devices, all_names = {}, {}, {}, [] + for worker_info in all_worker_infos: + worker_name = worker_info.name + if worker_name != my_name: + # TODO: make async? + device_count, device_map, devices = api.rpc_sync( + worker_name, _get_device_infos + ) + else: + opts = agent._get_backend_options() + device_count, device_map, devices = ( + torch.cuda.device_count(), + opts.device_maps, + opts.devices, + ) + all_device_counts[worker_name] = device_count + all_device_maps[worker_name] = device_map + all_devices[worker_name] = devices + all_names.append(worker_name) + + _validate_device_maps( + all_names, + all_device_counts, + all_device_maps, + all_devices, + is_static_group=False, + ) + reverse_device_maps = _create_reverse_mapping(my_name, all_names, all_device_maps) + + # Perform RPC call to all workers, including itself, to include newly joined worker information and device maps + for worker_name in all_names: + # Set device list for each worker + all_devices[worker_name] = _create_device_list( + all_devices[worker_name], all_device_maps[worker_name], reverse_device_maps + ) + api.rpc_sync( + worker_name, + _update_group_membership, + args=(my_worker_info, all_devices[worker_name], reverse_device_maps, True), + ) + + +def _tensorpipe_init_backend_handler( + store, name, rank, world_size, rpc_backend_options +): + from . import TensorPipeAgent, TensorPipeRpcBackendOptions + + if not isinstance(store, dist.Store): + raise TypeError(f"`store` must be a c10d::Store. 
{store}") + + if not isinstance(rpc_backend_options, TensorPipeRpcBackendOptions): + raise TypeError( + f"`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`. {rpc_backend_options}" + ) + + device_count = torch.cuda.device_count() + + is_static_group = True if world_size else False + # world_size is specified so this is a static group (ranks cannot join and leave) + if is_static_group: + # The agent's join method is required to behave like a barrier and perform + # collective operations, for which it relies on a process group, instead of + # re-implementing this on top of RPCs. + group = _init_process_group(store, rank, world_size) + + reverse_device_maps, devices = _tensorpipe_exchange_and_check_all_device_maps( + name, + device_count, + rpc_backend_options.device_maps, + rpc_backend_options.devices, + group, + ) + + if torch.cuda.is_available() and devices: + # It's necessary to initialize PyTorch CUDA states here (e.g., + # CUDACachingAllocator). If this is missing, we could hit errors like + # "allocator not initialized", because other processes might send + # CUDA-related RPC request to this process before user code in this + # process initializes its PyTorch CUDA states. + torch.cuda.init() + + # TODO: add try-except and destroy _agent in all processes if any fails. + agent = TensorPipeAgent( + store, + name, + rank, + world_size, + rpc_backend_options, + reverse_device_maps, + devices, + ) + + api._init_rpc_states(agent) + + # Run one dummy round of RPC to initialize channels/transports. Without + # this, it's easy to hit timeout in rpc.shutdown() if there is no other RPC + # on that process before rpc.shutdown(), as the agent initialization can + # take longer than 5s. 
+ api._all_gather(None, timeout=rpc_backend_options.rpc_timeout) + # Need a barrier here to make sure no peers leave before the rank0 finishes + # _all_gather + group.barrier().wait() + + return agent + # initialization for dynamic rpc (ranks can join and leave) + else: + with _group_membership_management(store, name, True): + # Construct TPAgent with empty reverse_device_map and devices + # these properties will be updated after initialization + agent = TensorPipeAgent( + store, + name, + rank, + world_size, + rpc_backend_options, + {}, + [], + ) + api._init_rpc_states(agent) + + try: + # Notify all workers in group this rank has joined and set devices and reverse_device_map + # This is a synchronous operation that completes once all existing ranks are updated + _set_devices_and_reverse_device_map(agent) + except Exception: + api.shutdown() + raise + return agent + + +register_backend( + "TENSORPIPE", + _tensorpipe_construct_rpc_backend_options_handler, + _tensorpipe_init_backend_handler, +) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/constants.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..56f6db4db259df01245c6615e4b41669c23a367c --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/constants.py @@ -0,0 +1,25 @@ +from datetime import timedelta +from typing import List + +from torch._C._distributed_rpc import ( + _DEFAULT_INIT_METHOD, + _DEFAULT_NUM_WORKER_THREADS, + _DEFAULT_RPC_TIMEOUT_SEC, + _UNSET_RPC_TIMEOUT, +) + + +# For any RpcAgent. +DEFAULT_RPC_TIMEOUT_SEC: float = _DEFAULT_RPC_TIMEOUT_SEC +DEFAULT_INIT_METHOD: str = _DEFAULT_INIT_METHOD +DEFAULT_SHUTDOWN_TIMEOUT: float = 0 + +# For TensorPipeAgent. +DEFAULT_NUM_WORKER_THREADS: int = _DEFAULT_NUM_WORKER_THREADS +# Ensure that we don't time out when there are long periods of time without +# any operations against the underlying ProcessGroup. 
+DEFAULT_PROCESS_GROUP_TIMEOUT: timedelta = timedelta(milliseconds=2**31 - 1) +# Value indicating that timeout is not set for RPC call, and the default should be used. +UNSET_RPC_TIMEOUT: float = _UNSET_RPC_TIMEOUT + +__all__: List[str] = [] diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/functions.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..e48ea8cc534ab87838965c947bbd0ed76d4d64c7 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/functions.py @@ -0,0 +1,169 @@ +# mypy: allow-untyped-defs +import functools + + +def async_execution(fn): + r""" + A decorator for a function indicating that the return value of the function + is guaranteed to be a :class:`~torch.futures.Future` object and this + function can run asynchronously on the RPC callee. More specifically, the + callee extracts the :class:`~torch.futures.Future` returned by the wrapped + function and installs subsequent processing steps as a callback to that + :class:`~torch.futures.Future`. The installed callback will read the value + from the :class:`~torch.futures.Future` when completed and send the + value back as the RPC response. That also means the returned + :class:`~torch.futures.Future` only exists on the callee side and is never + sent through RPC. This decorator is useful when the wrapped function's + (``fn``) execution needs to pause and resume due to, e.g., containing + :meth:`~torch.distributed.rpc.rpc_async` or waiting for other signals. + + .. note:: To enable asynchronous execution, applications must pass the + function object returned by this decorator to RPC APIs. If RPC detected + attributes installed by this decorator, it knows that this function + returns a ``Future`` object and will handle that accordingly. + However, this does not mean this decorator has to be outmost one when + defining a function. 
For example, when combined with ``@staticmethod`` + or ``@classmethod``, ``@rpc.functions.async_execution`` needs to be the + inner decorator to allow the target function be recognized as a static + or class function. This target function can still execute asynchronously + because, when accessed, the static or class method preserves attributes + installed by ``@rpc.functions.async_execution``. + + + Example:: + The returned :class:`~torch.futures.Future` object can come from + :meth:`~torch.distributed.rpc.rpc_async`, + :meth:`~torch.futures.Future.then`, or :class:`~torch.futures.Future` + constructor. The example below shows directly using the + :class:`~torch.futures.Future` returned by + :meth:`~torch.futures.Future.then`. + + >>> from torch.distributed import rpc + >>> + >>> # omitting setup and shutdown RPC + >>> + >>> # On all workers + >>> @rpc.functions.async_execution + >>> def async_add_chained(to, x, y, z): + >>> # This function runs on "worker1" and returns immediately when + >>> # the callback is installed through the `then(cb)` API. In the + >>> # mean time, the `rpc_async` to "worker2" can run concurrently. + >>> # When the return value of that `rpc_async` arrives at + >>> # "worker1", "worker1" will run the lambda function accordingly + >>> # and set the value for the previously returned `Future`, which + >>> # will then trigger RPC to send the result back to "worker0". + >>> return rpc.rpc_async(to, torch.add, args=(x, y)).then( + >>> lambda fut: fut.wait() + z + >>> ) + >>> + >>> # On worker0 + >>> # xdoctest: +SKIP + >>> ret = rpc.rpc_sync( + >>> "worker1", + >>> async_add_chained, + >>> args=("worker2", torch.ones(2), 1, 1) + >>> ) + >>> print(ret) # prints tensor([3., 3.]) + + When combined with TorchScript decorators, this decorator must be the + outmost one. 
+ + >>> from torch import Tensor + >>> from torch.futures import Future + >>> from torch.distributed import rpc + >>> + >>> # omitting setup and shutdown RPC + >>> + >>> # On all workers + >>> @torch.jit.script + >>> def script_add(x: Tensor, y: Tensor) -> Tensor: + >>> return x + y + >>> + >>> @rpc.functions.async_execution + >>> @torch.jit.script + >>> def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]: + >>> return rpc.rpc_async(to, script_add, (x, y)) + >>> + >>> # On worker0 + >>> ret = rpc.rpc_sync( + >>> "worker1", + >>> async_add, + >>> args=("worker2", torch.ones(2), 1) + >>> ) + >>> print(ret) # prints tensor([2., 2.]) + + When combined with static or class method, this decorator must be the + inner one. + + >>> from torch.distributed import rpc + >>> + >>> # omitting setup and shutdown RPC + >>> + >>> # On all workers + >>> class AsyncExecutionClass: + >>> + >>> @staticmethod + >>> @rpc.functions.async_execution + >>> def static_async_add(to, x, y, z): + >>> return rpc.rpc_async(to, torch.add, args=(x, y)).then( + >>> lambda fut: fut.wait() + z + >>> ) + >>> + >>> @classmethod + >>> @rpc.functions.async_execution + >>> def class_async_add(cls, to, x, y, z): + >>> ret_fut = torch.futures.Future() + >>> rpc.rpc_async(to, torch.add, args=(x, y)).then( + >>> lambda fut: ret_fut.set_result(fut.wait() + z) + >>> ) + >>> return ret_fut + >>> + >>> @rpc.functions.async_execution + >>> def bound_async_add(self, to, x, y, z): + >>> return rpc.rpc_async(to, torch.add, args=(x, y)).then( + >>> lambda fut: fut.wait() + z + >>> ) + >>> + >>> # On worker0 + >>> ret = rpc.rpc_sync( + >>> "worker1", + >>> AsyncExecutionClass.static_async_add, + >>> args=("worker2", torch.ones(2), 1, 2) + >>> ) + >>> print(ret) # prints tensor([4., 4.]) + >>> + >>> ret = rpc.rpc_sync( + >>> "worker1", + >>> AsyncExecutionClass.class_async_add, + >>> args=("worker2", torch.ones(2), 1, 2) + >>> ) + >>> print(ret) # prints tensor([4., 4.]) + + This decorator also works with RRef 
helpers, i.e., . + :meth:`torch.distributed.rpc.RRef.rpc_sync`, + :meth:`torch.distributed.rpc.RRef.rpc_async`, and + :meth:`torch.distributed.rpc.RRef.remote`. + + >>> from torch.distributed import rpc + >>> + >>> # reuse the AsyncExecutionClass class above + >>> rref = rpc.remote("worker1", AsyncExecutionClass) + >>> ret = rref.rpc_sync().static_async_add("worker2", torch.ones(2), 1, 2) + >>> print(ret) # prints tensor([4., 4.]) + >>> + >>> rref = rpc.remote("worker1", AsyncExecutionClass) + >>> ret = rref.rpc_async().static_async_add("worker2", torch.ones(2), 1, 2).wait() + >>> print(ret) # prints tensor([4., 4.]) + >>> + >>> rref = rpc.remote("worker1", AsyncExecutionClass) + >>> ret = rref.remote().static_async_add("worker2", torch.ones(2), 1, 2).to_here() + >>> print(ret) # prints tensor([4., 4.]) + """ + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + return fn(*args, **kwargs) + + # Can't declare and use attributes of function objects (mypy#2087) + wrapper._wrapped_async_rpc_function = fn # type: ignore[attr-defined] + return wrapper diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/internal.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/internal.py new file mode 100644 index 0000000000000000000000000000000000000000..5faf7d14d0da572a24a36085bdc334e468ab76a6 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/internal.py @@ -0,0 +1,285 @@ +# mypy: allow-untyped-defs +import collections +import copyreg +import io +import pickle +import sys +import threading +import traceback +from enum import Enum + +import torch +import torch.distributed as dist +from torch._C._distributed_rpc import _get_current_rpc_agent + + +__all__ = ["RPCExecMode", "serialize", "deserialize", "PythonUDF", "RemoteException"] + +# Thread local tensor tables to store tensors while pickling torch.Tensor +# objects +_thread_local_tensor_tables = threading.local() +_pickler = pickle.Pickler +_unpickler = pickle.Unpickler + + 
+class RPCExecMode(Enum): + SYNC = "sync" + ASYNC = "async" + ASYNC_JIT = "async_jit" + REMOTE = "remote" + + +class _InternalRPCPickler: + r""" + This class provides serialize() and deserialize() interfaces to serialize + data to be "binary string + tensor table" format + So for RPC python UDF function and args, non tensor data will be serialized + into regular binary string, tensor data will be put into thread local tensor + tables, this serialization format is consistent with builtin operator and args + using JIT pickler. This format will make tensor handling in C++ much easier, + e.g. attach tensor to distributed autograd graph in C++ + """ + + def __init__(self): + # Ignore type error because dispatch_table is defined in third-party package + self._dispatch_table = copyreg.dispatch_table.copy() # type: ignore[attr-defined] + self._dispatch_table[torch.Tensor] = self._tensor_reducer + # Used for registering customized picklers. + self._class_reducer_dict = {} + + def _register_reducer(self, obj_class, reducer): + # For the same class, only register the reducer once. 
+ if obj_class not in self._class_reducer_dict: + self._class_reducer_dict[obj_class] = reducer + + @classmethod + def _tensor_receiver(cls, tensor_index): + global _thread_local_tensor_tables + return _thread_local_tensor_tables.recv_tables[tensor_index] + + def _tensor_reducer(self, tensor): + global _thread_local_tensor_tables + _thread_local_tensor_tables.send_tables.append(tensor) + tensor_index = len(_thread_local_tensor_tables.send_tables) - 1 + return (_InternalRPCPickler._tensor_receiver, (tensor_index,)) + + @classmethod + def _py_rref_receiver(cls, rref_fork_data): + return dist.rpc.PyRRef._deserialize(rref_fork_data) + + def _py_rref_reducer(self, py_rref): + rref_fork_data = py_rref._serialize() + return (_InternalRPCPickler._py_rref_receiver, (rref_fork_data,)) + + def _rref_reducer(self, rref): + return self._py_rref_reducer(rref) + + @classmethod + def _script_module_receiver(cls, script_module_serialized): + """ + Given a serialized representation of a ScriptModule created with torch.jit.save, + loads and returns the ScriptModule. + """ + f = io.BytesIO(script_module_serialized) + m = torch.jit.load(f) + return m + + def _script_module_reducer(self, script_module): + """ + Serializes a ScriptModule. + """ + f = io.BytesIO() + torch.jit.save(script_module, f) + return (_InternalRPCPickler._script_module_receiver, (f.getvalue(),)) + + def serialize(self, obj): + r""" + Serialize non tensor data into binary string, tensor data into + tensor table + """ + f = io.BytesIO() + p = _pickler(f) + p.dispatch_table = self._dispatch_table + + # rpc api could accept user picklers inheriting from _InternalRPCPickler to serialize rref, + # user picklers could have different initialization function from _InternalRPCPickler, + # but all the user picklers should call serialize() and use _rref_reducer to pickle rref + # in python. 
also, when _internal_rpc_pickler is imported to rpc/api.py, rpc.RRef is not + # compiled yet, it is not good place to access rpc.RRef inside _InternalRPCPickler constructor, + # so putting rref's dispatch table here + # + # The return value of a `rpc.remote(..)` call is type of `rpc.PyRRef`. + # The deserialized RRef object on an RPC receiver side is type of `rpc.PyRRef`. + # Ignore type error because dispatch_table is defined in third-party package + p.dispatch_table[dist.rpc.PyRRef] = self._py_rref_reducer # type: ignore[index] + # An RRef created locally by RRef Python constructor is type of `rpc.RRef`. + # Ignore type error because dispatch_table is defined in third-party package + p.dispatch_table[dist.rpc.RRef] = self._rref_reducer # type: ignore[index] + + # Add dispatch pickling for ScriptModule or its subclass. + if isinstance(obj, torch.jit.ScriptModule): + # Ignore type error because dispatch_table is defined in third-party package + p.dispatch_table[obj.__class__] = self._script_module_reducer # type: ignore[index] + + # Install customized picklers. 
+ for class_name in self._class_reducer_dict.keys(): + p.dispatch_table[class_name] = self._class_reducer_dict[class_name] # type: ignore[index] + + # save _thread_local_tensor_tables.send_tables if it is in nested call + global _thread_local_tensor_tables + if hasattr(_thread_local_tensor_tables, "send_tables"): + old_send_tables = _thread_local_tensor_tables.send_tables + else: + old_send_tables = None + _thread_local_tensor_tables.send_tables = [] + + p.dump(obj) + + # restore _thread_local_tensor_tables.send_tables if return + # from nested call, otherwise clean up the table + tensors = _thread_local_tensor_tables.send_tables + if old_send_tables is not None: + _thread_local_tensor_tables.send_tables = old_send_tables + else: + del _thread_local_tensor_tables.send_tables + + return (f.getvalue(), tensors) + + def deserialize(self, binary_data, tensor_table): + r""" + Deserialize binary string + tensor table to original obj + """ + # save _thread_local_tensor_tables.recv_tables if it is in nested call + global _thread_local_tensor_tables + if hasattr(_thread_local_tensor_tables, "recv_tables"): + old_recv_tables = _thread_local_tensor_tables.recv_tables + else: + old_recv_tables = None + _thread_local_tensor_tables.recv_tables = tensor_table + + try: + unpickler = _unpickler(io.BytesIO(binary_data)) + ret = unpickler.load() + except AttributeError as e: + # Occurs when function is not found on module/class during + # unpickling. + except_str = ( + str(e) + + """ Default RPC pickler does not serialize + function code. 
Ensure that UDFs are defined on both caller and + callee modules.""" + ) + ret = AttributeError(except_str) + # Ensure the stack trace gets preserved + ret.__cause__ = e + + # restore _thread_local_tensor_tables.recv_tables if return + # from nested call, otherwise clean up the table + if old_recv_tables is not None: + _thread_local_tensor_tables.recv_tables = old_recv_tables + else: + del _thread_local_tensor_tables.recv_tables + + return ret + + +# Create _internal_rpc_pickler only once to initialize _dispatch_table only once +_internal_rpc_pickler = _InternalRPCPickler() + + +def serialize(obj): + return _internal_rpc_pickler.serialize(obj) + + +def deserialize(binary_data, tensor_table): + return _internal_rpc_pickler.deserialize(binary_data, tensor_table) + + +def _run_function(python_udf): + r""" + This function is exclusively called from C++. + See ``torch/csrc/distributed/rpc/python_rpc_handler.cpp``. + + Runs a Python UDF and returns its return value. + Wraps any exception in ``RemoteException`` if the function raises. + """ + try: + if isinstance(python_udf, AttributeError): + raise python_udf + result = python_udf.func(*python_udf.args, **python_udf.kwargs) + except Exception as e: + # except str = exception info + traceback string + except_str = ( + f"On {_get_current_rpc_agent().get_worker_info()}:\n" + f"{repr(e)}\n{traceback.format_exc()}" + ) + print(except_str, file=sys.stderr) + result = RemoteException(except_str, type(e)) + return result + + +def _handle_exception(result): + if isinstance(result, RemoteException): + exception_msg = result.msg.encode("utf-8").decode("unicode_escape") + # We wrap exception re-creation here in case some exception classes + # cannot be constructed directly from a string. + exc = None + try: + exc = result.exception_type(exception_msg) + except BaseException as e: + raise RuntimeError( # noqa: B904 + f"Failed to create original exception type. 
Error msg was {str(e)}" + f" Original exception on remote side was {exception_msg}" + ) from e + + if exc is not None: + raise exc + + +def _build_rpc_profiling_key( + exec_type, func_name, current_worker_name, dst_worker_name +): + """ + Builds the key that RPC calls are profiled with using the autograd profiler. + This will be the name of the corresponding Event recorded in the profiler. + + Args: + exec_type (RPCExecMode): Type of RPC/RRef call + func_name (str): Name of function being profiled. + current_worker_name (str): Name of current worker. + dst_worker_name (str): Name of the destination worker. + + Returns: + String representing profiling key + """ + profile_key = ( + f"rpc_{exec_type.value}#{func_name}({current_worker_name} -> {dst_worker_name})" + ) + return profile_key + + +def _start_record_function(exec_type, func_name, current_worker_name, dest_worker_name): + """ + This function should be called from RPC/RRef functions to create a + RecordFunction object for profiling. This function also runs the before + callbacks that start the profiling, though the user is responsible for + running the appropriate callbacks when the function to be profiled finishes. + + Args: + exec_type (RPCExecMode): Type of RPC/RRef call + func_name (str): Name of function being profiled. + current_worker_name (str): Name of current worker. + dest_worker_name (str): Name of the destination worker. + + Returns: + An instance of `torch.autograd._RecordFunction`. + """ + assert torch.autograd._profiler_enabled(), "Autograd profiler should be enabled." 
+ profile_key = f"rpc_{exec_type.value}#{str(func_name)}({current_worker_name} -> {dest_worker_name})" + rf = torch.autograd._RecordFunction() # type: ignore[attr-defined] + torch.autograd._run_before_callbacks(rf, profile_key) # type: ignore[attr-defined] + return rf + + +PythonUDF = collections.namedtuple("PythonUDF", ["func", "args", "kwargs"]) +RemoteException = collections.namedtuple("RemoteException", ["msg", "exception_type"]) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/options.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/options.py new file mode 100644 index 0000000000000000000000000000000000000000..53bf473ba56287befc57805865463955d999f00b --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/options.py @@ -0,0 +1,175 @@ +# mypy: allow-untyped-defs +from typing import Dict, List, Optional, Union + +import torch +from torch._C._distributed_rpc import _TensorPipeRpcBackendOptionsBase + +from . import constants as rpc_contants + + +DeviceType = Union[int, str, torch.device] + +__all__ = ["TensorPipeRpcBackendOptions"] + + +def _to_device(device: DeviceType) -> torch.device: + device = torch.device(device) + if device.type != "cuda": + raise ValueError( + "`set_devices` expect a list of CUDA devices, but got " + f"device type {device.type}." 
+ ) + return device + + +def _to_device_map( + device_map: Dict[DeviceType, DeviceType] +) -> Dict[torch.device, torch.device]: + full_device_map: Dict[torch.device, torch.device] = {} + reverse_map: Dict[torch.device, torch.device] = {} + for k, v in device_map.items(): + k, v = torch.device(k), torch.device(v) + if v in reverse_map: + raise ValueError( + "`device_map` only supports 1-to-1 mapping, " + f"trying to map {k} and {reverse_map[v]} to {v}" + ) + full_device_map[k] = v + reverse_map[v] = k + return full_device_map + + +def _to_device_list(devices: List[DeviceType]) -> List[torch.device]: + return list(map(_to_device, devices)) + + +class TensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase): + r""" + The backend options for + :class:`~torch.distributed.rpc.TensorPipeAgent`, derived from + :class:`~torch.distributed.rpc.RpcBackendOptions`. + + Args: + num_worker_threads (int, optional): The number of threads in the + thread-pool used by + :class:`~torch.distributed.rpc.TensorPipeAgent` to execute + requests (default: 16). + rpc_timeout (float, optional): The default timeout, in seconds, + for RPC requests (default: 60 seconds). If the RPC has not + completed in this timeframe, an exception indicating so will + be raised. Callers can override this timeout for individual + RPCs in :meth:`~torch.distributed.rpc.rpc_sync` and + :meth:`~torch.distributed.rpc.rpc_async` if necessary. + init_method (str, optional): The URL to initialize the distributed + store used for rendezvous. It takes any value accepted for the + same argument of :meth:`~torch.distributed.init_process_group` + (default: ``env://``). + device_maps (Dict[str, Dict], optional): Device placement mappings from + this worker to the callee. Key is the callee worker name and value + the dictionary (``Dict`` of ``int``, ``str``, or ``torch.device``) + that maps this worker's devices to the callee worker's devices. 
+ (default: ``None``) + devices (List[int, str, or ``torch.device``], optional): all local + CUDA devices used by RPC agent. By Default, it will be initialized + to all local devices from its own ``device_maps`` and corresponding + devices from its peers' ``device_maps``. When processing CUDA RPC + requests, the agent will properly synchronize CUDA streams for + all devices in this ``List``. + """ + + def __init__( + self, + *, + num_worker_threads: int = rpc_contants.DEFAULT_NUM_WORKER_THREADS, + rpc_timeout: float = rpc_contants.DEFAULT_RPC_TIMEOUT_SEC, + init_method: str = rpc_contants.DEFAULT_INIT_METHOD, + device_maps: Optional[Dict[str, Dict[DeviceType, DeviceType]]] = None, + devices: Optional[List[DeviceType]] = None, + _transports: Optional[List] = None, + _channels: Optional[List] = None, + ): + full_device_maps = ( + {} + if device_maps is None + else {k: _to_device_map(v) for k, v in device_maps.items()} + ) + full_device_list = [] if devices is None else _to_device_list(devices) + super().__init__( + num_worker_threads, + _transports, + _channels, + rpc_timeout, + init_method, + full_device_maps, + full_device_list, + ) + + def set_device_map(self, to: str, device_map: Dict[DeviceType, DeviceType]): + r""" + Set device mapping between each RPC caller and callee pair. This + function can be called multiple times to incrementally add + device placement configurations. + + Args: + to (str): Callee name. + device_map (Dict of int, str, or torch.device): Device placement + mappings from this worker to the callee. This map must be + invertible. 
+ + Example: + >>> # xdoctest: +SKIP("distributed") + >>> # both workers + >>> def add(x, y): + >>> print(x) # tensor([1., 1.], device='cuda:1') + >>> return x + y, (x + y).to(2) + >>> + >>> # on worker 0 + >>> options = TensorPipeRpcBackendOptions( + >>> num_worker_threads=8, + >>> device_maps={"worker1": {0: 1}} + >>> # maps worker0's cuda:0 to worker1's cuda:1 + >>> ) + >>> options.set_device_map("worker1", {1: 2}) + >>> # maps worker0's cuda:1 to worker1's cuda:2 + >>> + >>> rpc.init_rpc( + >>> "worker0", + >>> rank=0, + >>> world_size=2, + >>> backend=rpc.BackendType.TENSORPIPE, + >>> rpc_backend_options=options + >>> ) + >>> + >>> x = torch.ones(2) + >>> rets = rpc.rpc_sync("worker1", add, args=(x.to(0), 1)) + >>> # The first argument will be moved to cuda:1 on worker1. When + >>> # sending the return value back, it will follow the invert of + >>> # the device map, and hence will be moved back to cuda:0 and + >>> # cuda:1 on worker0 + >>> print(rets[0]) # tensor([2., 2.], device='cuda:0') + >>> print(rets[1]) # tensor([2., 2.], device='cuda:1') + """ + full_device_map = _to_device_map(device_map) + curr_device_maps = super().device_maps + + if to in curr_device_maps: + for k, v in full_device_map.items(): + if k in curr_device_maps[to] and v != curr_device_maps[to][k]: + raise ValueError( + "`set_device_map` only supports 1-to-1 mapping, trying" + f" to map {k} to {v} and {curr_device_maps[to][k]}" + ) + + super()._set_device_map(to, full_device_map) + + def set_devices(self, devices: List[DeviceType]): + r""" + Set local devices used by the TensorPipe RPC agent. When processing + CUDA RPC requests, the TensorPipe RPC agent will properly synchronize + CUDA streams for all devices in this ``List``. + + Args: + devices (List of int, str, or torch.device): local devices used by + the TensorPipe RPC agent. 
+ """ + self.devices = _to_device_list(devices) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/rref_proxy.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/rref_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..85927b68bacb9c3fa33eab64a62e52bbf90fd054 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/rref_proxy.py @@ -0,0 +1,80 @@ +# mypy: allow-untyped-defs +from functools import partial + +import torch +from torch.futures import Future + +from . import functions, rpc_async +from .constants import UNSET_RPC_TIMEOUT + + +def _local_invoke(rref, func_name, args, kwargs): + return getattr(rref.local_value(), func_name)(*args, **kwargs) + + +@functions.async_execution +def _local_invoke_async_execution(rref, func_name, args, kwargs): + return getattr(rref.local_value(), func_name)(*args, **kwargs) + + +def _invoke_rpc(rref, rpc_api, func_name, timeout, *args, **kwargs): + def _rref_type_cont(rref_fut): + rref_type = rref_fut.value() + + _invoke_func = _local_invoke + # Bypass ScriptModules when checking for async function attribute. + bypass_type = issubclass(rref_type, torch.jit.ScriptModule) or issubclass( + rref_type, torch._C.ScriptModule + ) + if not bypass_type: + func = getattr(rref_type, func_name) + if hasattr(func, "_wrapped_async_rpc_function"): + _invoke_func = _local_invoke_async_execution + + return rpc_api( + rref.owner(), + _invoke_func, + args=(rref, func_name, args, kwargs), + timeout=timeout, + ) + + rref_fut = rref._get_type(timeout=timeout, blocking=False) + + if rpc_api != rpc_async: + rref_fut.wait() + return _rref_type_cont(rref_fut) + else: + # A little explanation on this. + # rpc_async returns a Future pointing to the return value of `func_name`, it returns a `Future[T]` + # Calling _rref_type_cont from the `then` lambda causes Future wrapping. 
IOW, `then` returns a `Future[Future[T]]` + # To address that, we return a Future that is completed with the result of the async call. + result: Future = Future() + + def _wrap_rref_type_cont(fut): + try: + _rref_type_cont(fut).then(_complete_op) + except BaseException as ex: + result.set_exception(ex) + + def _complete_op(fut): + try: + result.set_result(fut.value()) + except BaseException as ex: + result.set_exception(ex) + + rref_fut.then(_wrap_rref_type_cont) + return result + + +# This class manages proxied RPC API calls for RRefs. It is entirely used from +# C++ (see python_rpc_handler.cpp). +class RRefProxy: + def __init__(self, rref, rpc_api, timeout=UNSET_RPC_TIMEOUT): + self.rref = rref + self.rpc_api = rpc_api + self.rpc_timeout = timeout + + def __getattr__(self, func_name): + return partial( + _invoke_rpc, self.rref, self.rpc_api, func_name, self.rpc_timeout + ) diff --git a/janus/lib/python3.10/site-packages/torch/distributed/rpc/server_process_global_profiler.py b/janus/lib/python3.10/site-packages/torch/distributed/rpc/server_process_global_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..46696599032a88d126b03d12d2f47bc28db184d7 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/distributed/rpc/server_process_global_profiler.py @@ -0,0 +1,183 @@ +#!/usr/bin/python3 +# mypy: allow-untyped-defs + +import itertools +from typing import List + +import torch +from torch.autograd.profiler_legacy import profile + +from . import ( + _disable_server_process_global_profiler, + _enable_server_process_global_profiler, +) + + +__all__: List[str] = [] + + +class _server_process_global_profile(profile): + """ + It has the same API as ``torch.autograd.profiler.profile`` class, + except that it enables profiling on all threads running RPC server request callbacks. + + Context manager that manages autograd profiler state and holds a summary of results. 
+ Under the hood it just records events of functions being executed in C++ and + exposes those events to Python. You can wrap any code into it and it will + only report runtime of PyTorch functions. + Note: profiler is thread local and is automatically propagated into the async tasks + + Args: + enabled (bool, optional): Setting this to False makes this context manager a no-op. + Default: ``True``. + + use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API. + Adds approximately 4us of overhead to each tensor operation. + Default: ``False`` + + record_shapes (bool, optional): If shapes recording is set, information + about input dimensions will be collected. This allows one to see which + dimensions have been used under the hood and further group by them + using prof.key_averages(group_by_input_shape=True). Please note that + shape recording might skew your profiling data. It is recommended to + use separate runs with and without shape recording to validate the timing. + Most likely the skew will be negligible for bottom most events (in a case + of nested function calls). But for higher level functions the total + self cpu time might be artificially increased because of the shape + collection. + + profile_memory (bool, optional): Whether to report memory usage, default: ``False`` + + .. warning: + Enabling memory profiling incurs additional profiler overhead + + .. warning: + Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_), + one cannot use the profiler with ``use_cuda = True`` to benchmark + DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading, + please use ``use_cuda = False`` or ``num_workers = 0``. 
+ + Example: + >>> # xdoctest: +SKIP + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> x, y = torch.tensor(1), torch.tensor(2) + >>> outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + >>> outer_profile_rref.rpc_sync().__enter__() + >>> rpc.rpc_sync(dst_worker_name, torch.add, (x, y)) + >>> inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + >>> inner_profile_rref.rpc_sync().__enter__() + >>> rpc.rpc_sync(dst_worker_name, torch.sub, (x, y)) + >>> inner_profile_rref.rpc_sync().__exit__(None, None, None) + >>> outer_profile_rref.rpc_sync().__exit__(None, None, None) + >>> print(inner_profile_rref.rpc_sync().key_averages()) + --------- --------------- --------------- --------------- --------------- --------------- --------------- + Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls + --------- --------------- --------------- --------------- --------------- --------------- --------------- + sub 85.06% 76.275us 100.00% 89.667us 89.667us 1 + empty 14.94% 13.392us 14.94% 13.392us 13.392us 1 + --------- --------------- --------------- --------------- --------------- --------------- --------------- + Self CPU time total: 89.667us + >>> print(outer_profile_rref.rpc_sync().key_averages()) + --------- --------------- --------------- --------------- --------------- --------------- --------------- + Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls + --------- --------------- --------------- --------------- --------------- --------------- --------------- + sub 35.65% 76.275us 41.91% 89.667us 89.667us 1 + empty 12.67% 27.101us 12.67% 27.101us 13.551us 2 + add 51.68% 110.550us 58.09% 124.259us 124.259us 1 + --------- --------------- --------------- --------------- --------------- --------------- --------------- + Self CPU time total: 213.926us + >>> 
rpc.shutdown() + + >>> # On worker 1: + >>> import torch.distributed.rpc as rpc + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> # wait for worker 0 to finish work, and then shutdown. + >>> rpc.shutdown() + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def __enter__(self): + """ + Turn on server-side process-global profiling. + This enables thread-local profiler on all RPC threads running server-side request callbacks. + """ + if not self.enabled: + return + + if self.entered: # type: ignore[has-type] + raise RuntimeError("autograd profiler traces are not reentrant") + self.entered = True + + profiler_kind = ( + torch.autograd.ProfilerState.CUDA + if self.use_cuda + else torch.autograd.ProfilerState.CPU + ) + profiler_config = torch.autograd.ProfilerConfig( + profiler_kind, + self.record_shapes, + self.profile_memory, + False, + False, + False, + torch.profiler._ExperimentalConfig(), + ) + _enable_server_process_global_profiler(profiler_config) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """ + Turn off server-side process-global profiling. + Aggregate all profiling events recorded by RPC threads. + + These attributes are assigned on exiting context. + + Attributes: + function_events (torch.autograd.profiler.EventList). It's a list that has helper + methods, like 1) show record items in a pretty-print table. + 2) do averaging by grouping on keys. 3) and more. + + process_global_function_events (List[torch.autograd.profiler.FunctionEvent]). + It's a list of ``FunctionEvent`` elements. Every element is a profiling result + of an RPC request handling within the profiling range. + """ + if not self.enabled: + return + + process_global_events = _disable_server_process_global_profiler() + + # Every element in this list is a thread profiling result from an RPC request handling. 
+ process_global_function_events = [] + for thread_local_events in process_global_events: + # Parse from ``Event``s to ``FunctionEvent``s. + thread_local_function_events = ( + torch.autograd.profiler_legacy._parse_legacy_records( + thread_local_events + ) + ) + thread_local_function_events.sort( + key=lambda function_event: [ + function_event.time_range.start, + -(function_event.time_range.end), + ] + ) + process_global_function_events.append(thread_local_function_events) + + flattened_function_events = list( + itertools.chain.from_iterable(process_global_function_events) + ) + self.function_events = torch.autograd.profiler_util.EventList( + flattened_function_events, + use_device="cuda" if self.use_cuda else None, + profile_memory=self.profile_memory, + ) + self.function_events._build_tree() + + self.process_global_function_events = process_global_function_events + + return False diff --git a/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_collective_utils.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_collective_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88741ec247c2f83cc1d983670867ba2a732b284e Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_collective_utils.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_dispatch.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fa061277a87fb2d9b10ed771246be05fe628aed Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_dispatch.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_dtensor_spec.cpython-310.pyc 
b/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_dtensor_spec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c509709e0d716e184dd130460b5084b617e4c4e Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_dtensor_spec.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_tp_conv.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_tp_conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..624948a864f3e8de441d30a9d6f40b38be9ad968 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/tensor/__pycache__/_tp_conv.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a65ad114ce7ca0720cc54c6cfdae4efc8535ee7 Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/_op_coverage.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/_op_coverage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a5779f96f5ed93f7fbe3dc88dc5c454a6dc3a2b Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/_op_coverage.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/_visualize_sharding.cpython-310.pyc b/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/_visualize_sharding.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..08c0521554f334d3ab9ca177d368261992d40dff Binary files /dev/null and b/janus/lib/python3.10/site-packages/torch/distributed/tensor/debug/__pycache__/_visualize_sharding.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/torch/export/__init__.py b/janus/lib/python3.10/site-packages/torch/export/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..367b3f127a16bba26b6e4191abc00220918c0093 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/export/__init__.py @@ -0,0 +1,518 @@ +import builtins +import copy +import dataclasses +import inspect +import io +import os +import sys +import typing +import warnings +import zipfile +from enum import auto, Enum +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Tuple, + Type, + TYPE_CHECKING, + Union, +) + +import torch +import torch.utils._pytree as pytree +from torch.fx._compatibility import compatibility +from torch.fx.passes.infra.pass_base import PassResult +from torch.fx.passes.infra.pass_manager import PassManager +from torch.utils._pytree import ( + FlattenFunc, + FromDumpableContextFn, + ToDumpableContextFn, + UnflattenFunc, +) + + +if TYPE_CHECKING: + # Import the following modules during type checking to enable code intelligence features, + # Do not import unconditionally, as they import sympy and importing sympy is very slow + from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint + + +__all__ = [ + "Constraint", + "Dim", + "ExportBackwardSignature", + "ExportGraphSignature", + "ExportedProgram", + "ModuleCallEntry", + "ModuleCallSignature", + "dims", + "export", + "export_for_training", + "load", + "register_dataclass", + "save", + "unflatten", + "FlatArgsAdapter", + "UnflattenedModule", +] + + +from .dynamic_shapes import Constraint, Dim, dims, ShapesCollection +from .exported_program import ExportedProgram, ModuleCallEntry, ModuleCallSignature 
+from .graph_signature import ExportBackwardSignature, ExportGraphSignature +from .unflatten import FlatArgsAdapter, unflatten, UnflattenedModule + + +PassType = Callable[[torch.fx.GraphModule], Optional[PassResult]] + + +def export_for_training( + mod: torch.nn.Module, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + *, + dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None, + strict: bool = True, + preserve_module_call_signature: Tuple[str, ...] = (), +) -> ExportedProgram: + """ + :func:`export_for_training` takes any nn.Module along with example inputs, and produces a traced graph representing + only the Tensor computation of the function in an Ahead-of-Time (AOT) fashion, + which can subsequently be executed with different inputs or serialized. The + traced graph (1) produces normalized operators in the all ATen operator set + (as well as any user-specified custom operators), (2) has eliminated all Python control + flow and data structures (with certain exceptions), and (3) records the set of + shape constraints needed to show that this normalization and control-flow elimination + is sound for future inputs. This API is intended for PT2 quantization training use cases + and will soon be the default IR of torch.export.export in the near future. + + **Soundness Guarantee** + + See :func:`export()` docstring for more details. + + Args: + mod: We will trace the forward method of this module. + + args: Example positional inputs. + + kwargs: Optional example keyword inputs. + + dynamic_shapes: + An optional argument where the type should either be: + 1) a dict from argument names of ``f`` to their dynamic shape specifications, + 2) a tuple that specifies dynamic shape specifications for each input in original order. + If you are specifying dynamism on keyword args, you will need to pass them in the order that + is defined in the original function signature. 
+ + The dynamic shape of a tensor argument can be specified as either + (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is + not required to include static dimension indices in this dict, but when they are, + they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None, + where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions + are denoted by None. Arguments that are dicts or tuples / lists of tensors are + recursively specified by using mappings or sequences of contained specifications. + + strict: When enabled (default), the export function will trace the program through + TorchDynamo which will ensure the soundness of the resulting graph. Otherwise, the + exported program will not validate the implicit assumptions baked into the graph and + may cause behavior divergence between the original model and the exported one. This is + useful when users need to workaround bugs in the tracer, or simply want incrementally + enable safety in their models. Note that this does not affect the resulting IR spec + to be different and the model will be serialized in the same way regardless of what value + is passed here. + WARNING: This option is experimental and use this at your own risk. + + Returns: + An :class:`ExportedProgram` containing the traced callable. + + **Acceptable input/output types** + + Acceptable types of inputs (for ``args`` and ``kwargs``) and outputs include: + + - Primitive types, i.e. ``torch.Tensor``, ``int``, ``float``, ``bool`` and ``str``. + - Dataclasses, but they must be registered by calling :func:`register_dataclass` first. + - (Nested) Data structures comprising of ``dict``, ``list``, ``tuple``, ``namedtuple`` and + ``OrderedDict`` containing all above types. + + """ + from ._trace import _export_for_training + + if not isinstance(mod, torch.nn.Module): + raise ValueError( + f"Expected `mod` to be an instance of `torch.nn.Module`, got {type(mod)}." 
    )
    if isinstance(mod, torch.jit.ScriptModule):
        raise ValueError(
            "Exporting a ScriptModule is not supported. "
            "Maybe try converting your ScriptModule to an ExportedProgram "
            "using `TS2EPConverter(mod, args, kwargs).convert()` instead."
        )
    return _export_for_training(
        mod,
        args,
        kwargs,
        dynamic_shapes,
        strict=strict,
        preserve_module_call_signature=preserve_module_call_signature,
    )


def export(
    mod: torch.nn.Module,
    args: Tuple[Any, ...],
    kwargs: Optional[Dict[str, Any]] = None,
    *,
    dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
    strict: bool = True,
    preserve_module_call_signature: Tuple[str, ...] = (),
) -> ExportedProgram:
    """
    :func:`export` takes an arbitrary Python callable (an nn.Module, a function or
    a method) along with example inputs, and produces a traced graph representing
    only the Tensor computation of the function in an Ahead-of-Time (AOT) fashion,
    which can subsequently be executed with different inputs or serialized. The
    traced graph (1) produces normalized operators in the functional ATen operator set
    (as well as any user-specified custom operators), (2) has eliminated all Python control
    flow and data structures (with certain exceptions), and (3) records the set of
    shape constraints needed to show that this normalization and control-flow elimination
    is sound for future inputs.

    **Soundness Guarantee**

    While tracing, :func:`export()` takes note of shape-related assumptions
    made by the user program and the underlying PyTorch operator kernels.
    The output :class:`ExportedProgram` is considered valid only when these
    assumptions hold true.

    Tracing makes assumptions on the shapes (not values) of input tensors.
    Such assumptions must be validated at graph capture time for :func:`export`
    to succeed. Specifically:

    - Assumptions on static shapes of input tensors are automatically validated without additional effort.
    - Assumptions on dynamic shape of input tensors require explicit specification
      by using the :func:`Dim` API to construct dynamic dimensions and by associating
      them with example inputs through the ``dynamic_shapes`` argument.

    If any assumption can not be validated, a fatal error will be raised. When that happens,
    the error message will include suggested fixes to the specification that are needed
    to validate the assumptions. For example :func:`export` might suggest the
    following fix to the definition of a dynamic dimension ``dim0_x``, say appearing in the
    shape associated with input ``x``, that was previously defined as ``Dim("dim0_x")``::

        dim = Dim("dim0_x", max=5)

    This example means the generated code requires dimension 0 of input ``x`` to be less
    than or equal to 5 to be valid. You can inspect the suggested fixes to dynamic dimension
    definitions and then copy them verbatim into your code without needing to change the
    ``dynamic_shapes`` argument to your :func:`export` call.

    Args:
        mod: We will trace the forward method of this module.

        args: Example positional inputs.

        kwargs: Optional example keyword inputs.

        dynamic_shapes:
            An optional argument where the type should either be:
            1) a dict from argument names of ``f`` to their dynamic shape specifications,
            2) a tuple that specifies dynamic shape specifications for each input in original order.
            If you are specifying dynamism on keyword args, you will need to pass them in the order that
            is defined in the original function signature.

            The dynamic shape of a tensor argument can be specified as either
            (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
            not required to include static dimension indices in this dict, but when they are,
            they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
            where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
            are denoted by None. Arguments that are dicts or tuples / lists of tensors are
            recursively specified by using mappings or sequences of contained specifications.

        strict: When enabled (default), the export function will trace the program through
            TorchDynamo which will ensure the soundness of the resulting graph. Otherwise, the
            exported program will not validate the implicit assumptions baked into the graph and
            may cause behavior divergence between the original model and the exported one. This is
            useful when users need to workaround bugs in the tracer, or simply want incrementally
            enable safety in their models. Note that this does not affect the resulting IR spec
            to be different and the model will be serialized in the same way regardless of what value
            is passed here.
            WARNING: This option is experimental and use this at your own risk.

    Returns:
        An :class:`ExportedProgram` containing the traced callable.

    **Acceptable input/output types**

    Acceptable types of inputs (for ``args`` and ``kwargs``) and outputs include:

    - Primitive types, i.e. ``torch.Tensor``, ``int``, ``float``, ``bool`` and ``str``.
    - Dataclasses, but they must be registered by calling :func:`register_dataclass` first.
    - (Nested) Data structures comprising of ``dict``, ``list``, ``tuple``, ``namedtuple`` and
      ``OrderedDict`` containing all above types.

    """
    from ._trace import _export

    # Validate eagerly so users get a clear error instead of a failure deep
    # inside tracing.
    if not isinstance(mod, torch.nn.Module):
        raise ValueError(
            f"Expected `mod` to be an instance of `torch.nn.Module`, got {type(mod)}."
        )
    # NOTE: ScriptModule is a subclass of nn.Module, so this check must come
    # after (not instead of) the isinstance check above.
    if isinstance(mod, torch.jit.ScriptModule):
        raise ValueError(
            "Exporting a ScriptModule is not supported. "
            "Maybe try converting your ScriptModule to an ExportedProgram "
            "using `TS2EPConverter(mod, args, kwargs).convert()` instead."
        )
    # NOTE(review): pre_dispatch=True presumably captures the graph before
    # dispatcher-level decompositions run — confirm against _export's docs.
    return _export(
        mod,
        args,
        kwargs,
        dynamic_shapes,
        strict=strict,
        preserve_module_call_signature=preserve_module_call_signature,
        pre_dispatch=True,
    )


def save(
    ep: ExportedProgram,
    f: Union[str, os.PathLike, io.BytesIO],
    *,
    extra_files: Optional[Dict[str, Any]] = None,
    opset_version: Optional[Dict[str, int]] = None,
) -> None:
    """

    .. warning::
        Under active development, saved files may not be usable in newer versions
        of PyTorch.

    Saves an :class:`ExportedProgram` to a file-like object. It can then be
    loaded using the Python API :func:`torch.export.load`.

    Args:
        ep (ExportedProgram): The exported program to save.

        f (Union[str, os.PathLike, io.BytesIO]): A file-like object (has to
            implement write and flush) or a string containing a file name.

        extra_files (Optional[Dict[str, Any]]): Map from filename to contents
            which will be stored as part of f.

        opset_version (Optional[Dict[str, int]]): A map of opset names
            to the version of this opset


    Example::

        import torch
        import io

        class MyModule(torch.nn.Module):
            def forward(self, x):
                return x + 10

        ep = torch.export.export(MyModule(), (torch.randn(5),))

        # Save to file
        torch.export.save(ep, 'exported_program.pt2')

        # Save to io.BytesIO buffer
        buffer = io.BytesIO()
        torch.export.save(ep, buffer)

        # Save with extra files
        extra_files = {'foo.txt': b'bar'.decode('utf-8')}
        torch.export.save(ep, 'exported_program.pt2', extra_files=extra_files)

    """
    if not isinstance(ep, ExportedProgram):
        raise TypeError(
            f"The 'ep' parameter must be an instance of 'ExportedProgram', got '{type(ep).__name__}' instead."
        )

    from torch._export.serde.schema import SCHEMA_VERSION
    from torch._export.serde.serialize import serialize, SerializedArtifact

    artifact: SerializedArtifact = serialize(ep, opset_version)

    if isinstance(f, (str, os.PathLike)):
        f = os.fspath(f)

    with zipfile.ZipFile(f, "w") as zipf:
        # Save every field in the SerializedArtifact to a file.
        assert isinstance(artifact.exported_program, bytes)
        zipf.writestr("serialized_exported_program.json", artifact.exported_program)
        zipf.writestr("serialized_state_dict.pt", artifact.state_dict)
        zipf.writestr("serialized_constants.pt", artifact.constants)
        zipf.writestr("serialized_example_inputs.pt", artifact.example_inputs)

        # Version marker checked by load() for compatibility.
        zipf.writestr("version", ".".join(map(str, SCHEMA_VERSION)))

        # Add extra files if provided
        if extra_files:
            for extra_file_name, content in extra_files.items():
                encoded_content = content.encode("utf-8")
                zipf.writestr(f"extra_files/{extra_file_name}", encoded_content)


def load(
    f: Union[str, os.PathLike, io.BytesIO],
    *,
    extra_files: Optional[Dict[str, Any]] = None,
    expected_opset_version: Optional[Dict[str, int]] = None,
) -> ExportedProgram:
    """

    .. warning::
        Under active development, saved files may not be usable in newer versions
        of PyTorch.

    Loads an :class:`ExportedProgram` previously saved with
    :func:`torch.export.save`.

    Args:
        f (Union[str, os.PathLike, io.BytesIO]): A file-like object or a
            string containing a file name, as previously passed to
            :func:`torch.export.save`.

        extra_files (Optional[Dict[str, Any]]): The extra filenames given in
            this map would be loaded and their content would be stored in the
            provided map.

        expected_opset_version (Optional[Dict[str, int]]): A map of opset names
            to expected opset versions

    Returns:
        An :class:`ExportedProgram` object

    Example::

        import torch
        import io

        # Load ExportedProgram from file
        ep = torch.export.load('exported_program.pt2')

        # Load ExportedProgram from io.BytesIO object
        with open('exported_program.pt2', 'rb') as f:
            buffer = io.BytesIO(f.read())
        buffer.seek(0)
        ep = torch.export.load(buffer)

        # Load with extra files.
        extra_files = {'foo.txt': ''}  # values will be replaced with data
        ep = torch.export.load('exported_program.pt2', extra_files=extra_files)
        print(extra_files['foo.txt'])
        print(ep(torch.randn(5)))
    """
    if isinstance(f, (str, os.PathLike)):
        f = os.fspath(f)

    extra_files = extra_files or {}

    with zipfile.ZipFile(f, "r") as zipf:
        # Check the version
        version = zipf.read("version").decode().split(".")
        from torch._export.serde.schema import SCHEMA_VERSION

        # Only the major version must match; minor-version drift is tolerated.
        assert len(version) == len(SCHEMA_VERSION)
        if version[0] != str(SCHEMA_VERSION[0]):
            raise RuntimeError(
                f"Serialized version {version} does not match our current "
                f"schema version {SCHEMA_VERSION}."
            )

        from torch._export.serde.serialize import deserialize, SerializedArtifact

        # Load serialized_ep and serialized_state_dict from the zip file

        serialized_exported_program: Optional[bytes] = None
        serialized_state_dict: Optional[bytes] = None
        serialized_constants: Optional[bytes] = None
        serialized_example_inputs: Optional[bytes] = None

        for file_info in zipf.infolist():
            file_content = zipf.read(file_info.filename)

            if file_info.filename == "serialized_exported_program.json":
                serialized_exported_program = file_content
            elif file_info.filename == "serialized_state_dict.json":
                # Older archives used .json suffixes for these payloads.
                warnings.warn("This version of file is deprecated")
                serialized_state_dict = file_content
            elif file_info.filename == "serialized_constants.json":
                warnings.warn("This version of file is deprecated")
                serialized_constants = file_content
            elif file_info.filename == "serialized_state_dict.pt":
                serialized_state_dict = file_content
            elif file_info.filename == "serialized_constants.pt":
                serialized_constants = file_content
            elif file_info.filename == "serialized_example_inputs.pt":
                serialized_example_inputs = file_content
            elif file_info.filename.startswith("extra_files"):
                filename = file_info.filename.split("/", 1)[1]
                extra_files[filename] = file_content.decode("utf-8")

        assert serialized_exported_program is not None
        assert serialized_state_dict is not None
        assert serialized_constants is not None
        assert serialized_example_inputs is not None
        artifact: SerializedArtifact = SerializedArtifact(
            serialized_exported_program,
            serialized_state_dict,
            serialized_constants,
            serialized_example_inputs,
        )

        # Deserialize ExportedProgram
        ep = deserialize(artifact, expected_opset_version)

        return ep


def register_dataclass(
    cls: Type[Any],
    *,
    serialized_type_name: Optional[str] = None,
) -> None:
    """
    Registers a dataclass as a valid input/output type for :func:`torch.export.export`.

    Args:
        cls: the dataclass type to register
        serialized_type_name: The serialized name for the dataclass. This is
            required if you want to serialize the pytree TreeSpec containing this
            dataclass.

    Example::

        @dataclass
        class InputDataClass:
            feature: torch.Tensor
            bias: int

        class OutputDataClass:
            res: torch.Tensor

        torch.export.register_dataclass(InputDataClass)
        torch.export.register_dataclass(OutputDataClass)

        def fn(o: InputDataClass) -> torch.Tensor:
            res = o.feature + o.bias
            return OutputDataClass(res=res)

        ep = torch.export.export(fn, (InputDataClass(torch.ones(2, 2), 1), ))
        print(ep)

    """

    from torch._export.utils import register_dataclass_as_pytree_node

    return register_dataclass_as_pytree_node(
        cls, serialized_type_name=serialized_type_name
    )
diff --git a/janus/lib/python3.10/site-packages/torch/export/_unlift.py b/janus/lib/python3.10/site-packages/torch/export/_unlift.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad48372e2d9aef93e3e079135f1bd4af73904201
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/export/_unlift.py
@@ -0,0 +1,361 @@
# mypy: allow-untyped-defs
import copy
import warnings
from itertools import chain
from typing import Any, Dict, List, Optional, Tuple

import torch
import torch.utils._pytree as pytree
from torch._export.utils import _check_input_constraints_for_graph
from torch.export.unflatten import _assign_attr, _AttrKind, _recursive_getattr
from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo

from ._remove_effect_tokens_pass import _remove_effect_tokens
from .exported_program import (
    ExportedProgram,
    ExportGraphSignature,
    InputKind,
    OutputKind,
)


@torch._dynamo.disable
def _check_input_constraints_pre_hook(self, *args, **kwargs):
    # Forward pre-hook: flatten incoming args with pytree and verify they match
    # the input tree spec recorded at export time before checking range
    # constraints on the placeholder nodes.
    flat_args_with_path, received_spec = pytree.tree_flatten_with_path(args)

    if received_spec != self._in_spec:
        raise ValueError(  # noqa: B904
            "Trying to flatten user inputs with exported input tree spec: \n"
            f"{self._in_spec}\n"
            "but actually got inputs with tree spec of: \n"
            f"{received_spec}"
        )
    return _check_input_constraints_for_graph(
        [node for node in self.graph.nodes if node.op == "placeholder"],
        flat_args_with_path,
        self.range_constraints,
    )


def _unlift_inputs_as_getattr(
    gm: torch.fx.GraphModule,
    lifted_inputs: List[Optional[str]],
) -> Tuple[Dict[str, torch.fx.Node], Dict[str, torch.fx.Node]]:
    """
    Unlift inputs referring to params/buffers/constants as getattr nodes in the
    graph.

    Returns a pair of dicts: (lifted fqn -> new get_attr node,
    user-input placeholder name -> placeholder node).
    """
    unlifted_name_to_node = {}
    input_name_to_node = {}

    placeholder_nodes = [node for node in gm.graph.nodes if node.op == "placeholder"]
    assert len(lifted_inputs) == len(placeholder_nodes)
    for input_node, lifted_node in zip(placeholder_nodes, lifted_inputs):
        if lifted_node is None:
            # Genuine user input: keep the placeholder as-is.
            input_name_to_node[input_node.name] = input_node

        else:
            # Lifted param/buffer/constant: replace the placeholder with a
            # get_attr node on the attribute fqn, preserving node metadata.
            with gm.graph.inserting_after(input_node):
                getattr_node = gm.graph.get_attr(lifted_node)
                input_node.replace_all_uses_with(getattr_node)
                metadata = input_node.meta
                gm.graph.erase_node(input_node)
                getattr_node.meta = metadata
                unlifted_name_to_node[lifted_node] = getattr_node

    return unlifted_name_to_node, input_name_to_node


def _insert_copy_for_mutations(
    gm: torch.fx.GraphModule,
    mutated_outputs: List[Optional[str]],
    unlifted_name_to_node: Dict[str, torch.fx.Node],
    input_name_to_node: Dict[str, torch.fx.Node],
) -> None:
    """
    Find all the buffers and inputs that were mutated and insert copy_
    operators to reflect mutations.
    """
    output_node = None
    for node in gm.graph.nodes:
        if node.op == "output":
            output_node = node
            break
    assert output_node is not None
    outputs = pytree.tree_flatten(output_node.args)[0]
    assert len(outputs) == len(mutated_outputs)

    user_output_nodes = []
    return_nodes_to_copy = {}
    for return_node, mutated_node_name in zip(outputs, mutated_outputs):
        if mutated_node_name is None:
            # Not a mutation output: return it to the user unchanged.
            user_output_nodes.append(return_node)
            continue

        if mutated_node_name in unlifted_name_to_node:
            mutated_node = unlifted_name_to_node[mutated_node_name]
        elif mutated_node_name in input_name_to_node:
            mutated_node = input_name_to_node[mutated_node_name]
        else:
            raise RuntimeError(
                f"Could not find {mutated_node_name} in either buffer or input nodes"
            )

        # Write the mutated value back in-place into the buffer/input node.
        with gm.graph.inserting_before(output_node):
            copy_node = gm.graph.call_function(
                torch.ops.aten.copy_.default, (mutated_node, return_node)
            )
            return_nodes_to_copy[return_node] = copy_node

    output_args = [
        return_nodes_to_copy[node] if node in return_nodes_to_copy else node
        for node in user_output_nodes
    ]
    with gm.graph.inserting_before(output_node):
        # Only return user outputs
        new_output = gm.graph.output(tuple(output_args))
        new_output.meta.update(output_node.meta)
        output_node.replace_all_uses_with(new_output)
        gm.graph.erase_node(output_node)


def _get_codegen(
    in_spec: pytree.TreeSpec,
    out_spec: Optional[pytree.TreeSpec],
    forward_arg_names: Optional[List[str]] = None,
) -> _PyTreeCodeGen:
    """
    Create the codegen for the graph module based on the in/out specs
    """
    if forward_arg_names:
        names = forward_arg_names
    else:
        if (
            in_spec.type == tuple
            and in_spec.num_children == 2
            and in_spec.children_specs[0].type == tuple
            and in_spec.children_specs[1].type == dict
        ):
            # if in_spec contains the args (tuple) and kwargs (dict)
            names = [f"arg_{i}" for i in range(in_spec.children_specs[0].num_children)]
            # add kwarg names
            names.extend(in_spec.children_specs[1].context)
        else:
            names = [f"arg_{i}" for i in range(in_spec.num_children)]

    return _PyTreeCodeGen(
        _PyTreeInfo(
            names,
            in_spec,
            out_spec,
        )
    )


def _unlift(
    gm: torch.fx.GraphModule,
    lifted_inputs: List[Optional[str]],
    mutated_outputs: List[Optional[str]],
    in_spec: pytree.TreeSpec,
    out_spec: Optional[pytree.TreeSpec],
    state_dict: Dict[str, Any],
    constants: Dict[str, Any],
    forward_arg_names: Optional[List[str]] = None,
):
    """
    Unlift a graph module in place: turn lifted placeholders back into
    get_attr nodes, re-insert copy_ ops for mutations, and install pytree
    codegen so the module accepts the original (unflattened) call signature.

    Args:
        lifted_inputs: A list matching the graph module's input nodes. For
        an input node that is referring to a lifted parameter/buffer, this
        list will contain the fqn the corresponding attribute. Otherwise, this
        list will contain None. This is used to unlift the lifted parameters as
        get_attr nodes.

        mutated_outputs: A list matching the graph module's output nodes. For
        an output node that is referring to a mutated buffer or user input, this
        list will contain the name of the corresponding buffer or user input
        that needs to be mutated. Otherwise, this list will contain None. This
        is used to re-insert an inplace copy_ operator to copy the mutated
        values back to the original node.

    Note: ``state_dict`` and ``constants`` are currently unused by this body
    (kept for interface stability with callers).
    """
    unlifted_name_to_node, input_name_to_node = _unlift_inputs_as_getattr(
        gm, lifted_inputs
    )
    _insert_copy_for_mutations(
        gm, mutated_outputs, unlifted_name_to_node, input_name_to_node
    )
    gm.graph._codegen = _get_codegen(in_spec, out_spec, forward_arg_names)
    gm.graph.lint()
    gm.recompile()
    return gm


def _register_attrs_to_new_gm(
    new_gm: torch.fx.GraphModule,
    graph_signature: ExportGraphSignature,
    state_dict: Dict[str, Any],
    constants: Dict[str, Any],
) -> None:
    # Copy buffers (persistent ones from state_dict, non-persistent ones from
    # constants), parameters, and lifted constants/custom objects onto new_gm.
    non_persistent_buffers = set(graph_signature.non_persistent_buffers)
    for name in graph_signature.buffers:
        if name in non_persistent_buffers:
            persistent = False
            value = constants[name]
        else:
            persistent = True
            value = state_dict[name]
        _assign_attr(
            value, new_gm, name, attr_kind=_AttrKind.BUFFER, persistent=persistent
        )
    for name in graph_signature.parameters:
        value = state_dict[name]
        _assign_attr(
            value,
            new_gm,
            name,
            attr_kind=_AttrKind.PARAMETER,
        )

    for name in chain(
        graph_signature.lifted_custom_objs, graph_signature.lifted_tensor_constants
    ):
        value = constants[name]
        _assign_attr(
            value,
            new_gm,
            name,
            attr_kind=_AttrKind.CONSTANT,
        )


class _StatefulGraphModuleFactory(type):
    """
    Metaclass that ensures a private constructor for _StatefulGraphModule
    """

    def __call__(cls, *args, **kwargs):
        raise TypeError(
            f"{cls.__module__}.{cls.__qualname__} has no public constructor. "
        )

    def _create(cls, root, graph, range_constraints=None):
        # Private construction path; bypasses the public-constructor guard.
        return super().__call__(
            root,
            graph,
            range_constraints=range_constraints,
        )


class _StatefulGraphModule(torch.fx.GraphModule, metaclass=_StatefulGraphModuleFactory):
    def __init__(self, root, graph, range_constraints=None):
        super().__init__(root, graph)
        # Need to fix up non-persistent buffers.
        self.range_constraints = range_constraints or []


def _create_stateful_graph_module(
    plain_graph_module: torch.fx.GraphModule,
    range_constraints,
    # TODO(suo) this should not be optional, but is since we still have
    # capture_pre_autograd_graph grr
    graph_signature: Optional[ExportGraphSignature] = None,
):
    stateful_gm = _StatefulGraphModule._create(
        plain_graph_module,
        plain_graph_module.graph,
        range_constraints=range_constraints,
    )

    stateful_gm.register_forward_pre_hook(
        _check_input_constraints_pre_hook, with_kwargs=True
    )

    if graph_signature is None:
        return stateful_gm

    # Fix up lifted tensor constants.
    # fx.GraphModule() constructor silently turns a constant attribute of plain_graph_module
    # into a buffer in stateful_gm and creates an inconsistency with graph_signature.
    # We fix this by de-registering these buffers in lifted_tensor_constants
    # and call _assign_attr(attr_kind=CONSTANT) to register them as constants.
    for constant_fqn in graph_signature.lifted_tensor_constants:
        # Sometimes, the constant can require gradient, this is probably a bug in user code,
        # e.g. `self.const = torch.randn(2, 2, requires_grad=True)`.
        # We call detach on the constant_val since they're tensor constants and we don't need to
        # compute their gradients anyway.
        # Users should properly register it as parameter if they want it to require gradient.
        buffer = stateful_gm.get_buffer(constant_fqn)
        if buffer.requires_grad:
            warnings.warn(
                f"A model attribute `{constant_fqn}` requires gradient. "
                f"but it's not properly registered as a parameter. "
                f"torch.export will detach it and treat it as a constant tensor "
                f"but please register it as parameter instead."
            )
            buffer = buffer.detach()
        *prefix, field = constant_fqn.rsplit(".")
        submod = _recursive_getattr(stateful_gm, prefix)
        delattr(submod, field)
        _assign_attr(buffer, stateful_gm, constant_fqn, attr_kind=_AttrKind.CONSTANT)

    # Fix up non-persistent buffers. torch.fx does not distinguish between
    # persistent and non-persistent buffers, so we must restore that distinction
    # here.
    for buffer in graph_signature.non_persistent_buffers:
        _assign_attr(
            plain_graph_module.get_buffer(buffer),
            stateful_gm,
            buffer,
            attr_kind=_AttrKind.BUFFER,
            persistent=False,
        )

    return stateful_gm


def _unlift_exported_program_lifted_states(ep: ExportedProgram) -> torch.nn.Module:
    # Turn an ExportedProgram back into a stateful nn.Module: strip effect
    # tokens, copy attrs onto a fresh GraphModule, unlift placeholders, and
    # wrap in a _StatefulGraphModule with input-constraint checking.
    ep = _remove_effect_tokens(ep)
    new_gm = torch.fx.GraphModule(ep.graph_module, copy.deepcopy(ep.graph))
    _register_attrs_to_new_gm(new_gm, ep.graph_signature, ep.state_dict, ep.constants)
    forward_arg_names = ep.graph_module.meta.get("forward_arg_names")

    lifted_inputs: List[Optional[str]] = [
        (
            in_spec.target
            if in_spec.kind
            in (
                InputKind.BUFFER,
                InputKind.CONSTANT_TENSOR,
                InputKind.PARAMETER,
                InputKind.CUSTOM_OBJ,
            )
            else None
        )
        for in_spec in ep.graph_signature.input_specs
    ]

    mutated_outputs: List[Optional[str]] = [
        (
            out_spec.target
            if out_spec.kind
            in (OutputKind.BUFFER_MUTATION, OutputKind.USER_INPUT_MUTATION)
            else None
        )
        for out_spec in ep.graph_signature.output_specs
    ]

    new_gm = _unlift(
        new_gm,
        lifted_inputs,
        mutated_outputs,
        ep.call_spec.in_spec,
        ep.call_spec.out_spec,
        ep.state_dict,
        ep.constants,
        forward_arg_names=forward_arg_names,
    )
    unlift_gm = _create_stateful_graph_module(
        new_gm, ep.range_constraints, ep.graph_signature
    )
    unlift_gm.meta.update(ep.graph_module.meta)
    return unlift_gm
diff --git a/janus/lib/python3.10/site-packages/torch/export/dynamic_shapes.py b/janus/lib/python3.10/site-packages/torch/export/dynamic_shapes.py
new file mode 100644
index 0000000000000000000000000000000000000000..18fc1e73f0cf05fd4be077834ed4702d78097136
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/torch/export/dynamic_shapes.py
@@ -0,0 +1,1220 @@
# mypy: allow-untyped-defs
import dataclasses
import inspect
import logging
import sys
from collections import defaultdict
from enum import auto, Enum
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union

import torch
from torch.utils._pytree import (
    _get_node_type,
    BUILTIN_TYPES,
    keystr,
    LeafSpec,
    MappingKey,
    SequenceKey,
    SUPPORTED_NODES,
    tree_flatten,
    tree_map_with_path,
)

from .exported_program import ExportedProgram


if TYPE_CHECKING:
    from sympy import Symbol

    from torch._guards import Source
    from torch.fx.experimental.symbolic_shapes import ShapeEnv, StrictMinMaxConstraint

__all__ = [
    "Constraint",
    "Dim",
    "dims",
    "refine_dynamic_shapes_from_suggested_fixes",
]


log = logging.getLogger(__name__)


class _DimHint(Enum):
    """
    Enum for dynamic shape hints.
    - AUTO means automatic inference of shape (static or dynamic).
    - STATIC means static shape (always specialized).
    """

    AUTO = auto()
    STATIC = auto()


class _Dim(type):
    """
    Metaclass for :func:`Dim` types.

    Supports integer arithmetic (dim + k, dim - k, dim * k) which produces
    derived Dim types; only increasing linear operations with integer
    coefficients are allowed.
    """

    @staticmethod
    def readable(name, min_, max_):
        # Render a Dim back to the minimal source expression that creates it;
        # min=2 and max=int_oo are the implicit defaults and are elided.
        from torch.utils._sympy.numbers import int_oo

        if min_ == 2:
            min_ = None
        if max_ == int_oo:
            max_ = None
        if min_ is None and max_ is None:
            return f"Dim('{name}')"
        if min_ is None:
            return f"Dim('{name}', max={max_})"
        if max_ is None:
            return f"Dim('{name}', min={min_})"
        return f"Dim('{name}', min={min_}, max={max_})"

    def __add__(cls, other):
        # e.g., dim + 1
        if type(other) is not int:
            raise NotImplementedError(
                f"Attempted to add {other} to {cls.__name__}, where an integer was expected. "
                "(Only increasing linear operations with integer coefficients are supported.)"
            )
        return cls._derive(lambda x: x + other)

    def __radd__(cls, other):
        return cls + other

    def __sub__(cls, other):
        # e.g., dim - 1
        if type(other) is not int:
            raise NotImplementedError(
                f"Attempted to subtract {other} from {cls.__name__}, where an integer was expected. "
                "(Only increasing linear operations with integer coefficients are supported.)"
            )
        return cls._derive(lambda x: x - other)

    def __rsub__(cls, other):
        # k - dim would be a decreasing function of dim, which is unsupported.
        raise NotImplementedError(
            f"Attempted to negate {cls.__name__}. "
            "(Only increasing linear operations with integer coefficients are supported.)"
        )

    def __mul__(cls, other):
        # e.g., dim * 2
        if type(other) is not int or other <= 0:
            raise NotImplementedError(
                f"Attempted to multiply {other} with {cls.__name__}, where a positive integer was expected. "
                "(Only increasing linear operations with integer coefficients are supported.)"
            )
        return cls._derive(lambda x: x * other)

    def __rmul__(cls, other):
        return cls * other

    def _derived_name(cls, fn):
        # Name the derived dim by applying fn symbolically, e.g. "2*dim0 + 1".
        from sympy import sympify

        return str(fn(sympify(cls.__name__)))

    def _derive(cls, fn):
        return _DerivedDim(cls._derived_name(fn), (int,), {"root": cls, "fn": fn})


class _StaticDim(_Dim):
    """
    Meta class for static :func:`Dim` types.

    This class is only for setting and checking static dim constraints,
    and the user should never interact with it.
    """

    @property
    def min(self):
        return self.value  # type: ignore[attr-defined]

    @property
    def max(self):
        return self.value  # type: ignore[attr-defined]


class _DerivedDim(_Dim):
    """
    Metaclass for derived :func:`Dim` types.

    Currently we only support increasing linear expressions with integer coefficients.
    In other words, a derived Dim can always be written in the form Ax + B, where
    x is a regular Dim (i.e., non-derived Dim), A and B are integers, and A is positive.
    (In particular, the latter ensures that x < y => Ax + B < Ay + B.)
    These restrictions on the form of derived Dims makes the metatheory simpler: e.g.,
    it simplifies computing ranges for derived Dims, solving for underlying regular Dims,
    deciding equalities between derived Dims, and so on.

    The function lambda x: Ax + B is expressed by `fn`, where x is a normal Dim, `root`.
    The range of a derived Dim is computed by mapping `fn` over the range of its `root`.
    """

    @property
    def min(self):
        # assume that self.fn is an increasing function
        # TODO(avik): use sympy value range analysis instead?
        from sympy import Integer

        from torch.utils._sympy.numbers import int_oo

        if self.root.min is -int_oo:  # type: ignore[attr-defined]
            return -int_oo  # fn not needed because increasing

        _min_symint = self.fn(Integer(self.root.min))  # type: ignore[attr-defined]
        root = self.root  # type: ignore[attr-defined]
        assert _min_symint >= 0, (
            f"Expected derived min value of {self.__name__} to be >= 0. "
            f"Please specify an appropriate min value for {root.__name__} "
            f"(currently {root.min})."
        )
        return int(_min_symint)

    @property
    def max(self):
        # assume that self.fn is an increasing function
        # TODO(avik): use sympy value range analysis instead?
        from sympy import Integer

        from torch.utils._sympy.numbers import int_oo

        if self.root.max is int_oo:  # type: ignore[attr-defined]
            return int_oo  # fn not needed because increasing

        _max_symint = self.fn(Integer(self.root.max))  # type: ignore[attr-defined]
        root = self.root  # type: ignore[attr-defined]
        assert _max_symint <= sys.maxsize - 1, (
            f"Expected derived max value of {self.__name__} to be <= {sys.maxsize - 1}. "
            f"Please specify an appropriate max value for {root.__name__} "
            f"(currently {root.max})."
        )
        return int(_max_symint)

    def _derive(self, fn):
        # We support nesting, e.g., 2*dim + 1.
        # This is implemented by composing operations on the same root.
        # As a consequence, roots are always regular Dims (i.e., not derived Dims).
        return _DerivedDim(
            self._derived_name(fn),
            (int,),
            {"root": self.root, "fn": lambda x: fn(self.fn(x))},  # type: ignore[attr-defined]
        )


def Dim(name: str, *, min: Optional[int] = None, max: Optional[int] = None):
    """
    :func:`Dim` constructs a type analogous to a named symbolic integer with a range.
    It can be used to describe multiple possible values of a dynamic tensor dimension.
    Note that different dynamic dimensions of the same tensor, or of different tensors,
    can be described by the same type.

    Args:
        name (str): Human-readable name for debugging.
        min (Optional[int]): Minimum possible value of given symbol (inclusive)
        max (Optional[int]): Maximum possible value of given symbol (inclusive)

    Returns:
        A type that can be used in dynamic shape specifications for tensors.
    """

    from torch.utils._sympy.numbers import int_oo

    _min = 0 if min is None else min
    _max = int_oo if max is None else max
    assert _max > _min, f"Cannot create Dim with inconsistent min={min}, max={max}"
    assert name.isidentifier(), f"Dim name must be a valid identifier, got {name}"
    dim = _Dim(name, (int,), {"min": _min, "max": _max})
    # Attribute the Dim to the caller's module for nicer reprs/pickling.
    dim.__module__ = getattr(
        inspect.getmodule(inspect.stack()[1][0]), "__name__", "__main__"
    )
    return dim


Dim.AUTO = _DimHint.AUTO  # type: ignore[attr-defined]
Dim.STATIC = _DimHint.STATIC  # type: ignore[attr-defined]


def dims(*names: str, min: Optional[int] = None, max: Optional[int] = None):
    """
    Util to create multiple :func:`Dim` types.
    """
    return tuple(Dim(name, min=min, max=max) for name in names)


@dataclasses.dataclass
class _ConstraintTarget:
    """
    This represents input tensor dimensions.
    """

    # id() of the tensor and the dimension index being constrained.
    t_id: int
    dim: int


@dataclasses.dataclass
class _Constraint(_ConstraintTarget):
    """
    This represents a Dim describing a constraint target.

    `name` is the name of the Dim.
    `constraint_range` contains the min/max bounds of the Dim.
    """

    name: str
    constraint_range: "StrictMinMaxConstraint"

    def _clone_with_range(self, lower=0, upper=None):
        # Import sympy locally
        from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint
        from torch.utils._sympy.numbers import int_oo
        from torch.utils._sympy.value_ranges import ValueRanges

        if upper is None:
            upper = int_oo

        # Intersect the existing range with the new bounds.
        constraint_range = StrictMinMaxConstraint(
            vr=self.constraint_range.vr & ValueRanges(lower=lower, upper=upper),
            warn_only=False,
        )
        return _Constraint(
            self.t_id,
            self.dim,
            self.name,
            constraint_range,
        )

    def __ge__(self, lower):
        return self._clone_with_range(lower=lower)

    def __gt__(self, lower):
        return self._clone_with_range(lower=lower + 1)

    def __le__(self, upper):
        return self._clone_with_range(upper=upper)

    def __lt__(self, upper):
        return self._clone_with_range(upper=upper - 1)

    def __bool__(self):
        # NOTE(avik): We do not support compound expressions like a <= x <= b.
        # This is because Python implicitly desugars them into bool(a <= x) and bool(x <= b),
        # and moreover, enforces that any overload of __bool__ must return True or False.
        # FWIW, sympy also raises TypeError in this case.
        raise TypeError(
            "Cannot determine truth value of _Constraint. "
            "If you are trying to combine _Constraint's with logical connectives, "
            "you can specify them separately instead."
        )

    @property
    def serializable_spec(self):
        # We need a serialization compatible format of the constraint so that it
        # can be saved in the graph module w/o breaking the module serialization.
        # The saved constraints will be used directly for the post-exporting pass
        # that converts constraints to runtime assertion. The saved constraints
        # will not be saved in the serialized module.
        # TODO: A better way is needed. Currently we use 't_id' to map the constraint,
        # which is not reliable
        return {
            "t_id": self.t_id,
            "dim": self.dim,
            "min": self.constraint_range.vr.lower,
            "max": self.constraint_range.vr.upper,
        }


@dataclasses.dataclass
class _PhantomRoot:
    """
    This represents the root of a derived Dim where the root does not directly
    specify the shape of any input dimension, but the derived Dim does.

    e.g., the input shapes 2*dim and dim + 1 are related via a "phantom" dim.

    The fields `name`, `constraint_range`, and `val` carried by a phantom root
    help create a symbol for it. Any derived dims with this phantom root are
    backed by expressions over this symbol.
    """

    name: str
    constraint_range: "StrictMinMaxConstraint"
    val: int


@dataclasses.dataclass
class _DerivedConstraint(_ConstraintTarget):
    """
    This represents a derived Dim, whose root is either a regular constraint target
    (which directly specifies the shape of some input dimension) or a phantom root
    (which does so indirectly).

    It can be thought of as a subclass of `_Constraint`, except that it does not
    support <, <=, >, >= operations.
    """

    name: str
    constraint_range: "StrictMinMaxConstraint"
    root: Union[_ConstraintTarget, _PhantomRoot]
    fn: Callable

    @property
    def serializable_spec(self):
        # same as _Constraint.serializable_spec
        return {
            "t_id": self.t_id,
            "dim": self.dim,
            "min": self.constraint_range.vr.lower,
            "max": self.constraint_range.vr.upper,
        }


Constraint = Union[_Constraint, _DerivedConstraint]


def _process_equalities(
    constraint: Constraint,
    get_sources: Callable[[int, int], List["Source"]],
    shape_env: "ShapeEnv",
    names: Dict[str, Tuple[int, int]],
    source_pairs: List[Tuple["Source", "Source"]],
    derived_equalities: List[Tuple["Source", Union["Source", "Symbol"], Callable]],
    phantom_symbols: Dict[str, "Symbol"],
):
    """
    Updates `source_pairs`, `derived_equalities`, and `phantom_symbols` (which become
    fields of `EqualityConstraint`) based on a given input `constraint`.
    """

    sources = get_sources(constraint.t_id, constraint.dim)
    if not sources:  # empty sources due to unused shapes
        return

    source, *other_sources = sources
    # When t.size()[dim] maps to src0, src1, ..., srcN, we add
    # constraints that make src0 "equal" to src1, ..., srcN.
    source_pairs.extend((source, other_source) for other_source in other_sources)
    if not isinstance(constraint, _DerivedConstraint):
        if constraint.name in names:
            # Another tensor/dim already uses this Dim name: equate sources.
            shared_t_id, shared_dim = names[constraint.name]
            other_sources = get_sources(shared_t_id, shared_dim)
            source_pairs.extend(
                (source, other_source) for other_source in other_sources
            )
        else:
            names[constraint.name] = (constraint.t_id, constraint.dim)
    else:
        # branch based on the root of the _DerivedConstraint
        if not isinstance(constraint.root, _PhantomRoot):
            # either root points to an input source
            root = get_sources(constraint.root.t_id, constraint.root.dim)[0]  # type: ignore[assignment]
        else:
            # or root points to a phantom symbol
            if constraint.root.name in phantom_symbols:
                root = phantom_symbols[constraint.root.name]  # type: ignore[assignment]
            else:
                # create a phantom symbol in the shape env based on the _PhantomRoot
                root = shape_env.create_symbol(
                    val=constraint.root.val,
                    source=torch._dynamo.source.ConstantSource(constraint.root.name),
                    dynamic_dim=torch.fx.experimental.symbolic_shapes.DimDynamic.DYNAMIC,
                    constraint_dim=constraint.root.constraint_range,
                )
                phantom_symbols[constraint.root.name] = root  # type: ignore[assignment]

        fn = constraint.fn
        # A derived equality (source, root, fn) informally corresponds to source = fn(root).
        # Here source describes an input and root might describe another input or a phantom symbol.
        derived_equalities.append((source, root, fn))


def _tree_map_with_path(
    func: Callable[..., Any],
    tree: Any,
    *dynamic_shapes: Any,
    tree_name: Optional[str] = None,
) -> Any:
    """
    Customized tree_map for mapping pytrees to dynamic_shapes.

    For built-in types (e.g., standard collections) this behaves exactly like tree_map.

    OTOH for a user-defined class C registered with pytree, we cannot assume that a C
    containing tensors can be mapped to a C containing dynamic shapes (i.e., C may not
    be a polymorphic container). In that case we use the flattened form of C instead.
    Thus a C(**tensors) that flattens to (**tensors) will map to (**dynamic_shapes).

    Args:
        func: function to apply to each (int, float, str, bool, None, torch.Tensor)
        tree: input pytree
        dynamic_shapes: zero or more (typically one) dynamic_shapes to match

    Returns:
        output pytree mapping func to each (int, float, str, bool, None, torch.Tensor)
    """

    def is_leaf(t):
        # BUILTIN_TYPES is a subset of SUPPORTED_NODES, the latter being all types
        # registered with pytree. Types *not* in BUILTIN_TYPES include primitive types
        # (int, float, str, bool, None, torch.Tensor), which are not in SUPPORTED_NODES,
        # as well as user-defined classes registered with pytree, which are.
        return _get_node_type(t) not in BUILTIN_TYPES

    def f(path, t, *dynamic_shapes):
        typ = _get_node_type(t)
        # typ is not in BUILTIN_TYPES
        if typ in SUPPORTED_NODES:
            # thus typ is a user-defined class registered with pytree,
            # in which case flatten and recurse
            return tree_map_with_path(
                f,
                SUPPORTED_NODES[typ].flatten_fn(t)[0],
                *dynamic_shapes,
                is_leaf=is_leaf,
            )
        else:
            return func(path, t, *dynamic_shapes)

    try:
        return tree_map_with_path(f, tree, *dynamic_shapes, is_leaf=is_leaf)
    except ValueError as e:
        if "mismatch" in e.args[0]:
            # When PyTree finds a structural mismatch between tree and dynamic_shapes,
            # the error message is unfortunately quite horrible. Let's fix that.
+ assert dynamic_shapes, "Cannot be a mismatch if there is no dynamic_shapes" + assert tree_name, "Must provide a tree_name when there might be a mismatch" + + def _key(type_, context, i): + # derive a PyTree key given the type, context, and child # of a TreeSpec + if type_ is dict: + return MappingKey(context[i]) + if type_ in (list, tuple): + assert context is None + return SequenceKey(i) + raise AssertionError(f"Did not expect type {type_}") + + def raise_mismatch_error(msg): + from torch._dynamo.exc import UserError, UserErrorType + + raise UserError( + UserErrorType.INVALID_INPUT, + f"Detected mismatch between the structure of `{tree_name}` and `dynamic_shapes`: {msg}", + case_name="dynamic_shapes_validation", + ) + + def _compare(tree, dynamic_shapes, path): + # raise an error at the point where tree and dynamic_shapes differ, + # including the path to that point and the reason for the difference + rendered_path = keystr(path) + if isinstance(tree, LeafSpec): + return + if isinstance(dynamic_shapes, LeafSpec): + raise_mismatch_error( + f"`{tree_name}{rendered_path}` is a {tree.type}, " + f"but `dynamic_shapes{rendered_path}` is not" + ) + if tree.type != dynamic_shapes.type: + raise_mismatch_error( + f"`{tree_name}{rendered_path}` is a {tree.type}, " + f"but `dynamic_shapes{rendered_path}` is a {dynamic_shapes.type}" + ) + if len(tree.children_specs) != len(dynamic_shapes.children_specs): + raise_mismatch_error( + f"`{tree_name}{rendered_path}` has {len(tree.children_specs)} elements, " + f"but `dynamic_shapes{rendered_path}` has {len(dynamic_shapes.children_specs)} elements" + ) + if tree.type is dict: + # context, children could be out of order + if sorted(tree.context) != sorted(dynamic_shapes.context): + raise_mismatch_error( + f"`{tree_name}{rendered_path}` has keys {tree.context}, " + f"but `dynamic_shapes{rendered_path}` has keys {dynamic_shapes.context}" + ) + _remap = dict( + zip(dynamic_shapes.context, dynamic_shapes.children_specs) + ) + 
dynamic_shapes_children_specs = [_remap[k] for k in tree.context] + else: + dynamic_shapes_children_specs = dynamic_shapes.children_specs + for i, (tree_, dynamic_shapes_) in enumerate( + zip(tree.children_specs, dynamic_shapes_children_specs) + ): + _compare( + tree_, + dynamic_shapes_, + path + [_key(tree.type, tree.context, i)], + ) + + _, tree_spec = tree_flatten(tree, is_leaf=is_leaf) + for other_tree in dynamic_shapes: + _, other_tree_spec = tree_flatten(other_tree, is_leaf) + _compare(tree_spec, other_tree_spec, []) + raise + + +def _combine_args(f, args, kwargs, _is_torch_jit_trace=False) -> Dict[str, Any]: + # combine args and kwargs following the signature of f, as it happens + # in the body of f when called with *args, **kwargs + if isinstance(f, ExportedProgram): + f = f.module() + if not _is_torch_jit_trace: + signature = ( + inspect.signature(f.forward) + if isinstance(f, torch.nn.Module) + else inspect.signature(f) + ) + kwargs = kwargs if kwargs is not None else {} + return signature.bind(*args, **kwargs).arguments + return args + + +class ShapesCollection: + """ + Builder for dynamic_shapes. + Used to assign dynamic shape specifications to tensors that appear in inputs. + + Example:: + args = ({"x": tensor_x, "others": [tensor_y, tensor_z]}) + + dim = torch.export.Dim(...) 
+ dynamic_shapes = torch.export.ShapesCollection() + dynamic_shapes[tensor_x] = (dim, dim + 1, 8) + dynamic_shapes[tensor_y] = {0: dim * 2} + # This is equivalent to the following (now auto-generated): + # dynamic_shapes = {"x": (dim, dim + 1, 8), "others": [{0: dim * 2}, None]} + + torch.export(..., args, dynamic_shapes=dynamic_shapes) + """ + + def __init__(self): + self._shapes = {} + + def __setitem__(self, t, shape): + assert isinstance( + t, torch.Tensor + ), f"Cannot assign shape to non-tensor type {type(t)}" + # TODO(avik): check that shape is indeed a Shape + t_id = id(t) + if t_id in self._shapes: + _shape = self._shapes[t_id] + assert ( + shape == _shape + ), f"Shapes assigned to tensor do not match: expected {_shape}, got {shape}" + else: + self._shapes[id(t)] = shape + + def __getitem__(self, t): + t_id = id(t) + if t_id in self._shapes: + return self._shapes[t_id] + else: + return None + + def __len__(self): + return len(self._shapes) + + def dynamic_shapes(self, m, args, kwargs=None): + """ + Generate dynamic_shapes. + """ + + t_ids = set() + + def find_shape(path, t): + t_id = id(t) + if t_id in self._shapes: + t_ids.add(t_id) + return self._shapes[t_id] + else: + return None + + combined_args = _combine_args(m, args, kwargs) + dynamic_shapes = _tree_map_with_path(find_shape, combined_args) + if any(t_id not in t_ids for t_id in self._shapes): + raise ValueError( + "Some tensors that were assigned shapes were not found in args. " + "Maybe such tensors were copied when passing them as args? " + "Maybe such tensors are contained in classes that were not registered with pytree?" + ) + return dynamic_shapes + + +def _warn_on_None_dynamic_shape_dimension(): + msg = ( + "Using None as a dynamic shape dimension is deprecated. 
" + "Please use Dim.STATIC instead" + ) + # TODO(avik): raise an error in the future + log.warning(msg) + + +def _check_dynamic_shapes( + combined_args: Dict[str, Any], + dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None], +): + """ + Checks the dynamic_shapes specification for correctness, + using combined args + kwargs as reference for inputs structure. + """ + from torch._dynamo.exc import UserError, UserErrorType + from torch._export.non_strict_utils import _flatten_dynamic_shapes + + if dynamic_shapes is None or len(dynamic_shapes) == 0: + return + if isinstance(dynamic_shapes, (tuple, list)): + combined_args = type(dynamic_shapes)(combined_args.values()) # type: ignore[assignment, misc] + + bounds: Dict[str, Tuple[int, int]] = {} + + def check_same_bounds(dim): + if dim.__name__ in bounds: + min_, max_ = bounds[dim.__name__] + if dim.min != min_ or dim.max != max_: + this_ = _Dim.readable(dim.__name__, min_, max_) + that_ = _Dim.readable(dim.__name__, dim.min, dim.max) + raise UserError( + UserErrorType.INVALID_INPUT, + f"Found different definitions {this_} and {that_} " + f"for the same symbolic dimension {dim}!", + ) + else: + bounds[dim.__name__] = (dim.min, dim.max) + + def check_symbols(path, tensor, shape): + if isinstance(shape, dict): + for i, dim in shape.items(): + if isinstance(dim, _Dim): + check_same_bounds(dim) + elif dim is None: + _warn_on_None_dynamic_shape_dimension() + elif not (isinstance(dim, (int, _DimHint))): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Unexpected dimension mapped to index {i} in input tensor shape {shape} " + f"specified at `dynamic_shapes{keystr(path)}` " + f"(expected None, an int, a Dim, Dim.AUTO, or Dim.STATIC, but got {dim} instead)", + case_name="dynamic_shapes_validation", + ) + elif isinstance(shape, (tuple, list)): + for i, dim in enumerate(shape): + if isinstance(dim, _Dim): + check_same_bounds(dim) + elif dim is None: + _warn_on_None_dynamic_shape_dimension() + elif not 
(isinstance(dim, (int, _DimHint))): + raise UserError( + UserErrorType.INVALID_INPUT, + f"Unexpected dimension #{i} in input tensor shape {shape} " + f"specified at `dynamic_shapes{keystr(path)}` " + f"(expected None, an int, a Dim, Dim.AUTO, or Dim.STATIC, but got {dim} instead)", + case_name="dynamic_shapes_validation", + ) + elif shape is not None: + raise UserError( + UserErrorType.INVALID_INPUT, + f"Unexpected input tensor shape {shape} specified at `dynamic_shapes{keystr(path)}` " + f"(expected either a list/tuple of dimensions, or a dict mapping indices to dimensions," + f" where each dimension is an int, a Dim, Dim.AUTO, or Dim.STATIC)", + case_name="dynamic_shapes_validation", + ) + + assert isinstance(dynamic_shapes, (dict, tuple, list)) + if isinstance(dynamic_shapes, dict): + got_keys = list(dynamic_shapes.keys()) + expected_arg_names = list(combined_args.keys()) + if sorted(got_keys) != sorted(expected_arg_names): + msg = ( + f"When `dynamic_shapes` is specified as a dict, its top-level keys " + f"must be the arg names {expected_arg_names} of `inputs`, but " + f"here they are {got_keys}. " + ) + if ( + len(combined_args) == 1 + and expected_arg_names[0] not in got_keys + and isinstance(combined_args[expected_arg_names[0]], dict) + ): + msg += ( + "Since here `inputs` is a list/tuple enclosing a single dict, " + "maybe you just forgot to enclose `dynamic_shapes` in a list/tuple?" + ) + else: + msg += ( + "Alternatively, you could also ignore arg names entirely " + "and specify `dynamic_shapes` as a list/tuple matching `inputs`." 
+ ) + raise UserError( + UserErrorType.INVALID_INPUT, msg, case_name="dynamic_shapes_validation" + ) + + def check_shape(path, t, dynamic_shape): + if isinstance(t, torch.Tensor): + check_symbols(path, t, dynamic_shape) + else: + if dynamic_shape is not None: + rendered_path = keystr(path) + raise UserError( + UserErrorType.INVALID_INPUT, + f"Cannot associate shape {dynamic_shape} specified at `dynamic_shapes{rendered_path}` " + f"to non-tensor type {type(t)} at `inputs{rendered_path}` (expected None)", + case_name="dynamic_shapes_validation", + ) + + _tree_map_with_path(check_shape, combined_args, dynamic_shapes, tree_name="inputs") + + # raise user warning if both Dim.AUTO & Dims are specified in dynamic_shapes + flat_dynamic_shapes = _flatten_dynamic_shapes(combined_args, dynamic_shapes) + flatter_dynamic_shapes, _ = tree_flatten(flat_dynamic_shapes) + if any(isinstance(s, _Dim) for s in flatter_dynamic_shapes) and any( + s == _DimHint.AUTO for s in flatter_dynamic_shapes + ): + raise UserError( + UserErrorType.INVALID_INPUT, + "Specifying both `Dim.AUTO` and `Dim` or `DerivedDim` in `dynamic_shapes` is not well supported at the moment, " + "and can easily lead to constraint violation errors or obscure errors in torch.export. Dim/DerivedDims " + "expect all equal or related dimensions to be specified, and does not yet compose well with `Dim.AUTO`. 
" + "We suggest using `Dim.AUTO` mixed with `None` for auto-dynamic + static shapes, plus torch._check(dim >= min), " + "torch._check(dim <= max) calls in your program to specify min/max ranges, or `Dim`/`DerivedDim` mixed with `None` " + "if you want to assert on the exact specification of your program's dynamic shapes behavior.", + case_name="dynamic_shapes_validation", + ) + + +def _transform_shapes_for_default_dynamic( + combined_args: Dict[str, Any], + dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None], +) -> Union[Dict[str, Any], Tuple[Any], List[Any], None]: + """ + In the long run this might not be needed, but this exists because export.export() and _dynamo.export() + historically have different semantics for how dynamic_shapes are specified, but go through the same + process of producing constraints, and now both use assume_static_by_default=False. + + For _dynamo.export(), the semantics for dynamic_shapes are: + - None: dynamic, allocated a symbol + - Dim/DerivedDim: a strict assertion on the min/max range for this symbol, and require a specification + for all dims governed by this symbol (i.e. relations, equality, linear relations, etc.) + + For export.export(), historically dynamism for unspecified dims has been undesirable, so the semantics are: + - Dim.AUTO: dynamic, allocated a symbol + - None/unspecified/Dim.STATIC: static + - Dim/DerivedDims: also a strict assertion + + To allow both APIs to follow the same process for producing constraints, this function converts dynamic_shapes + for export.export() to be compatible with _process_dynamic_shapes() and assume_static_by_default=False, turning them + into essentially what they'd look like for _dynamo.export(). 
+ + An example conversion might look like, for a 3-d input tensor: + + input spec: { + 0: Dim.AUTO, + 1: None, # or Dim.STATIC + 2: Dim("dx"), + } + output spec: { + 0: None, # None: dynamic by default + 1: 32, # explicitly provide static shape + 2: Dim("dx"), # remains the same + } + """ + + def _tree_map_helper(tree, val): + """ + If the user generally specifies dynamic_shapes=None for a pytree input, + we'd like to convert this into a tree of Nones following the input spec, + so we can explicitly specify static dims for all tensor dimensions. + Non-builtin types for pytree (e.g. custom dataclasses) creates some difficulty, + in which case the correct format is a list containing specs for each child attribute. + """ + if (node_type := _get_node_type(tree)) not in SUPPORTED_NODES: # is_leaf + return val + flatten_fn = SUPPORTED_NODES[node_type].flatten_fn + child_pytrees, context = flatten_fn(tree) # flatten from whatever original type + unflatten_fn = SUPPORTED_NODES[ + node_type if node_type in BUILTIN_TYPES else list + ].unflatten_fn + children = [_tree_map_helper(child, val) for child in child_pytrees] + return unflatten_fn( + children, context + ) # unflatten into original type, or list if not built-in type + + if ( + dynamic_shapes is None or len(dynamic_shapes) == 0 + ): # create pytree structure of static dim + dynamic_shapes = _tree_map_helper(combined_args, None) + if isinstance(dynamic_shapes, (tuple, list)): + combined_args = type(dynamic_shapes)(combined_args.values()) # type: ignore[assignment, misc] + + def transform_shapes(path, tensor, shape): + def _marked_dynamic(tensor, i): + # TODO(pianpwk): deprecate mark_dynamic() usage for export + return i in getattr(tensor, "_dynamo_dynamic_indices", set()) + + out: Union[None, List[Any], Dict[int, Any]] = None + if isinstance(shape, dict): + out = {} + for i, val in enumerate(tensor.shape): + dim = shape.get(i, _DimHint.STATIC) + if _marked_dynamic(tensor, i) or dim == _DimHint.AUTO: + # don't have to 
specify anything if dynamic + # None also works, since assume_static_by_default=False + if dim == _DimHint.AUTO: + torch._dynamo.maybe_mark_dynamic(tensor, i) # avoid duck sizing + continue + elif isinstance(dim, _Dim): + out[i] = dim + elif isinstance(dim, int): + # important that this is dim and not val, + # so we can raise error if user-specified dim != val + out[i] = dim + elif dim is None: + _warn_on_None_dynamic_shape_dimension() + out[i] = val + else: + # make explicitly static + assert dim == _DimHint.STATIC + out[i] = val + elif isinstance(shape, (tuple, list)): + out = [] + for i, val in enumerate(tensor.shape): + dim = shape[i] + if _marked_dynamic(tensor, i) or dim == _DimHint.AUTO: + if dim == _DimHint.AUTO: + torch._dynamo.maybe_mark_dynamic(tensor, i) # avoid duck sizing + out.append(None) + elif isinstance(dim, _Dim): + out.append(dim) + elif isinstance(dim, int): + out.append(dim) + elif dim is None: + _warn_on_None_dynamic_shape_dimension() + out.append(val) + else: + assert dim == _DimHint.STATIC + out.append(val) + out = type(shape)(out) # type: ignore[assignment] + else: + assert shape is None + if isinstance(tensor, torch.Tensor): + out = [] + for i, val in enumerate(tensor.shape): + out.append(None if _marked_dynamic(tensor, i) else val) + out = out or None + else: + out = None + return out + + def transform_shape(path, t, dynamic_shape): + if isinstance(t, torch.Tensor): + return transform_shapes(path, t, dynamic_shape) + + result = _tree_map_with_path( + transform_shape, combined_args, dynamic_shapes, tree_name="inputs" + ) + return result + + +def _process_dynamic_shapes( + combined_args: Dict[str, Any], + dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None], +) -> List[Constraint]: + """ + Reads the dynamic_shapes specification and produces a list of constraints. 
+ """ + from torch._dynamo.exc import UserError, UserErrorType + + if dynamic_shapes is None or len(dynamic_shapes) == 0: + # we run with dynamic by default, so no need to produce constraints + return [] + if isinstance(dynamic_shapes, (tuple, list)): + combined_args = type(dynamic_shapes)(combined_args.values()) # type: ignore[assignment, misc] + + # map of Dim names representing input shape dimensions to constraints on them + symbols: Dict[str, List[Constraint]] = defaultdict(list) + # track roots that do not directly represent input shape dimensions + phantom_roots: Dict[str, _PhantomRoot] = {} + derived_constraints_with_phantom_root: List[_DerivedConstraint] = [] + + def to_constraint(dim, tensor, i): + import sympy + + from torch.fx.experimental.symbolic_shapes import StrictMinMaxConstraint + from torch.utils._sympy.solve import try_solve + from torch.utils._sympy.value_ranges import ValueRanges + + def root_value(): + # given tensor.shape[i] is the value of dim = fn(root), + # find the value of root + symbol = sympy.Symbol(dim.root.__name__, integer=True) + expr = dim.fn(symbol) + solution = try_solve(sympy.Eq(expr, tensor.shape[i]), symbol) + if solution is not None: + return int(solution[1]) # type: ignore[call-overload] + else: + raise UserError( # noqa: B904 + UserErrorType.CONSTRAINT_VIOLATION, + f"Expected shape[{i}] = {tensor.shape[i]} of input Tensor to be " + f"of the form {expr}, where {symbol} is an integer", + ) + + if isinstance(dim, _DerivedDim): + # generate a _DerivedConstraint where the root is: + # - either a _ConstraintTarget (if dim.root directly describes an input shape) + # - or a _PhantomRoot (otherwise) + dim_root = dim.root # type: ignore[attr-defined] + if dim_root.__name__ in symbols: + # root represents an input shape dimension + root_constraint = symbols[dim_root.__name__][0] + root = _ConstraintTarget( + root_constraint.t_id, + root_constraint.dim, + ) + elif dim_root.__name__ not in phantom_roots: + # create a phantom root + 
root = _PhantomRoot( # type: ignore[assignment] + name=dim_root.__name__, + constraint_range=StrictMinMaxConstraint( + vr=ValueRanges(lower=dim_root.min, upper=dim_root.max), + warn_only=False, + ), + val=root_value(), + ) + phantom_roots[dim_root.__name__] = root # type: ignore[assignment] + else: + root = phantom_roots[dim_root.__name__] # type: ignore[assignment] + constraint = _DerivedConstraint( + id(tensor), + i, + dim.__name__, + StrictMinMaxConstraint( + vr=ValueRanges(lower=dim.min, upper=dim.max), + warn_only=False, + ), + root, + dim.fn, # type: ignore[attr-defined] + ) + if isinstance(root, _PhantomRoot): + # NOTE(avik): since we have not processed all inputs yet, we may replace this + # with a root that does represent an input shape dimension later (see below) + derived_constraints_with_phantom_root.append(constraint) + elif isinstance(dim, _StaticDim): + constraint = _Constraint( # type: ignore[assignment] + id(tensor), + i, + dim.__name__, + StrictMinMaxConstraint( + vr=ValueRanges(lower=dim.value, upper=dim.value), warn_only=False # type: ignore[attr-defined] + ), + ) + else: + constraint = _Constraint( # type: ignore[assignment] + id(tensor), + i, + dim.__name__, + StrictMinMaxConstraint( + vr=ValueRanges(lower=dim.min, upper=dim.max), warn_only=False # type: ignore[attr-defined] + ), + ) + return constraint + + def update_symbols(path, tensor, shape): + def _create_static_dim(tensor, i, value): + return _StaticDim(str(value), (int,), {"value": value}) + + if isinstance(shape, dict): + for i, dim in shape.items(): + if isinstance(dim, (int, _Dim)): + if isinstance(dim, int): + dim = _create_static_dim(tensor, i, dim) + constraint = to_constraint(dim, tensor, i) + symbols[dim.__name__].append(constraint) + elif isinstance(shape, (tuple, list)): + for i, dim in enumerate(shape): + if isinstance(dim, (int, _Dim)): + if isinstance(dim, int): + dim = _create_static_dim(tensor, i, dim) + constraint = to_constraint(dim, tensor, i) + 
symbols[dim.__name__].append(constraint) + + def assoc_shape(path, t, dynamic_shape): + if isinstance(t, torch.Tensor): + update_symbols(path, t, dynamic_shape) + + _tree_map_with_path(assoc_shape, combined_args, dynamic_shapes, tree_name="inputs") + + constraints = [] + for derived_constraint_with_phantom_root in derived_constraints_with_phantom_root: + phantom_root_name = derived_constraint_with_phantom_root.root.name # type: ignore[union-attr] + if phantom_root_name in symbols: + # We found an input shape dimension corresponding to this name, so we + # do not need a phantom symbol for it after all. + # NOTE(avik): Overall we want to maintain the invariant that roots that + # are phantom symbols are really "phantom," i.e., they cannot be represented + # by any input source. This is important when we are deciding derived equalities, + # since we can focus our attention exclusively on input sources: deciding + # derived equalities involving phantom symbols are, in comparison, trivial. + derived_constraint_with_phantom_root.root = symbols[phantom_root_name][0] + + for dynamic_dims in symbols.values(): + constraints.extend(dynamic_dims) + + return constraints # type: ignore[return-value] + + +def _get_dim_name_mapping( + dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None] +): + name_to_dim = {} + for dim in tree_flatten( + dynamic_shapes, + is_leaf=lambda x: isinstance(x, _Dim), + )[0]: + if dim is None: + # NOTE: this must denote a non-Tensor or automatic at this point. 
+ continue + if isinstance(dim, int): + continue + assert isinstance(dim, _Dim) # dim hints should have boiled away + name_to_dim[dim.__name__] = dim + if isinstance(dim, _DerivedDim): + name_to_dim[dim.root.__name__] = dim.root # type: ignore[attr-defined] + return name_to_dim + + +def refine_dynamic_shapes_from_suggested_fixes( + msg: str, + dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any]], +) -> Union[Dict[str, Any], Tuple[Any], List[Any]]: + """ + For working with export's dynamic shapes suggested fixes, and/or automatic dynamic shapes. + Refines the given dynamic shapes spec, given a ConstraintViolation error message and the original dynamic shapes. + + For most cases behavior is straightforward - i.e. for suggested fixes that specialize or refine a Dim's range, + or fixes that suggest a derived relation, the new dynamic shapes spec will be updated as such. + + e.g. + Suggested fixes: + + dim = Dim('dim', min=3, max=6) -> this just refines the dim's range + dim = 4 -> this specializes to a constant + dy = dx + 1 -> dy was specified as an independent dim, but is actually tied to dx with this relation + + However, suggested fixes associated with derived dims can be more complicated. + For example, if a suggested fix is provided for a root dim, the new derived dim value is evaluated based on the root. + + e.g. + dx = Dim('dx') + dy = dx + 2 + dynamic_shapes = {"x": (dx,), "y": (dy,)} + + Suggested fixes: + + dx = 4 # specialization will lead to dy also specializing = 6 + dx = Dim('dx', max=6) # dy now has max = 8 + + Derived dims suggested fixes can also be used to express divisibility constraints. + This involves creating new root dims that aren't tied to a particular input shape. + In this case the root dims won't appear directly in the new spec, but as a root of + one of the dims. + + e.g. 
+ Suggested fixes: + + _dx = Dim('_dx', max=1024) # this won't appear in the return result, but dx will + dx = 4*_dx # dx is now divisible by 4, with a max value of 4096 + """ + + import re + + import sympy + + from torch._dynamo.exc import UserError, UserErrorType + from torch.fx.experimental.symbolic_shapes import _is_supported_equivalence + + try: + shape_fixes_msg = msg.split("Suggested fixes:")[1].strip() + except Exception as exc: + raise UserError( + UserErrorType.INVALID_INPUT, + "Suggested fixes not found in error message given to refine_dynamic_shapes_from_suggested_fixes()", + ) from exc + + # build shape_fixes dictionary + shape_fixes = {} + for fix in shape_fixes_msg.split("\n"): + fix = fix.strip() + if match := re.match(r"(.*) = Dim\('(.*)'.*\)", fix): + name = match.group(1) + _min, _max = None, None + if match_min := re.match(r".* = Dim\('.*', min\=([0-9]+).*\)", fix): + _min = int(match_min.group(1)) + if match_max := re.match(r".* = Dim\('.*'.*max\=([0-9]+)\)", fix): + _max = int(match_max.group(1)) + shape_fixes[name] = Dim(name, min=_min, max=_max) + else: + name, expr = fix.split(" = ") + expr = sympy.sympify(expr) + if isinstance(expr, sympy.Number): + # static, integer + shape_fixes[name] = int(expr) # type: ignore[assignment] + else: + # relation or derived dim + shape_fixes[name] = expr + + name_to_dim = _get_dim_name_mapping(dynamic_shapes) + + # track derived dim roots + roots: Set[str] = set() + for k, c in shape_fixes.items(): + assert isinstance(c, (int, _Dim, _DerivedDim, sympy.Expr)) + if isinstance(c, sympy.Expr): # check dim/derived dim expression + assert _is_supported_equivalence(c) + shape_fixes[k] = c + roots.add(str(next(iter(c.free_symbols)))) + if isinstance(c, _DerivedDim): + roots.add(c.root.__name__) # type: ignore[attr-defined] + + # check keys are existing dims or new roots + for k, c in shape_fixes.items(): + assert k in name_to_dim or k in roots + + # cache so we don't produce multiple derived dim objects + 
derived_dim_cache: Dict[str, _DerivedDim] = {} + + def apply_fixes(path, dim, dummy): + if dim is None or isinstance(dim, int): # not dynamic + return dim + elif dim.__name__ in shape_fixes: # directly fix + fix = shape_fixes[dim.__name__] + if isinstance(fix, sympy.Expr): # now derived or related + if str(fix) in derived_dim_cache: + return derived_dim_cache[str(fix)] + else: + symbol = next(iter(fix.free_symbols)) + # try to locate symbol + if symbol.name in shape_fixes: # type: ignore[attr-defined] + root = shape_fixes[symbol.name] # type: ignore[attr-defined] + else: + assert symbol.name in name_to_dim # type: ignore[attr-defined] + root = name_to_dim[symbol.name] # type: ignore[attr-defined] + # figure out value of fix + modulus, remainder = sympy.polys.polytools.div(fix, symbol) + dim = root + if modulus != 1: + dim = int(modulus) * dim + if remainder != 0: + dim = dim + int(remainder) + derived_dim_cache[str(fix)] = dim + return dim + else: + return fix + elif isinstance(dim, _DerivedDim) and dim.root.__name__ in shape_fixes: # type: ignore[attr-defined] + if dim.__name__ in derived_dim_cache: + return derived_dim_cache[dim.__name__] + else: # evaluate new derived value based on root + _dim = dim.fn(shape_fixes[dim.root.__name__]) # type: ignore[attr-defined] + derived_dim_cache[dim.__name__] = _dim + return _dim + return dim # unchanged dim + + return _tree_map_with_path(apply_fixes, dynamic_shapes, dynamic_shapes) diff --git a/janus/lib/python3.10/site-packages/torch/export/graph_signature.py b/janus/lib/python3.10/site-packages/torch/export/graph_signature.py new file mode 100644 index 0000000000000000000000000000000000000000..4730cf6febcddfdfbfa0407ec0cd8895507d36a0 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/export/graph_signature.py @@ -0,0 +1,593 @@ +# mypy: allow-untyped-defs +import dataclasses +from enum import auto, Enum +from typing import Collection, Dict, List, Mapping, Optional, Set, TYPE_CHECKING, Union + +from 
torch._library.fake_class_registry import FakeScriptObject + + +if TYPE_CHECKING: + import torch + from torch._functorch._aot_autograd.schemas import GraphSignature + +__all__ = [ + "ConstantArgument", + "CustomObjArgument", + "ExportBackwardSignature", + "ExportGraphSignature", + "InputKind", + "InputSpec", + "OutputKind", + "OutputSpec", + "SymIntArgument", + "TensorArgument", +] + + +@dataclasses.dataclass +class TensorArgument: + name: str + + +@dataclasses.dataclass +class TokenArgument: + name: str + + +@dataclasses.dataclass +class SymIntArgument: + name: str + + +@dataclasses.dataclass +class CustomObjArgument: + name: str + class_fqn: str + fake_val: Optional[FakeScriptObject] = None + + +@dataclasses.dataclass +class ConstantArgument: + name: str + value: Union[int, float, bool, str, None] + + +ArgumentSpec = Union[ + TensorArgument, + SymIntArgument, + ConstantArgument, + CustomObjArgument, + TokenArgument, +] + + +class InputKind(Enum): + USER_INPUT = auto() + PARAMETER = auto() + BUFFER = auto() + CONSTANT_TENSOR = auto() + CUSTOM_OBJ = auto() + TOKEN = auto() + + +@dataclasses.dataclass +class InputSpec: + kind: InputKind + arg: ArgumentSpec + target: Optional[str] + persistent: Optional[bool] = None + + def __post_init__(self): + if self.kind == InputKind.BUFFER: + assert ( + self.persistent is not None + ), "Failed to specify persistent flag on BUFFER." 
+ assert isinstance( + self.arg, + ( + TensorArgument, + SymIntArgument, + ConstantArgument, + CustomObjArgument, + TokenArgument, + ), + ), f"got {type(self.arg)}" + + +class OutputKind(Enum): + USER_OUTPUT = auto() + LOSS_OUTPUT = auto() + BUFFER_MUTATION = auto() + GRADIENT_TO_PARAMETER = auto() + GRADIENT_TO_USER_INPUT = auto() + USER_INPUT_MUTATION = auto() + TOKEN = auto() + + +@dataclasses.dataclass +class OutputSpec: + kind: OutputKind + arg: ArgumentSpec + target: Optional[str] + + def __post_init__(self): + assert isinstance( + self.arg, + ( + TensorArgument, + SymIntArgument, + ConstantArgument, + TokenArgument, + CustomObjArgument, + ), + ), self.arg + + +@dataclasses.dataclass +class ExportBackwardSignature: + gradients_to_parameters: Dict[str, str] + gradients_to_user_inputs: Dict[str, str] + loss_output: str + + +@dataclasses.dataclass +class ExportGraphSignature: + """ + :class:`ExportGraphSignature` models the input/output signature of Export Graph, + which is a fx.Graph with stronger invariants gurantees. + + Export Graph is functional and does not access "states" like parameters + or buffers within the graph via ``getattr`` nodes. Instead, :func:`export` + gurantees that parameters, buffers, and constant tensors are lifted out of + the graph as inputs. Similarly, any mutations to buffers are not included + in the graph either, instead the updated values of mutated buffers are + modeled as additional outputs of Export Graph. + + The ordering of all inputs and outputs are:: + + Inputs = [*parameters_buffers_constant_tensors, *flattened_user_inputs] + Outputs = [*mutated_inputs, *flattened_user_outputs] + + e.g. 
If following module is exported:: + + class CustomModule(nn.Module): + def __init__(self) -> None: + super(CustomModule, self).__init__() + + # Define a parameter + self.my_parameter = nn.Parameter(torch.tensor(2.0)) + + # Define two buffers + self.register_buffer('my_buffer1', torch.tensor(3.0)) + self.register_buffer('my_buffer2', torch.tensor(4.0)) + + def forward(self, x1, x2): + # Use the parameter, buffers, and both inputs in the forward method + output = (x1 + self.my_parameter) * self.my_buffer1 + x2 * self.my_buffer2 + + # Mutate one of the buffers (e.g., increment it by 1) + self.my_buffer2.add_(1.0) # In-place addition + + return output + + Resulting Graph would be:: + + graph(): + %arg0_1 := placeholder[target=arg0_1] + %arg1_1 := placeholder[target=arg1_1] + %arg2_1 := placeholder[target=arg2_1] + %arg3_1 := placeholder[target=arg3_1] + %arg4_1 := placeholder[target=arg4_1] + %add_tensor := call_function[target=torch.ops.aten.add.Tensor](args = (%arg3_1, %arg0_1), kwargs = {}) + %mul_tensor := call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, %arg1_1), kwargs = {}) + %mul_tensor_1 := call_function[target=torch.ops.aten.mul.Tensor](args = (%arg4_1, %arg2_1), kwargs = {}) + %add_tensor_1 := call_function[target=torch.ops.aten.add.Tensor](args = (%mul_tensor, %mul_tensor_1), kwargs = {}) + %add_tensor_2 := call_function[target=torch.ops.aten.add.Tensor](args = (%arg2_1, 1.0), kwargs = {}) + return (add_tensor_2, add_tensor_1) + + Resulting ExportGraphSignature would be:: + + ExportGraphSignature( + input_specs=[ + InputSpec(kind=, arg=TensorArgument(name='arg0_1'), target='my_parameter'), + InputSpec(kind=, arg=TensorArgument(name='arg1_1'), target='my_buffer1'), + InputSpec(kind=, arg=TensorArgument(name='arg2_1'), target='my_buffer2'), + InputSpec(kind=, arg=TensorArgument(name='arg3_1'), target=None), + InputSpec(kind=, arg=TensorArgument(name='arg4_1'), target=None) + ], + output_specs=[ + OutputSpec(kind=, 
arg=TensorArgument(name='add_2'), target='my_buffer2'), + OutputSpec(kind=, arg=TensorArgument(name='add_1'), target=None) + ] + ) + """ + + input_specs: List[InputSpec] + output_specs: List[OutputSpec] + + # A list of parameters uniquely identified by mangled fully qualified name + @property + def parameters(self) -> Collection[str]: + return tuple( + s.target + for s in self.input_specs + if s.kind == InputKind.PARAMETER + if isinstance(s.target, str) + ) + + # A list of buffers uniquely identified by mangled fully qualified name + @property + def buffers(self) -> Collection[str]: + return tuple( + s.target + for s in self.input_specs + if s.kind == InputKind.BUFFER + if isinstance(s.target, str) + ) + + @property + def non_persistent_buffers(self) -> Collection[str]: + return tuple( + s.target + for s in self.input_specs + if s.kind == InputKind.BUFFER + if s.persistent is False + if isinstance(s.target, str) + ) + + # A list of lifted constant tensors + @property + def lifted_tensor_constants(self) -> Collection[str]: + return tuple( + s.target + for s in self.input_specs + if s.kind == InputKind.CONSTANT_TENSOR + if isinstance(s.target, str) + ) + + @property + def lifted_custom_objs(self) -> Collection[str]: + return tuple( + s.target + for s in self.input_specs + if s.kind == InputKind.CUSTOM_OBJ + if isinstance(s.target, str) + ) + + # Graph node names of pytree-flattened inputs of original program + @property + def user_inputs(self) -> Collection[Union[int, float, bool, None, str]]: + user_inputs: List[Union[int, float, bool, None, str]] = [] + for s in self.input_specs: + if s.kind != InputKind.USER_INPUT: + continue + + if isinstance(s.arg, (TensorArgument, SymIntArgument, CustomObjArgument)): + user_inputs.append(s.arg.name) + elif isinstance(s.arg, ConstantArgument): + user_inputs.append(s.arg.value) + else: + raise RuntimeError(f"{s.arg} is not a valid user inputs") + return tuple(user_inputs) + + # Graph node names of pytree-flattened outputs of 
original program + @property + def user_outputs(self) -> Collection[Union[int, float, bool, None, str]]: + user_outputs: List[Union[int, float, bool, None, str]] = [] + for s in self.output_specs: + if s.kind != OutputKind.USER_OUTPUT: + continue + + if isinstance(s.arg, (TensorArgument, SymIntArgument)): + user_outputs.append(s.arg.name) + elif isinstance(s.arg, ConstantArgument): + user_outputs.append(s.arg.value) + elif isinstance(s.arg, CustomObjArgument): + user_outputs.append(s.arg.name) + else: + raise RuntimeError(f"{s.arg} is not a valid user output") + return tuple(user_outputs) + + # A dictionary mapping graph input node names to parameters. If a graph input + # name is found in this dictionary, it is guranteed to be a lifted parameter. + @property + def inputs_to_parameters(self) -> Mapping[str, str]: + return _immutable_dict( + (s.arg.name, s.target) + for s in self.input_specs + if s.kind == InputKind.PARAMETER + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + ) + + # A dictionary mapping graph input node names to buffers. If a graph input + # name is found in this dictionary, it is guranteed to be a lifted buffer. + @property + def inputs_to_buffers(self) -> Mapping[str, str]: + return _immutable_dict( + (s.arg.name, s.target) # type: ignore[union-attr, misc] + for s in self.input_specs + if s.kind == InputKind.BUFFER + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + ) + + # A dictionary mapping graph output node names to buffers that are mutated in the + # original program. Buffers that are not mutated will not be found in this dictionary. 
+ @property + def buffers_to_mutate(self) -> Mapping[str, str]: + return _immutable_dict( + (s.arg.name, s.target) + for s in self.output_specs + if s.kind == OutputKind.BUFFER_MUTATION + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + ) + + @property + def user_inputs_to_mutate(self) -> Mapping[str, str]: + return _immutable_dict( + (s.arg.name, s.target) + for s in self.output_specs + if s.kind == OutputKind.USER_INPUT_MUTATION + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + ) + + # A dictionary mapping graph input node names to lifted tensor constants. + @property + def inputs_to_lifted_tensor_constants(self) -> Mapping[str, str]: + return _immutable_dict( + (s.arg.name, s.target) + for s in self.input_specs + if s.kind == InputKind.CONSTANT_TENSOR + and isinstance(s.arg, TensorArgument) + and isinstance(s.target, str) + ) + + @property + def inputs_to_lifted_custom_objs(self) -> Mapping[str, str]: + return _immutable_dict( + (s.arg.name, s.target) + for s in self.input_specs + if s.kind == InputKind.CUSTOM_OBJ + and isinstance(s.arg, CustomObjArgument) + and isinstance(s.target, str) + ) + + @property + def backward_signature(self) -> Optional[ExportBackwardSignature]: + loss_output = None + gradients_to_parameters: Dict[str, str] = {} + gradients_to_user_inputs: Dict[str, str] = {} + for spec in self.output_specs: + if spec.kind == OutputKind.LOSS_OUTPUT: + assert loss_output is None + assert isinstance(spec.arg, TensorArgument) + loss_output = spec.arg.name + elif spec.kind == OutputKind.GRADIENT_TO_PARAMETER: + assert isinstance(spec.target, str) + assert isinstance(spec.arg, TensorArgument) + gradients_to_parameters[spec.arg.name] = spec.target + elif spec.kind == OutputKind.GRADIENT_TO_USER_INPUT: + assert isinstance(spec.target, str) + assert isinstance(spec.arg, TensorArgument) + gradients_to_user_inputs[spec.arg.name] = spec.target + + if loss_output is None: + return None + + return 
ExportBackwardSignature( + loss_output=loss_output, + gradients_to_parameters=gradients_to_parameters, + gradients_to_user_inputs=gradients_to_user_inputs, + ) + + # Map from assertion dependency token index to assertion dep token output + # name in output. The shape of output after aot_autograd will be like: + # (updated_inputs, user_outputs, dep_token). + @property + def assertion_dep_token(self) -> Optional[Mapping[int, str]]: + return None + + @property + def input_tokens(self) -> Collection[str]: + input_tokens = [] + for s in self.input_specs: + if s.kind == InputKind.TOKEN: + assert isinstance(s.arg, TokenArgument) + input_tokens.append(s.arg.name) + return tuple(input_tokens) + + @property + def output_tokens(self) -> Collection[str]: + output_tokens = [] + for s in self.output_specs: + if s.kind == OutputKind.TOKEN: + assert isinstance(s.arg, TokenArgument) + output_tokens.append(s.arg.name) + return tuple(output_tokens) + + def __post_init__(self) -> None: + assertion_dep_token = self.assertion_dep_token + if assertion_dep_token is None: + return + assert len(assertion_dep_token) == 1 + assertion_dep_token_index = next(iter(assertion_dep_token.keys())) + assert ( + len(self.user_outputs) + len(self.buffers_to_mutate) + == assertion_dep_token_index + ) + + def replace_all_uses(self, old: str, new: str): + """ + Replace all uses of the old name with new name in the signature. 
+ """ + assert isinstance(old, str) + assert isinstance(new, str) + arg_types = (TensorArgument, SymIntArgument, CustomObjArgument, TokenArgument) + for o in self.output_specs: + if isinstance(o.arg, arg_types): + if o.arg.name == old: + o.arg.name = new + for i in self.input_specs: + if isinstance(i.arg, arg_types): + if i.arg.name == old: + i.arg.name = new + + def get_replace_hook(self): + def _(old, new, user): + if user.op in ("output", "input"): + self.replace_all_uses(old.name, new) + + return _ + + +def _immutable_dict(items): + """ + Creates a mapping where items cannot be added, deleted, or updated. + NOTE: The immutability is shallow (like tuple is an immutable collection). + """ + from types import MappingProxyType + + return MappingProxyType(dict(items)) + + +def _make_argument_spec(node, token_names) -> ArgumentSpec: + from torch import ScriptObject, SymInt + from torch._library.fake_class_registry import FakeScriptObject + from torch._subclasses.fake_tensor import FakeTensor + + if isinstance(node, (int, bool, float, type(None), str)): + # For const outputs we just directly return this + return ConstantArgument(name="", value=node) + + assert ( + "val" in node.meta + ), f"{node} is not a constant or a node with a 'val' metadata field" + val = node.meta["val"] + if node.name in token_names: + return TokenArgument(name=node.name) + elif isinstance(val, FakeTensor): + return TensorArgument(name=node.name) + elif isinstance(val, SymInt): + return SymIntArgument(name=node.name) + elif isinstance(val, ScriptObject): + return CustomObjArgument(name=node.name, class_fqn=val._type().qualified_name()) # type: ignore[attr-defined] + elif isinstance(val, FakeScriptObject): + return CustomObjArgument( + name=node.name, class_fqn=val.script_class_name, fake_val=val + ) + elif isinstance(val, (int, bool, str, float, type(None))): + return ConstantArgument(name=node.name, value=val) + else: + raise AssertionError( + f"Encountered an unsupported object of type 
{type(val)} " + f"while writing the metadata for exported program" + ) + + +def _convert_to_export_graph_signature( + graph_signature: "GraphSignature", + gm: "torch.fx.GraphModule", + non_persistent_buffers: Set[str], +) -> "ExportGraphSignature": + from torch.utils import _pytree as pytree + + is_joint = graph_signature.backward_signature is not None + + # unpack objects + user_inputs = set(graph_signature.user_inputs) + inputs_to_parameters = graph_signature.inputs_to_parameters + inputs_to_buffers = graph_signature.inputs_to_buffers + user_outputs = set(graph_signature.user_outputs) + buffer_mutations = graph_signature.buffers_to_mutate + user_input_mutations = graph_signature.user_inputs_to_mutate + grad_params = graph_signature.backward_signature.gradients_to_parameter if is_joint else {} # type: ignore[union-attr] + grad_user_inputs = graph_signature.backward_signature.gradients_to_user_inputs if is_joint else {} # type: ignore[union-attr] + loss_output = graph_signature.backward_signature.loss_output if is_joint else None # type: ignore[union-attr] + input_tokens = graph_signature.input_tokens + output_tokens = graph_signature.output_tokens + + inputs = [ + _make_argument_spec(node, input_tokens) + for node in gm.graph.nodes + if node.op == "placeholder" + ] + outputs = [ + _make_argument_spec(node, output_tokens) + for node in pytree.tree_leaves(next(iter(reversed(gm.graph.nodes))).args) + ] + + def to_input_spec(inp: ArgumentSpec) -> InputSpec: + if isinstance(inp, TokenArgument): + return InputSpec(kind=InputKind.TOKEN, arg=inp, target=None) + + if not isinstance(inp, TensorArgument): + return InputSpec(kind=InputKind.USER_INPUT, arg=inp, target=None) + name = inp.name + if name in user_inputs: + return InputSpec(kind=InputKind.USER_INPUT, arg=inp, target=None) + elif name in inputs_to_parameters: + return InputSpec( + kind=InputKind.PARAMETER, + arg=inp, + target=inputs_to_parameters[name], # type: ignore[index] + ) + elif name in inputs_to_buffers: + 
return InputSpec( + kind=InputKind.BUFFER, + arg=inp, + target=inputs_to_buffers[name], # type: ignore[index] + persistent=(inputs_to_buffers[name] not in non_persistent_buffers), # type: ignore[index] + ) + else: + raise AssertionError(f"Unknown tensor input kind: {name}") + + def to_output_spec(idx: int, o: ArgumentSpec) -> OutputSpec: + if isinstance(o, TokenArgument): + return OutputSpec(kind=OutputKind.TOKEN, arg=o, target=None) + + if not isinstance(o, TensorArgument): + return OutputSpec(kind=OutputKind.USER_OUTPUT, arg=o, target=None) + name = o.name + if idx < len(buffer_mutations) + len(user_input_mutations) + len(output_tokens): + if name in buffer_mutations: + return OutputSpec( + kind=OutputKind.BUFFER_MUTATION, + arg=o, + target=buffer_mutations[name], # type: ignore[index] + ) + elif name in user_input_mutations: + return OutputSpec( + kind=OutputKind.USER_INPUT_MUTATION, + arg=o, + target=user_input_mutations[name], # type: ignore[index] + ) + else: + raise AssertionError(f"Unknown tensor mutation kind: {name}") + else: + if name in user_outputs: + return OutputSpec(kind=OutputKind.USER_OUTPUT, arg=o, target=None) + + elif name in grad_params: + return OutputSpec( + kind=OutputKind.GRADIENT_TO_PARAMETER, + arg=o, + target=grad_params[name], + ) + elif name in grad_user_inputs: + return OutputSpec( + kind=OutputKind.GRADIENT_TO_USER_INPUT, + arg=o, + target=grad_user_inputs[name], + ) + elif name == loss_output: + return OutputSpec(kind=OutputKind.LOSS_OUTPUT, arg=o, target=None) + + else: + raise AssertionError(f"Unknown tensor output kind: {name}") + + input_specs = [to_input_spec(inp) for inp in inputs] + output_specs = [to_output_spec(idx, o) for idx, o in enumerate(outputs)] + return ExportGraphSignature(input_specs=input_specs, output_specs=output_specs) diff --git a/janus/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc 
b/janus/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46a8fde114f670aceb65921f4a4a790bf249bb58 --- /dev/null +++ b/janus/lib/python3.10/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ac3ecc0109812d42ed22335bf7c7775d29fc143e4e374d371498ec1b4e69d41 +size 135032