diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e880e0ea4c4bb58cfffec9655ff4e1b392921b5d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_checkpointer.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_checkpointer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ebc5bff70543a36401edd2ac885f14bfb53d651 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_checkpointer.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c64ba58d49067672bc8c1542ddfd806e422d6b47 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ecb4396660e4006e877fbb0a143a678a42d880a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2bcc1b0df6a2a4066e33d8c8fa8047a541956a5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41aa4e7d1c16399e8144f6d4101cb7e05c19967b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfe7de0d5500807183cbbc634068334f0c3fdccf Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd21a04394159c9cd4978eb1f004dc87dcf10f80 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..142b35cae45c7e5ef5efe76cf13fbc73e6356bce Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_version.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59ce112bee3465fa757442aadb6b540d4ad36a3c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_version.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bd3af91e1fa4cc457142e7691719b5f0c04a74e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0734127649ac977ad8c94e345c7d08e4c72bd34 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d65c61a659711031e94cb12c75d2cf649f831840 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b0557195ec782a6325c32e19f5f74bb83623acf Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logger.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9228ede473bf36be2d687e0f39d0772e23333cd7 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logger.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logging_handlers.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logging_handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf39fa4e53f5fed4b7b985e378ae0e1e7241e7e4 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logging_handlers.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57a19f359c746c3997db27f1130ca1b8dcc43b8d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe6c6dbaf869de6cdad8110ab45d0f36b5850c83 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e895fb34c58ca68614162b4ec53dec0ee4855c26 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ebb16c7e299f8c69d9211ceb287a770b457fb82 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..802221770f4bf3c2a5cd31a47966a33da78f7040 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/staging.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/staging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a140ec20a68bf9d670344bfb2d3d03a5efaf4375 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/staging.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c04ff4d81bd7a8eb40752743ed1147890ef47070 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7f7033ad83eb0680994dbc38f8cc86b7d9801a5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88f06e347da124191eff9a4680d9a5fe719098bf Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ad932a417108e028c5920dfeae53ea378dcc28b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30e86b59fcba9cf5fb523061c1dd49254a892249 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74178ac19e92ee2ae6d552ccd7c020bdb1f6a963 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_checkpointer.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_checkpointer.py new file mode 100644 index 0000000000000000000000000000000000000000..a93fe8197dea66e5e331133f94c0067ff8ecb90f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_checkpointer.py @@ -0,0 +1,100 @@ +from concurrent.futures import Future +from typing import Any, Dict, List, Optional + +import torch.distributed as dist +import torch.distributed.checkpoint.state_dict_loader as loader +import torch.distributed.checkpoint.state_dict_saver as saver +from torch.distributed.checkpoint.metadata import Metadata, STATE_DICT_TYPE +from torch.distributed.checkpoint.storage import ( + LoadPlanner, + SavePlanner, + StorageReader, + StorageWriter, +) + + 
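+# Illustrative usage sketch (not part of the module; the path and ``model``
+# are placeholders, and FileSystemWriter/FileSystemReader come from
+# torch.distributed.checkpoint.filesystem):
+#
+#     checkpointer = _Checkpointer(
+#         FileSystemWriter("/tmp/ckpt"), FileSystemReader("/tmp/ckpt")
+#     )
+#     metadata = checkpointer.save(model.state_dict())
+#     checkpointer.load(model.state_dict())  # loads into the dict in place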
+__all__: List[str] = [] + + +class _Checkpointer: + """This base class specifies a high level API for saving and loading + distributed ``state_dict`` objects. It provides an abstraction over the low-level APIs + provided by :py:mod:`torch.distributed.checkpoint.storage`, essentially calling + :py:meth:`torch.distributed.checkpoint.state_dict_saver.save` and + :py:meth:`torch.distributed.checkpoint.state_dict_loader.load` with the provided storage + readers and writers. + + .. warning:: + This feature is experimental and subject to removal/change. + + """ + + def __init__( + self, + storage_writer: StorageWriter, + storage_reader: StorageReader, + *, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + load_planner: Optional[LoadPlanner] = None, + save_planner: Optional[SavePlanner] = None, + ): + """Initializes the Checkpointer instance. + + Args: + storage_writer: Instance of StorageWriter used to perform writes. + storage_reader: StorageReader used to load data from. + process_group: ProcessGroup to be used for cross-rank synchronization. + coordinator_rank: Rank to use to coordinate the checkpoint. rank 0 is used by default. + no_dist: If ``True``, distributed checkpoint will not load in SPMD style. (Default: ``False``) + load_planner: Instance of LoadPlanner to use when loading. + save_planner: Instance of SavePlanner to use when saving. + """ + self.storage_writer = storage_writer + self.storage_reader = storage_reader + self.process_group = process_group + self.coordinator_rank = coordinator_rank + self.no_dist = no_dist + self.load_planner = load_planner + self.save_planner = save_planner + + def save( + self, + state_dict: STATE_DICT_TYPE, + ) -> Metadata: + """Calls :py:meth:`torch.distributed.checkpoint.state_dict_saver.save`, using the values passed during initialization.""" + return saver.save( + state_dict, + self.storage_writer, + process_group=self.process_group, + coordinator_rank=self.coordinator_rank, + no_dist=self.no_dist, + planner=self.save_planner, + ) + + def async_save( + self, + state_dict: STATE_DICT_TYPE, + ) -> Future: + """ + Calls :py:meth:`torch.distributed.checkpoint.state_dict_saver.async_save`, using the values passed during initialization. + + Returns: + Future: A future holding the resultant Metadata object from `save`. + """ + return saver.async_save( + state_dict, + storage_writer=self.storage_writer, + process_group=self.process_group, + planner=self.save_planner, + ) + + def load(self, state_dict: Dict[str, Any]) -> None: + """Calls :py:meth:`torch.distributed.checkpoint.state_dict_loader.load`, using the values passed during initialization.""" + loader.load( + state_dict, + storage_reader=self.storage_reader, + process_group=self.process_group, + planner=self.load_planner, + ) diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py new file mode 100644 index 0000000000000000000000000000000000000000..b57df9c3456ca6bcd70ef65594a7e1e15eb95268 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py @@ -0,0 +1,137 @@ +# Mypy will not try inferring the types of any 3rd party libraries installed.
+# mypy: ignore-errors + +import io +import os +from contextlib import contextmanager +from pathlib import Path +from typing import Generator, Optional, Union + +import fsspec +from fsspec import AbstractFileSystem +from fsspec.core import url_to_fs + +from torch.distributed.checkpoint.filesystem import ( + FileSystemBase, + FileSystemReader, + FileSystemWriter, +) + + +__all__ = [ + "FsspecWriter", + "FsspecReader", +] + + +class FileSystem(FileSystemBase): + def __init__(self) -> None: + self.fs: Optional[AbstractFileSystem] = None + + @contextmanager + def create_stream( + self, path: Union[str, os.PathLike], mode: str + ) -> Generator[io.IOBase, None, None]: + assert self.fs is not None + with self.fs.transaction: + with fsspec.open(str(path), mode) as stream: + yield stream + + def concat_path( + self, path: Union[str, os.PathLike], suffix: str + ) -> Union[str, os.PathLike]: + return os.path.join(path, suffix) + + def init_path(self, path: Union[str, os.PathLike]) -> Union[str, os.PathLike]: + self.fs, _ = url_to_fs(path) + return path + + def rename( + self, path: Union[str, os.PathLike], new_path: Union[str, os.PathLike] + ) -> None: + self.fs.rename(path, new_path) + + def mkdir(self, path: Union[str, os.PathLike]) -> None: + self.fs.makedirs(path, exist_ok=True) + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + if isinstance(checkpoint_id, Path): + return False + + try: + url_to_fs(checkpoint_id) + except ValueError: + return False + + return True + + def exists(self, path: Union[str, os.PathLike]) -> bool: + return self.fs.exists(path) + + def rm_file(self, path: Union[str, os.PathLike]) -> None: + self.fs.rm(path) + + +# TODO: add the dcp.async_save mixin +class FsspecWriter(FileSystemWriter): + """ + Basic implementation of StorageWriter using fsspec. + + This implementation makes the following assumptions and simplifications: + + * The checkpoint path is an empty or non-existing directory. + * File creation is atomic. + + The checkpoint consists of one file per write request plus + a `.metadata` file with the serialized metadata. + + """ + + def __init__( + self, + path: Union[str, os.PathLike], + single_file_per_rank: bool = True, + sync_files: bool = True, + thread_count: int = 1, + per_thread_copy_ahead: int = 10_000_000, + overwrite: bool = True, + ) -> None: + """ + Initialize the writer pointing to `path`. + + Args: + path: directory where the checkpoint will be written to. + single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Defaults to True. + sync_files: force files to be synced to permanent storage. Defaults to True. + thread_count: Number of IO threads to use to write. Defaults to 1. + per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving them. Defaults to 10 MB. + overwrite: Whether to allow overwriting existing checkpoints. Defaults to True. + + N.B. If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure.
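+
+        Example (a minimal sketch; the URL is a placeholder, and any
+        fsspec-supported protocol can be used the same way):
+
+            >>> # xdoctest: +SKIP("requires fsspec-backed storage")
+            >>> import torch
+            >>> import torch.distributed.checkpoint as dcp
+            >>> writer = FsspecWriter("s3://bucket/checkpoint-1")
+            >>> dcp.save({"weight": torch.rand(4, 4)}, storage_writer=writer)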
+ """ + super().__init__( + path, + single_file_per_rank, + sync_files, + thread_count, + per_thread_copy_ahead, + overwrite=overwrite, + ) + self.fs = FileSystem() + self.path = self.fs.init_path(path) + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + return FileSystem.validate_checkpoint_id(checkpoint_id) + + +class FsspecReader(FileSystemReader): + def __init__(self, path: Union[str, os.PathLike]) -> None: + super().__init__(path) + self.fs = FileSystem() + self.path = self.fs.init_path(path) + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + return FileSystem.validate_checkpoint_id(checkpoint_id) diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..b846a1d47d843433e1c3de295c86d318b7157a1e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py @@ -0,0 +1,70 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from typing import Dict, Tuple + +from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE + +from . import _version +from ._traverse import ( + OBJ_PATH, + set_element, + STATE_DICT_ITEM, + traverse_state_dict, + traverse_state_dict_v_2_3, +) + + +""" +TODO: +Need to add ability to handle tuple, OrderedDict, NamedTuple. +Update mappings from dict to a class. +Change set_element to recreate the right type for tuple, OrderedDict, and NamedTuple. +""" + + +FLATTEN_MAPPING = Dict[str, OBJ_PATH] + + +# TODO: Update Docstring for nested_dict.py +def flatten_state_dict( + state_dict: STATE_DICT_TYPE, +) -> Tuple[STATE_DICT_TYPE, FLATTEN_MAPPING]: + """ + Flatten ``state_dict`` made of nested dicts and lists into a top level dictionary. + + Use ``unflatten_state_dict`` to revert this process. + Returns: + A tuple with the flatten state_dict and a mapping from original to new state_dict. + N.B. The new keys are derived from the object paths, joined by dot. + For example: ``{ 'a': {'b':...}}`` results in the key `a.b`. + """ + flattened: STATE_DICT_TYPE = {} + mappings: FLATTEN_MAPPING = {} + + def flat_copy(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None: + new_fqn = ".".join(map(str, path)) + if new_fqn in flattened: + raise ValueError(f"duplicated flatten key {new_fqn}") + flattened[new_fqn] = value + mappings[new_fqn] = path + + # We started to flatten dictionary since v2.4. But in order to not break + # the checkpoints that were saved before v2.4, we need to keep the old + # traversal so that we can reconstruct those checkpoints. 
+ use_v_2_3 = ( + _version._derived_version is not None and _version._derived_version == "2_3" + ) + if use_v_2_3: + traverse_state_dict_v_2_3(state_dict, flat_copy) + else: + traverse_state_dict(state_dict, flat_copy) + return flattened, mappings + + +def unflatten_state_dict( + state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING +) -> STATE_DICT_TYPE: + """Restore the original nested state_dict according to ``mapping`` and the flattened ``state_dict``.""" + nested: STATE_DICT_TYPE = {} + for key, value in state_dict.items(): + set_element(nested, mapping[key], value) + return nested diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a68bcddeb7f9d9ffe6f89056dfe1ccc30cc12eb5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py @@ -0,0 +1,107 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +import copy +from typing import TYPE_CHECKING + +import torch.distributed as dist +from torch.distributed._shard.sharded_tensor import Shard, ShardedTensor, ShardMetadata +from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE +from torch.distributed.remote_device import _remote_device + +from ._traverse import OBJ_PATH, set_element, STATE_DICT_ITEM, traverse_state_dict +from .utils import _element_wise_add, _normalize_device_info + + +if TYPE_CHECKING: + from torch.distributed._shard.sharded_tensor.metadata import ShardedTensorMetadata + + +# TODO: We need to refactor this code. +def _flatten_sharded_tensors(state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE: + r""" + Transform ``state_dict`` by flattening all nested ShardedTensor instances found. + + The resulting ShardedTensor instances are only correct regarding the local shard and + MUST not be used for any other purpose but checkpointing, as no operator will work with them. + + This function should be used in conjunction with a state_dict produced by FSDP's + StateDictType.SHARDED_STATE_DICT methods. 
+ """ + new_state_dict: STATE_DICT_TYPE = {} + + def rewrite_dict(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None: + if not isinstance(value, ShardedTensor): + set_element(new_state_dict, path, value) + return + shards = value.local_shards() + + if len(shards) == 0: + return + if len(shards) != 1: + set_element(new_state_dict, path, value) + return + + outer_shard = shards[0] + + inner_st = outer_shard.tensor + if not isinstance(inner_st, ShardedTensor): + set_element(new_state_dict, path, value) + return + + if len(inner_st.local_shards()) != 1: + raise ValueError("Cannot handle inner tensor with more than 1 shard") + inner_shard = inner_st.local_shards()[0] + + local_shards = [ + Shard( + tensor=inner_shard.tensor, + metadata=ShardMetadata( + shard_offsets=_element_wise_add( + outer_shard.metadata.shard_offsets, + inner_shard.metadata.shard_offsets, + ), + shard_sizes=inner_shard.metadata.shard_sizes, + placement=f"rank:{dist.get_rank()}/{inner_shard.tensor.device}", + ), + ) + ] + + st_meta: ShardedTensorMetadata = copy.deepcopy(value.metadata()) + other_rank = 0 if dist.get_rank() > 0 else 1 + device_info = _normalize_device_info(inner_shard.tensor.device.type, 0) + + # Remove the outer ST shard the inner ST covers + for i, shard_md in enumerate(st_meta.shards_metadata): + if shard_md.shard_offsets == outer_shard.metadata.shard_offsets: + st_meta.shards_metadata.pop(i) + break + + # Attribute other rank for the other shards + for shard_md in st_meta.shards_metadata: + shard_md.placement = _remote_device(f"rank:{other_rank}/{device_info}") + + # Add other inner shards from the inner tensor + for inner_md in inner_st.metadata().shards_metadata: + if inner_md.shard_offsets != inner_shard.metadata.shard_offsets: + st_meta.shards_metadata.append( + ShardMetadata( + shard_offsets=_element_wise_add( + outer_shard.metadata.shard_offsets, + inner_md.shard_offsets, + ), + shard_sizes=inner_md.shard_sizes, + placement=f"rank:{other_rank}/{device_info}", + ) + ) + + # Finally add this shard + st_meta.shards_metadata.append(local_shards[0].metadata) + + st = ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards=local_shards, + sharded_tensor_metadata=st_meta, + ) + set_element(new_state_dict, path, st) + + traverse_state_dict(state_dict, rewrite_dict) + return new_state_dict diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_version.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..b3065bdfd6a2c141a959ef0ffe30aeafdc2dc54f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_version.py @@ -0,0 +1,6 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +from typing import Optional + + +_derived_version: Optional[str] = None diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py new file mode 100644 index 0000000000000000000000000000000000000000..e587580617a1bff43f4d50861dd554595b371c3e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py @@ -0,0 +1,43 @@ +# mypy: allow-untyped-defs +import traceback as tb +from typing import Any, Dict, Tuple + + +WRAPPED_EXCEPTION = Tuple[BaseException, tb.StackSummary] + +__all__ = ["CheckpointException"] + + +def _wrap_exception(exc: BaseException) -> WRAPPED_EXCEPTION: + return (exc, tb.extract_tb(exc.__traceback__)) + + +def _is_wrapped_exception(obj: Any) -> bool: + if not isinstance(obj, tuple): + return False + if len(obj) != 2: + return False + return isinstance(obj[0], BaseException) and isinstance(obj[1], tb.StackSummary) + + +class CheckpointException(BaseException): + """Exception raised if failure was detected as part of a checkpoint load or save.""" + + def __init__(self, msg: str, failures: Dict[int, WRAPPED_EXCEPTION]): + super().__init__(msg, failures) + self._failures = failures + + @property + def failures(self) -> Dict[int, WRAPPED_EXCEPTION]: + """Return a dictionary mapping node ranks to their associated exceptions in case of failure.""" + return self._failures + + def __str__(self): + str = f"CheckpointException ranks:{self._failures.keys()}\n" + for rank, exc_pair in self._failures.items(): + exc, trace = exc_pair + str += f"Traceback (most recent call last): (RANK {rank})\n" + if trace is not None: + str += "".join(tb.format_list(trace)) + str += "".join(tb.format_exception_only(type(exc), value=exc)) + return str diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/default_planner.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/default_planner.py new file mode 100644 index 0000000000000000000000000000000000000000..67f3c73bca51daf1b1d54c34bb575185e021ebf9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/default_planner.py @@ -0,0 +1,546 @@ +# mypy: allow-untyped-defs +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import dataclasses +import io +import logging +import operator +from collections import ChainMap +from functools import reduce +from typing import Any, cast, Dict, List, Optional, Tuple, Union + +import torch +from torch.distributed._shard._utils import narrow_tensor_by_index +from torch.distributed.checkpoint._dedup_save_plans import dedup_save_plans +from torch.distributed.checkpoint._nested_dict import ( + FLATTEN_MAPPING, + flatten_state_dict, +) +from torch.distributed.checkpoint._sharded_tensor_utils import _flatten_sharded_tensors +from torch.distributed.checkpoint._traverse import set_element +from torch.distributed.checkpoint.metadata import ( + BytesStorageMetadata, + ChunkStorageMetadata, + Metadata, + MetadataIndex, + STATE_DICT_TYPE, + STORAGE_TYPES, + StorageMeta, + TensorStorageMetadata, +) +from torch.distributed.checkpoint.planner import ( + LoadPlan, + LoadPlanner, + ReadItem, + SavePlan, + SavePlanner, + WriteItem, + WriteItemType, +) +from torch.distributed.checkpoint.planner_helpers import ( + _create_default_metadata_only_plan, + _create_read_items, + _create_write_items, + _init_state_dict, +) +from torch.distributed.checkpoint.utils import find_state_dict_object +from torch.distributed.tensor import DTensor + +from . import _version + + +logger: logging.Logger = logging.getLogger(__name__) + + +__all__ = [ + "DefaultSavePlanner", + "DefaultLoadPlanner", + "create_default_local_load_plan", + "create_default_global_load_plan", + "create_default_local_save_plan", + "create_default_global_save_plan", +] + + +# TODO: Update docstrings for default_planner.py +class DefaultSavePlanner(SavePlanner): + mappings: FLATTEN_MAPPING + + def __init__( + self, + flatten_state_dict: bool = True, + flatten_sharded_tensors: bool = True, + dedup_replicated_tensors: Optional[bool] = None, + dedup_save_to_lowest_rank: bool = False, + ) -> None: + self.flatten_state_dict = flatten_state_dict + self.flatten_sharded_tensors = flatten_sharded_tensors + self.mappings = {} + self.dedup_save_to_lowest_rank = dedup_save_to_lowest_rank + if dedup_replicated_tensors is not None: + logger.warning( + "DefaultSavePlanner's `dedup_replicated_tensors` argument is being " + "deprecated, and no longer has any effect. Please remove this argument " + "from your call." + ) + + def set_up_planner( + self, + state_dict: STATE_DICT_TYPE, + storage_meta: Optional[StorageMeta] = None, + is_coordinator: bool = False, + ) -> None: + if self.flatten_state_dict: + state_dict, self.mappings = flatten_state_dict(state_dict) + if self.flatten_sharded_tensors: + state_dict = _flatten_sharded_tensors(state_dict) + self.state_dict = state_dict + self.is_coordinator = is_coordinator + + def create_local_plan(self) -> SavePlan: + plan = create_default_local_save_plan(self.state_dict, self.is_coordinator) + if self.flatten_state_dict: + plan = dataclasses.replace(plan, planner_data=self.mappings) + self.plan = plan + + return self.plan + + def create_global_plan( + self, all_plans: List[SavePlan] + ) -> Tuple[List[SavePlan], Metadata]: + all_plans = dedup_save_plans(all_plans, self.dedup_save_to_lowest_rank) + + global_plan, metadata = create_default_global_save_plan(all_plans) + + if self.flatten_state_dict: + # | does not work for Python 3.8 or older version. 
+ # merged_mappings = reduce( + # lambda x, y: x | y, (p.planner_data for p in global_plan) + # ) + planner_data_dict = [p.planner_data for p in global_plan] + merged_mappings = dict(ChainMap(*planner_data_dict)) + metadata = dataclasses.replace(metadata, planner_data=merged_mappings) + + if not _validate_global_plan(global_plan, metadata): + raise ValueError("Failed to validate global plan") + + self.global_plan = global_plan + self.metadata = metadata + + return self.global_plan, self.metadata + + def finish_plan(self, new_plan: SavePlan) -> SavePlan: + self.plan = new_plan + return new_plan + + def resolve_data(self, write_item: WriteItem) -> Union[torch.Tensor, io.BytesIO]: + object = self.lookup_object(write_item.index) + return self.transform_object(write_item, object) + + def lookup_object(self, index: MetadataIndex) -> Any: + """Extension from the planner interface to make it easy to extend the default planner.""" + return find_state_dict_object(self.state_dict, index) + + def transform_object(self, write_item: WriteItem, object: Any): + """Extension from the planner interface to make it easy to extend the default planner.""" + if write_item.type == WriteItemType.BYTE_IO: + bytes = io.BytesIO() + torch.save(object, bytes) + object = bytes + return object + + +class DefaultLoadPlanner(LoadPlanner): + """ + DefaultLoadPlanner that adds multiple features on top of LoadPlanner. + + In particular it adds the following: + + flatten_state_dict: Handle state_dict with nested dicts + flatten_sharded_tensors: For FSDP in 2D parallel mode + allow_partial_load: If False, will raise a runtime error if a key is present in state_dict, but not in the checkpoint. + """ + + original_state_dict: STATE_DICT_TYPE + mappings: FLATTEN_MAPPING + + def __init__( + self, + flatten_state_dict: bool = True, + flatten_sharded_tensors: bool = True, + allow_partial_load: bool = False, + ) -> None: + self.flatten_state_dict = flatten_state_dict + self.flatten_sharded_tensors = flatten_sharded_tensors + self.original_state_dict = {} + self.mappings = {} + self.allow_partial_load = allow_partial_load + + def set_up_planner( + self, + state_dict: STATE_DICT_TYPE, + metadata: Optional[Metadata] = None, + is_coordinator: bool = False, + ) -> None: + _init_state_dict(state_dict) + self.original_state_dict = state_dict + + if self.flatten_sharded_tensors: + state_dict = _flatten_sharded_tensors(state_dict) + + if self.flatten_state_dict: + state_dict, self.mappings = flatten_state_dict(state_dict) + + self.state_dict = state_dict + self.metadata = metadata + self.is_coordinator = is_coordinator + + def create_local_plan(self) -> LoadPlan: + assert self.metadata is not None + if self.flatten_state_dict: + # To support checkpoints that were saved before v2.4, we have to + # differentiate if the missing keys are due to old checkpoints. + # The contracts are: + # 1. There are 4 cases when we find a missing key. + # 1.1 Actual missing key, but allow_partial_load is False + # 1.2 Actual missing key, but allow_partial_load is True + # 1.3 Old checkpoint, but allow_partial_load is False + # 1.4 Old checkpoint, but allow_partial_load is True + # 2. If we find a missing key, we first convert the keys back to + # the key format of v2.3 + # 3. If the previously missing keys are among the v2.3 keys, we assume + # this is an old checkpoint. + # 4. Pass the state_dict to `create_default_local_load_plan()`, + # which has the logic to check missing keys for allow_partial_load.
+ # So for the 1.2 and 1.4 cases, we delegate the allow_partial_load check to + # `create_default_local_load_plan()`. The logic here is to determine + # whether the checkpoint belongs to 2.3 (or before) or 2.4 (or after). + current_keys = set(self.state_dict.keys()) + load_keys = set(self.metadata.state_dict_metadata.keys()) + missing_keys = load_keys - current_keys + if missing_keys: + _version._derived_version = "2_3" + old_state_dict, old_mappings = flatten_state_dict( + self.original_state_dict + ) + old_keys = set(old_state_dict.keys()) + if old_keys & missing_keys: + self.state_dict, self.mappings = old_state_dict, old_mappings + # _derived_version is only used by flatten_state_dict now. + # Set it back to None so that later we can save to a new version. + _version._derived_version = None + + return create_default_local_load_plan( + self.state_dict, self.metadata, not self.allow_partial_load + ) + + def create_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]: + return create_default_global_load_plan(global_plan) + + def finish_plan(self, new_plan: LoadPlan) -> LoadPlan: + return new_plan + + def load_bytes(self, read_item: ReadItem, value: io.BytesIO) -> None: + if self.flatten_state_dict: + set_element( + self.original_state_dict, + self.mappings[read_item.dest_index.fqn], + torch.load(value, weights_only=False), + ) + else: + self.state_dict[read_item.dest_index.fqn] = torch.load( + value, weights_only=False + ) + + def resolve_tensor(self, read_item: ReadItem): + tensor = self.lookup_tensor(read_item.dest_index) + return self.transform_tensor(read_item, tensor) + + def commit_tensor(self, read_item: ReadItem, tensor: torch.Tensor) -> None: + pass + + def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor: + """Extension from the planner interface to make it easy to extend the default planner.""" + return find_state_dict_object(self.state_dict, index) + + def transform_tensor(self, read_item: ReadItem, tensor: torch.Tensor): + """Extension from the planner interface to make it easy to extend the default planner.""" + return narrow_tensor_by_index(tensor, read_item.dest_offsets, read_item.lengths) + + +class _EmptyStateDictLoadPlanner(DefaultLoadPlanner): + """ + Extension of DefaultLoadPlanner, which rebuilds the state_dict from the saved metadata. + Useful for loading a state_dict without first initializing a model, such as + when converting a DCP checkpoint into a Torch save file. + + N.B. ``state_dict`` must be an empty dictionary when used with this LoadPlanner. + + .. warning:: + Because the entire state dict is initialized, it's recommended to only utilize + this LoadPlanner on a single rank or process to avoid OOM.
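+
+    Example (a minimal sketch mirroring ``dcp_to_torch_save`` in
+    ``format_utils.py``; the checkpoint directory is a placeholder):
+
+        >>> # xdoctest: +SKIP("requires a DCP checkpoint on disk")
+        >>> sd: STATE_DICT_TYPE = {}
+        >>> _load_state_dict(
+        >>>     sd,
+        >>>     storage_reader=FileSystemReader("/tmp/dcp_ckpt"),
+        >>>     planner=_EmptyStateDictLoadPlanner(),
+        >>>     no_dist=True,
+        >>> )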
+ + """ + + def __init__(self, keys=None, *args, **kwargs): + self.keys = keys + super().__init__(*args, **kwargs) + + def _should_include_key(self, key: str, metadata: Metadata) -> bool: + if self.keys is None: + return True + + if key in self.keys: + True + + unflattened_keys: List[str] = [] + planner_data = metadata.planner_data.get(key) + for unflattened_key in planner_data: + if unflattened_keys: + unflattened_keys.append( + ".".join([unflattened_keys[-1], str(unflattened_key)]) + ) + + else: + unflattened_keys.append(unflattened_key) + + if any(unflattened_key in self.keys for unflattened_key in unflattened_keys): + return True + + return False + + def set_up_planner( + self, + state_dict: STATE_DICT_TYPE, + metadata: Optional[Metadata] = None, + is_coordinator: bool = False, + ) -> None: + assert not state_dict + assert metadata is not None + + # rebuild the state dict from the metadata + for k, v in metadata.state_dict_metadata.items(): + if not self._should_include_key(k, metadata): + continue + + if isinstance(v, TensorStorageMetadata): + v = torch.empty(v.size, dtype=v.properties.dtype) # type: ignore[assignment] + if k in metadata.planner_data: + set_element(state_dict, metadata.planner_data[k], v) + else: + state_dict[k] = v + + super().set_up_planner(state_dict, metadata, is_coordinator) + + +def create_default_local_load_plan( + state_dict: Dict[str, Any], metadata: Metadata, strict: bool = True +) -> LoadPlan: + requests = [] + """ + Create the ``LoadPlan`` used by DefaultLoadPlanner. + + It produces one read item per value in ``state_dict`` using the metadata in ``metadata``. + + The default behavior is to match key exactly between state_dict and metadata. + It handles resharding by issuing multiple read requests against storage in order to match + load requirements. + """ + + for fqn, obj in state_dict.items(): + # ignore state_dict keys which do not exist in `state_dict` if strict=False + if fqn not in metadata.state_dict_metadata: + if strict: + raise RuntimeError(f"Missing key in checkpoint state_dict: {fqn}.") + else: + continue + + md = metadata.state_dict_metadata[fqn] + # Since DTensor supports submesh, adding extra check to ensure _create_read_items() + # gets called only when the current rank is part of the mesh for the corresponding DTensor. + if isinstance(obj, DTensor): + if obj.device_mesh.get_coordinate() is not None: + requests += _create_read_items(fqn, md, obj) + else: + requests += _create_read_items(fqn, md, obj) + + return LoadPlan(requests) + + +def create_default_global_load_plan( + all_plans: List[LoadPlan], +) -> List[LoadPlan]: + """ + Create global load plan used by DefaultLoadPlanner. + + The default load behavior involved no global coordination and this function + currently doesn't change the local plans. + """ + return all_plans + + +def create_default_local_save_plan( + state_dict: Dict[str, Any], is_coordinator: bool +) -> SavePlan: + """ + Create the ``SavePlan`` used by DefaultSavePlanner. + + On non-coordinator ranks, this function ignores tensors and non-tensor objects, + only producing writes for ShardedTensor objects. + + On the coordinator rank, produce writes for all values. + """ + requests = [] + for fqn, obj in state_dict.items(): + # Since DTensor supports submesh, adding extra check to ensure _create_write_items() + # gets called only when the current rank is part of the mesh for the corresponding DTensor. 
+ if isinstance(obj, DTensor): + if obj.device_mesh.get_coordinate() is not None: + requests += _create_write_items(fqn, obj) + else: + # For plain tensor and non-tensor values, add the request on all + # ranks. The coordinator will decide whether to deduplicate the + # values based on the keys. + requests += _create_write_items(fqn, obj) + + return SavePlan(requests) + + +def create_default_global_save_plan( + all_plans: List[SavePlan], + rewrite_index_hints: bool = True, +) -> Tuple[List[SavePlan], Metadata]: + """ + Create the global plan and metadata used by DefaultSavePlanner. + + Metadata is produced by concatenating the metadata of all ``WriteItem`` from the supplied plans. + + The only global planning change is to update index hints in all ``MetadataIndex`` objects if + ``rewrite_index_hints`` is True. + """ + md: Dict[str, STORAGE_TYPES] = {} + new_plans = [] + for plan in all_plans: + new_items = [] + for item in plan.items: + if item.type != WriteItemType.SHARD: + assert item.index.fqn not in md + + if item.type == WriteItemType.BYTE_IO: + md[item.index.fqn] = BytesStorageMetadata() + new_items.append(item) + else: + assert item.tensor_data is not None + tensor_md = cast( + TensorStorageMetadata, + md.setdefault( + item.index.fqn, + TensorStorageMetadata( + properties=item.tensor_data.properties, + size=item.tensor_data.size, + chunks=[], + ), + ), + ) + new_item = item + if rewrite_index_hints: + new_index = dataclasses.replace( + item.index, index=len(tensor_md.chunks) + ) + new_item = dataclasses.replace(item, index=new_index) + new_items.append(new_item) + + assert ( + item.tensor_data.chunk is not None + ), f""" + Cannot create MD for tensor without bounds. + FQN: {item.index.fqn} + """ + tensor_md.chunks.append(item.tensor_data.chunk) + new_plans.append(dataclasses.replace(plan, items=new_items)) + return (new_plans, Metadata(md)) + + +def _create_default_local_metadata(state_dict: STATE_DICT_TYPE) -> Metadata: + """Return the ``Metadata`` as if DefaultSavePlanner was used to checkpoint ``state_dict``.""" + plan = _create_default_metadata_only_plan(state_dict) + _, md = create_default_global_save_plan([plan]) + return md + + +def _check_box_overlap(box0: ChunkStorageMetadata, box1: ChunkStorageMetadata) -> bool: + """Check if two boxes overlap. Tuples are (offset, lengths).""" + # For each dim of each shard, check if one shard resides on the other + # end of the second shard with respect to that dim. As an example for a 2D + # shard, we would check if one shard is above or on the left of the + # other shard.
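+    # Worked 1D example: box0 = (offsets=(0,), sizes=(4,)) and
+    # box1 = (offsets=(4,), sizes=(4,)) do not overlap, since box1 starts
+    # exactly where box0 ends along dim 0.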
+ ndims = len(box0.offsets) + for i in range(ndims): + if box0.offsets[i] >= box1.offsets[i] + box1.sizes[i]: + return False + if box1.offsets[i] >= box0.offsets[i] + box0.sizes[i]: + return False + + return True + + +def _check_box_bounds( + outer_box_size: torch.Size, inner_box: ChunkStorageMetadata +) -> bool: + for i in range(len(outer_box_size)): + if inner_box.offsets[i] < 0: + return False + if inner_box.sizes[i] < 0: + return False + if inner_box.offsets[i] + inner_box.sizes[i] > outer_box_size[i]: + return False + + return True + + +def _validate_global_plan(global_plan: List[SavePlan], metadata: Metadata) -> bool: + all_good = True + for key, value in metadata.state_dict_metadata.items(): + if isinstance(value, BytesStorageMetadata): + continue + if len(value.size) == 0: + continue + chunks_volume = 0 + for chunk_idx, chunk0 in enumerate(value.chunks): + # Compute the volume + if not _check_box_bounds(value.size, chunk0): + logger.warning( + """ + key:%s has out of bounds chunk: + tensor-size:%s chunk: %s + """, + key, + value.size, + chunk0, + ) + all_good = False + chunks_volume += reduce(operator.mul, chunk0.sizes, 1) + + # Check for overlap + for chunk1 in value.chunks[chunk_idx + 1 :]: + if _check_box_overlap(chunk0, chunk1): + logger.warning( + "key:%s has overlapping chunks: %s %s", key, chunk0, chunk1 + ) + all_good = False + + # Check whether the combined chunks cover the whole tensor + tensor_volume = reduce(operator.mul, value.size, 1) + if chunks_volume != tensor_volume: + logger.warning( + """ + key:%s invalid fill tensor-volume: + %s chunks-volume: %s + """, + key, + tensor_volume, + chunks_volume, + ) + all_good = False + + return all_good diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/format_utils.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/format_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ec97504e79b58f10771654eff0c04d7f9690cd46 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/format_utils.py @@ -0,0 +1,280 @@ +# mypy: allow-untyped-defs +import argparse +import os +from enum import Enum +from typing import cast, Dict, List, Optional, Union + +import torch +import torch.distributed as dist +from torch.distributed._shard._utils import narrow_tensor_by_index +from torch.distributed.checkpoint import FileSystemReader, FileSystemWriter +from torch.distributed.checkpoint._nested_dict import flatten_state_dict +from torch.distributed.checkpoint.default_planner import ( + _EmptyStateDictLoadPlanner, + DefaultLoadPlanner, +) +from torch.distributed.checkpoint.metadata import ( + Metadata, + STATE_DICT_TYPE, + STORAGE_TYPES, + TensorProperties, + TensorStorageMetadata, +) +from torch.distributed.checkpoint.planner import LoadItemType, LoadPlan, LoadPlanner +from torch.distributed.checkpoint.planner_helpers import _create_chunk_list +from torch.distributed.checkpoint.state_dict_loader import _load_state_dict +from torch.distributed.checkpoint.state_dict_saver import _save_state_dict +from torch.distributed.checkpoint.storage import StorageReader +from torch.futures import Future + + +__all__ = [ + "dcp_to_torch_save", + "torch_save_to_dcp", + "BroadcastingTorchSaveReader", + "DynamicMetaLoadPlanner", +] + + +class BroadcastingTorchSaveReader(StorageReader): + """ + StorageReader for reading a Torch Save file. This reader will read the entire checkpoint + on the coordinator rank, and then broadcast and shard each tensor to all ranks. + + N.B.
Intended to be used with DynamicMetaLoadPlanner + + .. warning:: + Current implementation only supports loading Tensors. + + >>> # xdoctest: +SKIP("undefined vars") + >>> sd = {"model": model} + >>> dcp.load( + >>> sd, + >>> storage_reader=BroadcastingTorchSaveReader(), + >>> planner=DynamicMetaLoadPlanner(), + >>> checkpoint_id="path_to_model.pt" + >>> ) + """ + + def __init__( + self, + checkpoint_id: Optional[Union[str, os.PathLike]] = None, + coordinator_rank: int = 0, + ) -> None: + self.checkpoint_id = checkpoint_id + self.coordinator_rank = coordinator_rank + + def read_metadata(self) -> Metadata: + """Extends the default StorageReader to support building the metadata file.""" + # Metadata is built in planner.set_up_planner, since we are not actually reading metadata from + # the disk + return Metadata(state_dict_metadata={}) + + def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]: + """ + Reads torch save data on the coordinator rank and broadcasts it afterwards. + This incurs a communication cost, but avoids having to load + the entire checkpoint on each rank, hopefully preventing OOM issues. + """ + planner = cast(DefaultLoadPlanner, planner) + + # data is read in on the coordinator rank, and broadcast afterwards + # this incurs a communication cost, but it avoids having to load + # the entire checkpoint on each rank, hopefully preventing OOM issues + # TODO: read on each host, instead of only the coordinator + if self.is_coordinator: + assert self.checkpoint_id is not None + torch_state_dict = torch.load( + self.checkpoint_id, map_location="cpu", weights_only=False + ) + if planner.flatten_state_dict: + torch_state_dict, _ = flatten_state_dict(torch_state_dict) + else: + torch_state_dict = None + + for req in plan.items: + if req.type == LoadItemType.BYTE_IO: + raise RuntimeError( + f"Non-tensor value identified at {req.storage_index.fqn}. " + f"At this time {type(self).__name__} only supports loading Tensors."
+ ) + + # Broadcast the tensor from the coordinator rank + if self.is_coordinator: + pg_device = dist.distributed_c10d._get_pg_default_device() + tensor = torch_state_dict[req.storage_index.fqn].to(pg_device) + else: + tensor = torch.empty_like(planner.state_dict[req.storage_index.fqn]) + + dist.broadcast(tensor, src=self.coordinator_rank, async_op=False) + + tensor = narrow_tensor_by_index(tensor, req.storage_offsets, req.lengths) + target_tensor = planner.resolve_tensor(req).detach() + assert target_tensor.size() == tensor.size(), ( + f"req {req.storage_index} mismatch sizes, " + f"{target_tensor.size()} vs {tensor.size()}" + ) + target_tensor.copy_(tensor) + planner.commit_tensor(req, target_tensor) + + fut: Future = Future() + fut.set_result(None) + return fut + + def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None: + """Implementation of the StorageReader method""" + self.is_coordinator = is_coordinator + if self.is_coordinator: + assert dist.get_rank() == self.coordinator_rank + + assert self.checkpoint_id is not None + + def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan: + """Implementation of the StorageReader method""" + return plan + + def prepare_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]: + """Implementation of the StorageReader method""" + return global_plan + + def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None: + """Implementation of the StorageReader method""" + self.checkpoint_id = checkpoint_id + + @classmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + """Implementation of the StorageReader method""" + return os.path.isfile(checkpoint_id) + + +class DynamicMetaLoadPlanner(DefaultLoadPlanner): + """ + Extension of DefaultLoadPlanner, which creates a new Metadata object based on the passed-in state dict, + avoiding the need to read metadata from disk. This is useful when reading formats which don't have a + metadata file, like Torch Save files. + + N.B. Intended to be used with BroadcastingTorchSaveReader. + + .. warning:: + Current implementation only supports loading Tensors. + + >>> # xdoctest: +SKIP("undefined vars") + >>> sd = {"model": model} + >>> dcp.load( + >>> sd, + >>> storage_reader=BroadcastingTorchSaveReader(), + >>> planner=DynamicMetaLoadPlanner(), + >>> checkpoint_id="path_to_model.pt" + >>> ) + """ + + def set_up_planner( + self, + state_dict: STATE_DICT_TYPE, + metadata: Optional[Metadata] = None, + is_coordinator: bool = False, + ) -> None: + """Sets up the planner, extending the default behavior by creating the Metadata object from the state dict.""" + super().set_up_planner(state_dict, metadata, is_coordinator) + + state_dict_metadata: Dict[str, STORAGE_TYPES] = {} + for key, tensor in self.state_dict.items(): + if not torch.is_tensor(tensor): + raise RuntimeError( + f"Non-tensor value identified at {key}. " + f"At this time {type(self).__name__} only supports loading Tensors." + ) + + state_dict_metadata[key] = TensorStorageMetadata( + TensorProperties(dtype=tensor.dtype), + tensor.size(), + _create_chunk_list(tensor), + ) + self.metadata = Metadata(state_dict_metadata=state_dict_metadata) + + +def dcp_to_torch_save( + dcp_checkpoint_dir: Union[str, os.PathLike], + torch_save_path: Union[str, os.PathLike], +): + """ + Given a directory containing a DCP checkpoint, this function will convert it into a + Torch save file. + + Args: + dcp_checkpoint_dir: Directory containing the DCP checkpoint.
+ torch_save_path: Filename to store the converted Torch save file. + + .. warning:: + To avoid OOM, it's recommended to only run this function on a single rank. + """ + sd: STATE_DICT_TYPE = {} + _load_state_dict( + sd, + storage_reader=FileSystemReader(dcp_checkpoint_dir), + planner=_EmptyStateDictLoadPlanner(), + no_dist=True, + ) + torch.save(sd, torch_save_path) + + +def torch_save_to_dcp( + torch_save_path: Union[str, os.PathLike], + dcp_checkpoint_dir: Union[str, os.PathLike], +): + """ + Given the location of a torch save file, converts it into a DCP checkpoint. + + Args: + torch_save_path: Filename of the Torch save file. + dcp_checkpoint_dir: Directory to store the DCP checkpoint. + + .. warning:: + To avoid OOM, it's recommended to only run this function on a single rank. + """ + + state_dict = torch.load(torch_save_path, weights_only=False) + # we don't need stateful behavior here because the expectation is anything loaded by + # torch.load would not contain stateful objects. + _save_state_dict( + state_dict, storage_writer=FileSystemWriter(dcp_checkpoint_dir), no_dist=True + ) + + +if __name__ == "__main__": + + class FormatMode(Enum): + TORCH_TO_DCP = "torch_to_dcp" + DCP_TO_TORCH = "dcp_to_torch" + + # Parse command-line arguments + parser = argparse.ArgumentParser() + parser.add_argument( + "mode", + type=str, + help="Conversion mode", + choices=[m.value for m in FormatMode], + default=FormatMode.TORCH_TO_DCP, + ) + parser.add_argument("src", type=str, help="Path to the source model") + parser.add_argument("dst", type=str, help="Path to the destination model") + args = parser.parse_args() + + print( + f"Converting checkpoint from {args.src} to {args.dst} using method: '{args.mode}'" + ) + checkpoint_missing_warning = ( + f"No checkpoint found at {args.src}. Skipping conversion." 
+ ) + if args.mode == FormatMode.TORCH_TO_DCP.value: + if os.path.isfile(args.src): + torch_save_to_dcp(args.src, args.dst) + else: + print(checkpoint_missing_warning) + elif args.mode == FormatMode.DCP_TO_TORCH.value: + if os.path.isdir(args.src): + dcp_to_torch_save(args.src, args.dst) + else: + print(checkpoint_missing_warning) + else: + raise ValueError(f"Unknown conversion mode: {args.mode}") diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logger.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..ee7ae4d9a5b94983c3bceb3bbdc64f897419ebb6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logger.py @@ -0,0 +1,103 @@ +# mypy: allow-untyped-defs +import functools +import time +from typing import Any, Callable, Dict, List, TypeVar +from typing_extensions import ParamSpec +from uuid import uuid4 + +import torch.distributed.c10d_logger as c10d_logger +from torch.distributed.checkpoint.logging_handlers import DCP_LOGGER_NAME + + +__all__: List[str] = [] + +global _dcp_logger +_dcp_logger = c10d_logger._get_or_create_logger(DCP_LOGGER_NAME) + +_T = TypeVar("_T") +_P = ParamSpec("_P") + + +def _msg_dict_from_dcp_method_args(*args, **kwargs) -> Dict[str, Any]: + """ + Extracts log data from dcp method args + """ + msg_dict = {} + + # checkpoint ID can be passed in through the serializer or through the checkpoint id directly + storage_writer = kwargs.get("storage_writer", None) + storage_reader = kwargs.get("storage_reader", None) + planner = kwargs.get("planner", None) + + checkpoint_id = kwargs.get("checkpoint_id", None) + if not checkpoint_id and (serializer := storage_writer or storage_reader): + checkpoint_id = getattr(serializer, "checkpoint_id", None) + + msg_dict["checkpoint_id"] = ( + str(checkpoint_id) if checkpoint_id is not None else checkpoint_id + ) + + # Uniquely identify a _dcp_method_logger wrapped function call. 
+ msg_dict["uuid"] = str(uuid4().int) + + if storage_writer: + msg_dict["storage_writer"] = storage_writer.__class__.__name__ + + if storage_reader: + msg_dict["storage_reader"] = storage_reader.__class__.__name__ + + if planner: + msg_dict["planner"] = planner.__class__.__name__ + + return msg_dict + + +def _get_msg_dict(func_name, *args, **kwargs) -> Dict[str, Any]: + msg_dict = _msg_dict_from_dcp_method_args(*args, **kwargs) + msg_dict.update(c10d_logger._get_msg_dict(func_name, **msg_dict)) + + return msg_dict + + +def _dcp_method_logger( + log_exceptions: bool = False, **wrapper_kwargs: Any +) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]: # pyre-ignore + """This method decorator logs the start, end, and exception of wrapped events.""" + + def decorator(func: Callable[_P, _T]): + @functools.wraps(func) + def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _T: + msg_dict = _get_msg_dict( + func.__name__, *args, **{**wrapper_kwargs, **kwargs} + ) + + # log start event + msg_dict["event"] = "start" + t0 = time.time_ns() + msg_dict["time"] = t0 + msg_dict["log_exceptions"] = log_exceptions + _dcp_logger.debug(msg_dict) + + # exceptions + try: + result = func(*args, **kwargs) + except BaseException as error: + if log_exceptions: + msg_dict["event"] = "exception" + msg_dict["error"] = f"{error}" + msg_dict["time"] = time.time_ns() + _dcp_logger.error(msg_dict) + raise + + # end event + msg_dict["event"] = "end" + t1 = time.time_ns() + msg_dict["time"] = time.time_ns() + msg_dict["times_spent"] = t1 - t0 + _dcp_logger.debug(msg_dict) + + return result + + return wrapper + + return decorator diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logging_handlers.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logging_handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..119dd52b7679750180c6ce1a1b0182762bc3f9e3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logging_handlers.py @@ -0,0 +1,15 @@ +import logging +from typing import List + +from torch.distributed.logging_handlers import _log_handlers + + +__all__: List[str] = [] + +DCP_LOGGER_NAME = "dcp_logger" + +_log_handlers.update( + { + DCP_LOGGER_NAME: logging.NullHandler(), + } +) diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/optimizer.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..5ad170768db9294e63e09b2c99da8f0d6b93241d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/optimizer.py @@ -0,0 +1,356 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import dataclasses +from typing import cast, Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.distributed as dist +from torch._utils import _get_device_module +from torch.distributed._shard.sharded_tensor.api import ShardedTensor +from torch.distributed._shard.sharded_tensor.metadata import ( + TensorProperties as ShardTensorProperties, +) +from torch.distributed._shard.sharded_tensor.shard import Shard +from torch.distributed._shard.sharding_spec.chunk_sharding_spec import ChunkShardingSpec +from torch.distributed.checkpoint._nested_dict import unflatten_state_dict +from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner +from torch.distributed.checkpoint.metadata import ( + BytesStorageMetadata, + ChunkStorageMetadata, + Metadata, + MetadataIndex, + STATE_DICT_TYPE, + TensorProperties, + TensorStorageMetadata, +) +from torch.distributed.checkpoint.planner import LoadPlan, LoadPlanner +from torch.distributed.checkpoint.planner_helpers import ( + _create_read_items, + create_read_items_for_chunk_list, +) +from torch.distributed.checkpoint.state_dict_loader import load_state_dict +from torch.distributed.checkpoint.storage import StorageReader +from torch.distributed.checkpoint.utils import ( + _element_wise_add, + _element_wise_sub, + _normalize_device_info, +) +from torch.distributed.distributed_c10d import _get_default_group +from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor +from torch.distributed.remote_device import _remote_device +from torch.distributed.tensor import DTensor + + +STATE_DICT_2D_LAYOUT = Dict[str, Tuple[Optional[Sequence[int]], Sequence[int]]] + + +# TODO: Update docstrings for optimizer.py +__all__ = [ + "load_sharded_optimizer_state_dict", +] + + +def _gen_rank_device(global_rank: int, device_type: str = "cuda") -> str: + if device_type == "cpu": + return "cpu" + device_module = _get_device_module(device_type) + if device_module.is_available(): + return _normalize_device_info( + device_type, global_rank % device_module.device_count() + ) + return "cpu" + + +def _create_colwise_spec( + pg: Optional[dist.ProcessGroup] = None, +) -> ChunkShardingSpec: + pg_device_type = dist.distributed_c10d._get_pg_default_device(pg).type + if pg is None: + placements = [ + f"rank:{idx}/{_gen_rank_device(idx, pg_device_type)}" + for idx in range(dist.get_world_size()) + ] + else: + placements = [ + f"rank:{idx}/{_gen_rank_device(dist.get_global_rank(pg, idx), pg_device_type)}" + for idx in range(pg.size()) + ] + return ChunkShardingSpec( + dim=0, + placements=cast(List[Union[_remote_device, str]], placements), + ) + + +def _is_nested_tensor(val: torch.Tensor) -> bool: + if type(val) is ShardedTensor: + if len(val.local_shards()) == 0: + return False + if type(val.local_shards()[0].tensor) is ShardedTensor: + return True + if type(val.local_shards()[0].tensor) is DTensor: + raise ValueError("Cannot handle DTensor nested inside a ShardedTensor") + elif type(val) is DTensor and ( + type(val._local_tensor) is DTensor or type(val._local_tensor) is ShardedTensor + ): + raise ValueError("Cannot handle nested DTensor") + return False + + +def _alloc_tensor( + props: TensorProperties, size: Sequence[int], device_type: str = "cuda" +) -> torch.Tensor: + if device_type == "cpu": + device = cast(torch.device, _get_device_module(device_type).current_device()) + else: + device = torch.device( + device_type, _get_device_module(device_type).current_device() + ) + + return torch.empty( + size=size, +
dtype=props.dtype, + layout=props.layout, + requires_grad=props.requires_grad, + pin_memory=props.pin_memory, + device=device, + ) + + +def _get_state_dict_2d_layout( + state_dict: STATE_DICT_TYPE, +) -> Tuple[STATE_DICT_2D_LAYOUT, Optional[dist.ProcessGroup]]: + """ + Load the right TP slice of the optimizer state. + + This is not easy since the per-tensor slicing can't be inferred from checkpoint metadata. + We take advantage of the model state_dict producing a sliced ST to figure out what we need to load. + This is pretty fragile and it might be easier for FSDP to compute this info for us. + Returns a dictionary whose keys are the same as those of the state_dict and whose value is a tuple of + (offset, size) for the current rank TP slice. + N.B. The state_dict *MUST* come from FSDP.sharded_state_dict. + """ + specs: STATE_DICT_2D_LAYOUT = {} + dp_pg: Optional[dist.ProcessGroup] = None + for key, value in state_dict.items(): + specs[key] = (None, value.size()) + if _is_nested_tensor(value): + assert ( + len(value.local_shards()) == 1 + ), "Cannot handle ST with multiple shards" + assert isinstance( + value, ShardedTensor + ), "Can only handle nested ShardedTensor" + shard = value.local_shards()[0] + specs[key] = ( + shard.metadata.shard_offsets, + shard.metadata.shard_sizes, + ) + dp_pg = shard.tensor._process_group # type: ignore[attr-defined] + + return ( + specs, + dp_pg, + ) + + +class _ReaderWithOffset(DefaultLoadPlanner): + translation: Dict[MetadataIndex, MetadataIndex] + state_dict: STATE_DICT_TYPE + metadata: Metadata + + def __init__(self, fqn_to_offset: Dict[str, Sequence[int]]) -> None: + super().__init__() + self.fqn_to_offset = fqn_to_offset + self.metadata = Metadata({}) + self.state_dict = {} + self.translation = {} + + def create_local_plan(self) -> LoadPlan: + requests = [] + self.translation = {} + for fqn, obj in self.state_dict.items(): + md = self.metadata.state_dict_metadata[fqn] + if not isinstance(obj, ShardedTensor): + requests += _create_read_items(fqn, md, obj) + continue + + if fqn not in self.fqn_to_offset: + requests += _create_read_items(fqn, md, obj) + continue + + offset = self.fqn_to_offset[fqn] + + assert len(obj.local_shards()) == 1 + original_shard = obj.local_shards()[0] + local_chunks = [ + ChunkStorageMetadata( + offsets=torch.Size( + _element_wise_add(original_shard.metadata.shard_offsets, offset) + ), + sizes=torch.Size(original_shard.metadata.shard_sizes), + ) + ] + + reqs = create_read_items_for_chunk_list( + fqn, cast(TensorStorageMetadata, md), local_chunks + ) + # TODO: The ReadItems will have a displaced MetadataIndex, fix it. + # TODO: we should change _create_sharded_read_items to have a more ergonomic API + for ri in reqs: + assert ri.dest_index.offset is not None + original_offset = _element_wise_sub(ri.dest_index.offset, offset) + original_index = dataclasses.replace( + ri.dest_index, offset=torch.Size(original_offset) + ) + self.translation[ri.dest_index] = original_index + + requests += reqs + return LoadPlan(requests) + + def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor: + return super().lookup_tensor(self.translation.get(index, index)) + + +def load_sharded_optimizer_state_dict( + model_state_dict: STATE_DICT_TYPE, + optimizer_key: str, + storage_reader: StorageReader, + planner: Optional[LoadPlanner] = None, +) -> STATE_DICT_TYPE: + """ + Load a state_dict in conjunction with FSDP sharded optimizer state. + + This is the current recommended way to checkpoint FSDP.
+ >>> # xdoctest: +SKIP + >>> import torch.distributed.checkpoint as dist_cp + >>> model: torch.nn.Module + >>> optim_params = model.parameters() + >>> optim = torch.optim.SGD(optim_params, lr=0.01) + >>> # Save + >>> with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT): + >>> state_dict = { + >>> "optimizer": FSDP.optim_state_dict(model, optim), + >>> "model": model.state_dict() + >>> } + >>> dist_cp.save_state_dict( + >>> state_dict=state_dict, + >>> storage_writer=dist_cp.FileSystemWriter("checkpoint"), + >>> planner=dist_cp.DefaultSavePlanner(), + >>> ) + >>> + >>> # Load + >>> with FSDP.state_dict_type(model_tp, StateDictType.SHARDED_STATE_DICT): + >>> model_state_dict = model_tp.state_dict() + >>> checkpoint = { + >>> "model": model_state_dict + >>> } + >>> dist_cp.load_state_dict( + >>> state_dict=checkpoint, + >>> storage_reader=dist_cp.FileSystemReader(checkpoint_file), + >>> planner=dist_cp.DefaultLoadPlanner(), + >>> ) + >>> model_tp.load_state_dict(checkpoint["model"]) + >>> + >>> optim_state = dist_cp.load_sharded_optimizer_state_dict( + >>> model_state_dict, + >>> optimizer_key="optimizer", + >>> storage_reader=dist_cp.FileSystemReader("checkpoint"), + >>> ) + >>> + >>> flattened_osd = FSDP.optim_state_dict_to_load( + >>> model, optim, optim_state["optimizer"] + >>> ) + >>> + >>> optim.load_state_dict(flattened_osd) + """ + metadata = storage_reader.read_metadata() + + layout_specs, dp_pg = _get_state_dict_2d_layout(model_state_dict) + dp_pg_device_type = dist.distributed_c10d._get_pg_default_device(dp_pg).type + device_module = _get_device_module(dp_pg_device_type) + + if dp_pg is None: + placements = [] + for i in range(dist.get_world_size()): + device_info = _normalize_device_info( + dp_pg_device_type, i % device_module.device_count() + ) + placements.append(f"rank:{i}/{device_info}") + sharding_spec = ChunkShardingSpec(dim=0, placements=placements) # type: ignore[arg-type] + else: + sharding_spec = _create_colwise_spec(dp_pg) + + # Create a state_dict for optimizer state + state_dict: STATE_DICT_TYPE = {} + + fqn_to_offset: Dict[str, Sequence[int]] = {} + for key, value in metadata.state_dict_metadata.items(): + key_path = metadata.planner_data[key] + if key_path[0] != optimizer_key: + continue + + if isinstance(value, BytesStorageMetadata): + state_dict[key] = "" + continue + + # value: TensorStorageMetadata + if value.size.numel() == 1: + state_dict[key] = _alloc_tensor( + value.properties, value.size, dp_pg_device_type + ) + elif dp_pg is None: + state_dict[key] = _create_chunk_sharded_tensor( + _alloc_tensor(value.properties, value.size, dp_pg_device_type), + rank=dist.get_rank(), + world_size=dist.get_world_size(), + num_devices_per_node=device_module.device_count(), + pg=_get_default_group(), + ) + else: + spec_key = key_path[2] + alloc_size = layout_specs.get(spec_key, (None, value.size))[1] + + properties = ShardTensorProperties( + dtype=value.properties.dtype, + layout=value.properties.layout, + requires_grad=value.properties.requires_grad, + memory_format=value.properties.memory_format, + pin_memory=value.properties.pin_memory, + ) + + st_md = sharding_spec.build_metadata(torch.Size(alloc_size), properties) + local_shards = [] + current_rank = dist.get_rank(dp_pg) + for shard_md in st_md.shards_metadata: + if cast(_remote_device, shard_md.placement).rank() != current_rank: + continue + local_shards.append( + Shard( + tensor=_alloc_tensor( + value.properties, shard_md.shard_sizes, dp_pg_device_type + ), + metadata=shard_md, + ) + ) +
+ st = ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards, st_md, process_group=dp_pg + ) + + if spec_key in layout_specs and layout_specs[spec_key][0] is not None: + fqn_to_offset[key] = cast(Sequence[int], layout_specs[spec_key][0]) + + state_dict[key] = st + + # Whether we unflatten before or after doesn't matter + load_state_dict( + state_dict=state_dict, + storage_reader=storage_reader, + # FIXME the type of planner is wrong in load_state_dict + planner=_ReaderWithOffset(fqn_to_offset) if dp_pg is not None else planner, + ) + + state_dict = unflatten_state_dict(state_dict, metadata.planner_data) + + return state_dict diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/planner_helpers.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/planner_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..d715c651e024ba31ccfc818d0d66d68e1b0008de --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/planner_helpers.py @@ -0,0 +1,386 @@ +# mypy: allow-untyped-defs +import io +from typing import Any, Callable, cast, Dict, List + +import torch +import torch.distributed as dist +from torch._utils import _get_device_module +from torch.distributed._shard.metadata import ShardMetadata +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed.tensor import DTensor +from torch.distributed.tensor._utils import compute_local_shape_and_global_offset + +from .metadata import ( + BytesStorageMetadata, + ChunkStorageMetadata, + MetadataIndex, + STATE_DICT_TYPE, + STORAGE_TYPES, + TensorProperties, + TensorStorageMetadata, +) +from .planner import ( + LoadItemType, + ReadItem, + SavePlan, + TensorWriteData, + WriteItem, + WriteItemType, +) +from .resharding import ( + _check_shard_metadata_pair_overlap, + _shards_get_overlap_region_wrt_saved_tensor, +) + + +__all__: List[str] = ["create_read_items_for_chunk_list"] + + +def _create_chunk_from_tensor(tensor: torch.Tensor) -> ChunkStorageMetadata: + return ChunkStorageMetadata( + offsets=torch.Size([0] * len(tensor.size())), sizes=tensor.size() + ) + + +def _chunk_for_shard(shard_md: ShardMetadata) -> ChunkStorageMetadata: + return ChunkStorageMetadata( + offsets=torch.Size(shard_md.shard_offsets), + sizes=torch.Size(shard_md.shard_sizes), + ) + + +def _sharded_tensor_metadata( + sharded_tensor: ShardedTensor, shard_md: ShardMetadata +) -> TensorWriteData: + shard_properties = sharded_tensor.metadata().tensor_properties + + properties = TensorProperties( + dtype=shard_properties.dtype, + layout=shard_properties.layout, + requires_grad=shard_properties.requires_grad, + memory_format=shard_properties.memory_format, + pin_memory=shard_properties.pin_memory, + ) + + return TensorWriteData( + chunk=_chunk_for_shard(shard_md), + properties=properties, + size=sharded_tensor.metadata().size, + ) + + +def _create_write_items_for_dtensor(fqn: str, tensor: DTensor) -> WriteItem: + sizes, offsets = compute_local_shape_and_global_offset( + tensor.shape, tensor.device_mesh, tensor.placements + ) + sizes, offsets = torch.Size(sizes), torch.Size(offsets) + + return WriteItem( + index=MetadataIndex(fqn, offsets), + type=WriteItemType.SHARD, + tensor_data=TensorWriteData( + chunk=ChunkStorageMetadata( + offsets=offsets, + sizes=sizes, + ), + properties=TensorProperties.create_from_tensor(tensor.to_local()), + size=tensor.size(), + ), + ) + + +def _create_write_item_for_shard( + fqn: str, sharded_tensor: ShardedTensor, shard_md: 
ShardMetadata +) -> WriteItem: + offsets = torch.Size(shard_md.shard_offsets) + return WriteItem( + index=MetadataIndex(fqn, offsets), + type=WriteItemType.SHARD, + tensor_data=_sharded_tensor_metadata(sharded_tensor, shard_md), + ) + + +def _create_write_item_for_tensor(fqn: str, tensor: torch.Tensor) -> WriteItem: + offsets = torch.Size([0] * len(tensor.size())) + return WriteItem( + index=MetadataIndex(fqn, offsets), + type=WriteItemType.TENSOR, + tensor_data=TensorWriteData( + chunk=ChunkStorageMetadata(offsets=offsets, sizes=tensor.size()), + properties=TensorProperties.create_from_tensor(tensor), + size=tensor.size(), + ), + ) + + +def _create_write_item_for_bytesio(fqn: str, bytes: Any): + return WriteItem( + index=MetadataIndex(fqn), + type=WriteItemType.BYTE_IO, + ) + + +def _create_read_item_for_byteio( + dest_index, dest_offset, storage_index, storage_offset, length +): + return ReadItem( + type=LoadItemType.BYTE_IO, + dest_index=dest_index, + dest_offsets=torch.Size((dest_offset,)), + storage_index=storage_index, + storage_offsets=torch.Size((storage_offset,)), + lengths=torch.Size((length,)), + ) + + +def _create_read_item_for_tensor( + dest_index, dest_offsets, storage_index, storage_offsets, lengths +): + return ReadItem( + type=LoadItemType.TENSOR, + dest_index=dest_index, + dest_offsets=torch.Size(dest_offsets), + storage_index=storage_index, + storage_offsets=torch.Size(storage_offsets), + lengths=torch.Size(lengths), + ) + + +def create_read_items_for_chunk_list( + fqn: str, + checkpoint_md: TensorStorageMetadata, + local_chunks: List[ChunkStorageMetadata], +) -> List[ReadItem]: + """ + Create a list of ``ReadItem`` based on the checkpoint and local chunks. + + This applies the resharding algorithm and computes the reads needed + to satisfy ``local_chunks`` with a checkpoint described by ``checkpoint_md``. + + Args: + fqn (str): The state_dict FQN to pass to ``ReadItem``. + checkpoint_md (TensorStorageMetadata): metadata for a given tensor + from a checkpoint. + local_chunks (List[ChunkStorageMetadata]): Local chunks that need to be + loaded. + + Returns: + A list of ``ReadItem`` that will satisfy all input chunks.
+ """ + read_items = [] + # this is a naive quadratic algo that can be optimized later + for idx, shard in enumerate(local_chunks): + for storage_idx, storage_md in enumerate(checkpoint_md.chunks): + if not _check_shard_metadata_pair_overlap(shard, storage_md): + continue + + storage_offsets = [] + dest_offsets = [] + lengths = [] + for ( + dim, + offset_for_saved_tensor, + offset_for_current_tensor, + length, + ) in _shards_get_overlap_region_wrt_saved_tensor( + saved_shard=storage_md, current_shard=shard + ): + storage_offsets.append(offset_for_saved_tensor) + dest_offsets.append(offset_for_current_tensor) + lengths.append(length) + + read_items.append( + _create_read_item_for_tensor( + dest_index=MetadataIndex(fqn, shard.offsets, idx), + dest_offsets=dest_offsets, + storage_index=MetadataIndex(fqn, storage_md.offsets, storage_idx), + storage_offsets=storage_offsets, + lengths=lengths, + ) + ) + return read_items + + +def _create_default_metadata_only_plan(state_dict: STATE_DICT_TYPE) -> SavePlan: + requests = [] + for fqn, obj in state_dict.items(): + if isinstance(obj, DTensor): + requests.append(_create_write_items_for_dtensor(fqn, obj)) + elif isinstance(obj, ShardedTensor): + for shard_md in obj.metadata().shards_metadata: + requests.append(_create_write_item_for_shard(fqn, obj, shard_md)) + elif isinstance(obj, torch.Tensor): + requests.append(_create_write_item_for_tensor(fqn, obj)) + else: + requests.append(_create_write_item_for_bytesio(fqn, obj)) + return SavePlan(requests) + + +def _create_write_items(fqn: str, object: Any) -> List[WriteItem]: + if hasattr(object, "__create_write_items__"): + # DTensor implements _Checkpointable + return object.__create_write_items__(fqn, object) + elif isinstance(object, ShardedTensor): + return [ + _create_write_item_for_shard(fqn, object, shard.metadata) + for shard in object.local_shards() + ] + elif isinstance(object, torch.Tensor): + return [_create_write_item_for_tensor(fqn, object)] + else: + return [_create_write_item_for_bytesio(fqn, object)] + + +def _create_chunk_from_dtensor(tensor: DTensor) -> ChunkStorageMetadata: + sizes, offsets = compute_local_shape_and_global_offset( + tensor.shape, tensor.device_mesh, tensor.placements + ) + sizes, offsets = torch.Size(sizes), torch.Size(offsets) + return ChunkStorageMetadata( + offsets=offsets, + sizes=sizes, + ) + + +def _create_chunk_list(tensor: torch.Tensor) -> List[ChunkStorageMetadata]: + if hasattr(tensor, "__create_chunk_list__"): + # DTensor implements _Checkpointable + local_chunks = tensor.__create_chunk_list__() # type: ignore[attr-defined] + elif isinstance(tensor, ShardedTensor): + local_chunks = [ + _chunk_for_shard(shard.metadata) for shard in tensor.local_shards() + ] + elif isinstance(tensor, torch.Tensor): + local_chunks = [_create_chunk_from_tensor(tensor)] + else: + raise ValueError( + "Unsupported Type, expecting one of [Tensor, DTensor, ShardedTensor] " + f",but got {type(tensor)}" + ) + + return local_chunks + + +def _create_read_items(fqn: str, md: STORAGE_TYPES, obj: Any) -> List[ReadItem]: + if not isinstance(md, BytesStorageMetadata): + try: + local_chunks = _create_chunk_list(obj) + except ValueError as ex: + raise ValueError( + f"Invalid checkpoint metadata for {fqn}, " + + f"expected BytesStorageMetadata but found {type(md)}", + ) from ex + + return create_read_items_for_chunk_list(fqn, md, local_chunks) + else: + return [ + _create_read_item_for_byteio( + dest_index=MetadataIndex(fqn), + dest_offset=0, + storage_index=MetadataIndex(fqn), + storage_offset=0, 
+ length=0, + ) + ] + + +def _init_state_dict(state_dict: Dict[str, Any]) -> Any: + """ + Initializes meta tensors in the state_dict if they are DTensor or torch.Tensor. + """ + + def dtensor_func(value: DTensor): + device = getattr(value, "device", None) + if device == torch.device("meta"): + device_type = dist.distributed_c10d._get_pg_default_device().type + device = cast( + torch.device, _get_device_module(device_type).current_device() + ) + new_local_tensor = torch.empty_like(value.to_local(), device=device) + # We need to pass shape and stride explicitly, since DTensor might be + # sharded unevenly. + dtensor = DTensor.from_local( + new_local_tensor, + device_mesh=value.device_mesh, + placements=value.placements, + shape=value.size(), + stride=value.stride(), + ) + return dtensor + else: + return value + + def sharded_tensor_func(value: Any): + device = getattr(value, "device", None) + if device == torch.device("meta"): + raise RuntimeError( + f"Found unsupported type {type(value)} for meta device loading." + ) + else: + return value + + def tensor_func(value: torch.Tensor): + device = getattr(value, "device", None) + if device == torch.device("meta"): + device_type = dist.distributed_c10d._get_pg_default_device().type + device = cast( + torch.device, _get_device_module(device_type).current_device() + ) + tensor = torch.empty_like(value, device=device) + return tensor + else: + return value + + _iterate_state_dict( + state_dict, + dtensor_func, + sharded_tensor_func, + tensor_func, + ) + + +def _iterate_state_dict( + iter_object: Any, + dtensor_func: Callable, + sharded_tensor_func: Callable, + tensor_func: Callable, +): + """ + Iterate through the state dict, applying the given functions to each tensor type + and updating the state dict in place. + + Args: + iter_object (Any): the target state_dict. + dtensor_func (Callable): the function to apply to DTensor + sharded_tensor_func (Callable): the function to apply to ShardedTensor + tensor_func (Callable): the function to apply to Tensor + + # TODO: let state_dict_util._iterate_state_dict() support an in-place option + so we don't need to have two versions of _iterate_state_dict.
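# A minimal sketch of the materialization idea behind _init_state_dict above
# (an illustrative aside, not code from this module): tensors on the "meta"
# device carry shape/dtype but no storage, so they must be re-allocated on a
# real device before checkpoint data can be loaded into them. Device names
# here are assumptions for the example.
import torch

meta_t = torch.empty(4, 4, device="meta")        # shape/dtype only, no storage
real_t = torch.empty_like(meta_t, device="cpu")  # allocate real memory to load into
assert real_t.shape == meta_t.shape and real_t.device.type == "cpu"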
+ """ + + if isinstance(iter_object, DTensor): + return dtensor_func(iter_object) + elif isinstance(iter_object, ShardedTensor): + return sharded_tensor_func(iter_object) + elif isinstance(iter_object, torch.Tensor): + return tensor_func(iter_object) + elif ( + isinstance(iter_object, (int, float, str, bytes, io.BytesIO)) + or iter_object is None + ): + return iter_object + elif isinstance(iter_object, dict): + for key, value in iter_object.items(): + iter_object[key] = _iterate_state_dict( + value, dtensor_func, sharded_tensor_func, tensor_func + ) + return iter_object + elif isinstance(iter_object, (list, tuple)): + ret = [ + _iterate_state_dict(v, dtensor_func, sharded_tensor_func, tensor_func) + for v in iter_object + ] + if isinstance(iter_object, tuple): + ret = tuple(ret) # type: ignore[assignment] + return ret diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/resharding.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/resharding.py new file mode 100644 index 0000000000000000000000000000000000000000..0e5153df8da0adbe58b2c525c233ce805cf8b1c1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/resharding.py @@ -0,0 +1,72 @@ +# mypy: allow-untyped-defs +from typing import List, Tuple + +from torch.distributed.checkpoint.metadata import ChunkStorageMetadata + + +__all__: List[str] = [] + + +def _check_shard_metadata_pair_overlap( + shard1: ChunkStorageMetadata, shard2: ChunkStorageMetadata +): + """Check if two shards overlap.""" + # For each dim of each shard, check if one shard resides on the other + # end of second shard with respect to that dim. As an example for a 2D + # shard, we would check if one shard is above or on the left of the + # other shard. + ndims = len(shard1.offsets) + for i in range(ndims): + if shard1.offsets[i] >= shard2.offsets[i] + shard2.sizes[i]: + return False + if shard2.offsets[i] >= shard1.offsets[i] + shard1.sizes[i]: + return False + + return True + + +def _shards_get_overlap_region_wrt_saved_tensor( + saved_shard: ChunkStorageMetadata, current_shard: ChunkStorageMetadata +) -> List[Tuple[int, int, int, int]]: + """ + Return the overlapping region between saved_shard and current_shard. + + There returned list has the same number of elements as the tensor's dimension. + For each element, we produce a tuple with the following contents: + (dimension, `saved_shard` offset, `current_shard` offset, length) + + Offsets are relative to each shard. 
+ """ + narrows = [] + for dim, ( + saved_shard_offset, + current_shard_offset, + saved_shard_size, + current_shard_size, + ) in enumerate( + zip( + saved_shard.offsets, + current_shard.offsets, + saved_shard.sizes, + current_shard.sizes, + ) + ): + min_range_end = min( + saved_shard_offset + saved_shard_size, + current_shard_offset + current_shard_size, + ) + + length = min_range_end - max(current_shard_offset, saved_shard_offset) + + if saved_shard_offset > current_shard_offset: + offset_for_saved_tensor = 0 + offset_for_current_tensor = saved_shard_offset - current_shard_offset + else: + offset_for_saved_tensor = current_shard_offset - saved_shard_offset + offset_for_current_tensor = 0 + + narrows.append( + (dim, offset_for_saved_tensor, offset_for_current_tensor, length) + ) + + return narrows diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/staging.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/staging.py new file mode 100644 index 0000000000000000000000000000000000000000..22eed63c1cc517bc18f777d8943045115b440f06 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/staging.py @@ -0,0 +1,117 @@ +from typing import Optional, runtime_checkable +from typing_extensions import Protocol + +from torch.distributed._state_dict_utils import ( + _copy_state_dict, + _create_cpu_state_dict, + _offload_state_dict_to_cpu, +) +from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE + + +__all__ = ["AsyncStager", "BlockingAsyncStager"] + + +@runtime_checkable +class AsyncStager(Protocol): + """ + This protocol is meant to provide customization and extensibility for dcp.async_save, allowing users + to customize how data is staged previous to executing the usual dcp.save path in parallel. + The expected order of operations (concretely defined in `torch.distributed.state_dict_saver.async_save`) + is the following: + + 1. AsyncStager.stage_data(state_dict): + This call gives the AsyncStager the opportunity to 'stage' + the state_dict. The expectation and purpose of staging in this context is to create a "training-safe" + representation of the state dict, meaning that any updates to module data after staging is complete + should not be reflected in the state dict returned from this method. For example, in the default + case a copy of the entire state dict is created on CPU RAM and returned here, allowing users + to continue training without risking changes to data which is being serialized. + + 2. dcp.save is called on the state_dict returned from stage in parallel. This call is responsible + for serializing the state_dict and writing it to storage. + + 3. If AsyncStager.should_synchronize_after_execute is True, this method will be called immediately after + the serialization thread starts and before returning from dcp.async_save. If this is set to False, + the assumption is the user has defined a custom synchronization point for the the purpose of further + optimizing save latency in the training loop (for example, by overlapping staging with the + forward/backward pass), and it is the respondsibility of the user to call `AsyncStager.synchronize_staging` + at the appropriate time. + + """ + + # default to True since the common case is to stage synchronously + _synchronize_after_execute: bool = True + + @property + def should_synchronize_after_execute(self) -> bool: + """ + Whether to synchronize after executing the stage. 
+ """ + + return self._synchronize_after_execute + + def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE: + """ + Returns a "staged" copy of `state_dict`. The expectation of the staged copy is that it is + innoculated from any updates incurred after the stage call is complete. + """ + raise NotImplementedError( + f"{self.__class__.__name__} must implement stage method" + ) + + def synchronize_staging(self) -> None: + """ + In the case `stage` is async in some way, this method should be called to ensure staging + is complete and it is safe to begin modifying the original `state_dict` + """ + + +class BlockingAsyncStager(AsyncStager): + """ + An implementation of AsyncStager which stages the state_dict on CPU RAM and blocks until the copy is complete. + This implementation also provides an option to optimize stage latency using pinned memory. + + N.B. synchronize_staging is a no-op in this case. + + + """ + + # default to True since the common case is to stage synchronously + _synchronize_after_execute: bool = False + + def __init__( + self, + cache_staged_state_dict: bool = False, + type_check: bool = False, + ): + """ + Initializes the BlockingAsyncStager. + + Args: + cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency + at the cost of increases memory usage. Additionally, if this parameter is set to True, it's the expectation + that the stager is maintained and re-used for multiple dcp.async_save calls. Default to False. + type_check: Whether to perform a type check during cpu_offload. Defaults to False. + + """ + self.cache_staged_state_dict = cache_staged_state_dict + self.type_check = type_check + self.state_dict_cache: Optional[STATE_DICT_TYPE] = None + + def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE: + """ + Returns a copy of `state_dict` on the CPU. + """ + + if not self.cache_staged_state_dict: + return _offload_state_dict_to_cpu(state_dict, type_check=self.type_check) + + if self.state_dict_cache is None: + self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True) + return _copy_state_dict(state_dict, self.state_dict_cache) + + def synchronize_staging(self) -> None: + """ + No-op function, since staging is blocking. + """ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_loader.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..1d92327e9e4761c4331a80876406101b9eef4868 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_loader.py @@ -0,0 +1,316 @@ +# mypy: allow-untyped-decorators +# mypy: allow-untyped-defs +import os +import warnings +from typing import Any, cast, Dict, Optional, Set, Union +from typing_extensions import deprecated + +import torch +import torch.distributed as dist +from torch.distributed.checkpoint.default_planner import _EmptyStateDictLoadPlanner +from torch.distributed.checkpoint.logger import _dcp_method_logger +from torch.distributed.checkpoint.stateful import Stateful + +from ._storage_utils import _storage_setup +from .default_planner import DefaultLoadPlanner +from .planner import LoadPlan, LoadPlanner +from .storage import StorageReader +from .utils import _all_gather_keys, _api_bc_check, _DistWrapper, _profile + + +__all__ = ["load_state_dict", "load"] + + +@deprecated( + "`load_state_dict` is deprecated and will be removed in future versions. 
" + "Please use `load` instead.", + category=FutureWarning, +) +def load_state_dict( + state_dict: Dict[str, Any], + storage_reader: StorageReader, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + planner: Optional[LoadPlanner] = None, +) -> None: + """This method is deprecated. Please switch to 'load'.""" + storage_reader.reset() + with _profile(): + # TODO: test returning `load` here instead. + return _load_state_dict( + state_dict, + storage_reader, + process_group, + coordinator_rank, + no_dist, + planner, + ) + + +@_dcp_method_logger(log_exceptions=True) +@_api_bc_check +def load( + state_dict: Dict[str, Any], + *, + checkpoint_id: Union[str, os.PathLike, None] = None, + storage_reader: Optional[StorageReader] = None, + planner: Optional[LoadPlanner] = None, + process_group: Optional[dist.ProcessGroup] = None, +) -> None: + """ + Load a distributed ``state_dict`` in SPMD style. + + Each rank will try to read the least amount of data necessary + to fullfill the requested `state_dict`. When loading :class:`ShardedTensor` + or :class:`DTensor` instances, each rank only reads data for their local shards. + + For each ``Stateful`` object (having both a ``state_dict`` and a ``load_state_dict``), + load will first call ``state_dict`` before attempting deserialization, followed by + ``load_state_dict`` once the deserialization is complete. + + .. warning:: + All tensors in ``state_dict`` must be allocated on their + destination device *prior to* calling this function. + + All non-tensor data is loaded using `torch.load()` and modified in place + on state_dict. + + .. warning:: + Users must call `load_state_dict` on the root module to ensure load + pos-processing and non-tensor data properly propagates. + + .. note: + If no process group is initialized, this function will assume the intent + is to load a checkpoint into the local process. This can be useful in the + case of local inference, and when using regular Tensors (as opposed to DTensor + or ShardedTensor) + + .. note: + Rank 0 is assumed to be the coordinator rank. + + Args: + state_dict (Dict[str, Any]): The state_dict to save. + checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is a key-value store. + (Default: ``None``) + storage_reader (Optional[StorageReader]): + Instance of StorageWriter used to perform reads. If this is not + specified, DCP will automatically infer the reader based on the + checkpoint_id. If checkpoint_id is also None, an exception will + be raised. (Default: ``None``) + planner (Optional[LoadPlanner]): + Instance of LoadPlanner. If this is not specificed, the default + planner will be used. (Default: ``None``) + process_group (Optional[ProcessGroup]): + ProcessGroup to be used for cross-rank synchronization. + (Default: ``None``) + + Returns: + None. + + Examples + >>> # xdoctest: +SKIP + >>> my_model = MyModule() + >>> optimizer = Adagrad(my_model.parameters()) + >>> model_state_dict = my_model.state_dict() + >>> fs_storage_reader = torch.distributed.checkpoint.FileSystemReader("/checkpoint/1") + + >>> torch.distributed.checkpoint.load_state_dict( + >>> state_dict=model_state_dict, + >>> storage_reader=fs_storage_reader, + >>> ) + + >>> # module.load_state_dict() function might have customized steps + >>> # to flush the state_dict, must call it to + >>> # ensure correct behavior. 
+ >>> my_model.load_state_dict(model_state_dict) + + .. note:: + load_state_dict uses collectives to coordinate reads across ranks. + For NCCL-based process groups, internal tensor representations of + objects must be moved to the GPU device before communication takes place. + In this case, the device used is given by ``torch.cuda.current_device()`` + and it is the user's responsibility to ensure that this is set so that each + rank has an individual GPU, via ``torch.cuda.set_device()``. + """ + + no_dist = not (dist.is_available() and dist.is_initialized()) + if no_dist: + warnings.warn( + "torch.distributed is unavailable or uninitialized, assuming the intent is to load in a single process." + ) + + with _profile(): + storage_reader = cast( + StorageReader, _storage_setup(storage_reader, checkpoint_id, reader=True) + ) + + if no_dist: + keys = list(state_dict.keys()) + else: + keys = _all_gather_keys(state_dict, process_group) + if keys != sorted(state_dict.keys()): + warnings.warn( + "Detected mismatched keys in state dict after all gather!" + " This behavior is unsupported and may cause errors." + ) + + stateful_sd = {} + for key in keys: + if key not in state_dict: + continue + elem = state_dict[key] + stateful_sd[key] = ( + elem.state_dict() if isinstance(elem, Stateful) else elem + ) + + _load_state_dict( + state_dict=stateful_sd, + storage_reader=storage_reader, + process_group=process_group, + no_dist=no_dist, + planner=planner, + ) + for key in keys: + if key not in state_dict: + continue + elem = state_dict[key] + if isinstance(elem, Stateful): + elem.load_state_dict(stateful_sd[key]) + state_dict[key] = stateful_sd[key] + + +def _load_state_dict( + state_dict: Dict[str, Any], + storage_reader: StorageReader, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + planner: Optional[LoadPlanner] = None, +) -> None: + torch._C._log_api_usage_once("torch.distributed.checkpoint.load_state_dict") + + distW = _DistWrapper(process_group, not no_dist, coordinator_rank) + if planner is None: + planner = DefaultLoadPlanner() + + ckpt_kwargs = {} + if (ckpt_id := getattr(storage_reader, "checkpoint_id", None)) is not None: + ckpt_kwargs["checkpoint_id"] = ckpt_id + + @_dcp_method_logger(**ckpt_kwargs) + def local_step(): + assert planner is not None + metadata = storage_reader.read_metadata() + planner.set_up_planner(state_dict, metadata, distW.is_coordinator) + storage_reader.set_up_storage_reader(metadata, distW.is_coordinator) + + local_plan = planner.create_local_plan() + local_plan = storage_reader.prepare_local_plan(local_plan) + return local_plan + + @_dcp_method_logger(**ckpt_kwargs) + def global_step(all_local_plans): + assert planner is not None + all_local_plans = planner.create_global_plan(all_local_plans) + all_local_plans = storage_reader.prepare_global_plan(all_local_plans) + return all_local_plans + + central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) + + @_dcp_method_logger(**ckpt_kwargs) + def read_data(): + assert planner is not None + final_local_plan = planner.finish_plan(central_plan) + all_reads = storage_reader.read_data(final_local_plan, planner) + + all_reads.wait() + return None + + _ = distW.all_gather("read", read_data) + + +def _load_state_dict_from_keys( + keys: Optional[Union[Set[str], str]] = None, + *, + checkpoint_id: Union[str, os.PathLike, None] = None, + storage_reader: Optional[StorageReader] = None, + process_group: Optional[dist.ProcessGroup] =
None, +) -> Dict[str, Any]: + """ + Load only the specified keys from the checkpoint; if no keys are specified, the entire + checkpoint will be loaded. Note that this method completely loads the checkpoint into the + current process and is not distributed. + + .. warning:: + + All non-tensor data is loaded using `torch.load()` + + .. note:: + As opposed to the usual pattern, this function does not take a state dict as input + and does not load in place. Instead, a new state dict is directly initialized and read + from file. + + .. note:: + If no process group is initialized, this function will assume the intent + is to load a checkpoint into the local process. This can be useful in the + case of local inference, and when using regular Tensors (as opposed to DTensor + or ShardedTensor) + + .. note:: + Rank 0 is assumed to be the coordinator rank. + + Args: + keys (Optional[Union[Set[str], str]]): + Loads any key specified in this set. If no keys are specified, the entire checkpoint + is loaded. + checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is a key-value store. + (Default: ``None``) + storage_reader (Optional[StorageReader]): + Instance of StorageReader used to perform reads. If this is not + specified, DCP will automatically infer the reader based on the + checkpoint_id. If checkpoint_id is also None, an exception will + be raised. (Default: ``None``) + process_group (Optional[ProcessGroup]): + ProcessGroup to be used for cross-rank synchronization. + (Default: ``None``) + + Returns: + State dict from specified keys + """ + torch._C._log_api_usage_once( + "torch.distributed.checkpoint._load_state_dict_from_keys" + ) + + no_dist = not (dist.is_available() and dist.is_initialized()) + if no_dist: + warnings.warn( + "torch.distributed is unavailable or uninitialized, assuming the intent is to load in a single process."
+ ) + + storage_reader = cast( + StorageReader, _storage_setup(storage_reader, checkpoint_id, reader=True) + ) + + if isinstance(keys, str): + keys = {keys} + + sd: Dict[str, Any] = {} + _load_state_dict( + state_dict=sd, + storage_reader=storage_reader, + process_group=process_group, + no_dist=no_dist, + planner=_EmptyStateDictLoadPlanner(keys=keys or set()), + ) + + return sd diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_saver.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_saver.py new file mode 100644 index 0000000000000000000000000000000000000000..307c6d8d4a96052372d6627695709ffbbef47f06 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_saver.py @@ -0,0 +1,333 @@ +# mypy: allow-untyped-decorators +# mypy: allow-untyped-defs +import inspect +import os +import warnings +from concurrent.futures import Future, ThreadPoolExecutor +from typing import cast, Optional, Union +from typing_extensions import deprecated + +import torch +import torch.distributed as dist +from torch.distributed._state_dict_utils import _offload_state_dict_to_cpu +from torch.distributed.checkpoint._storage_utils import _storage_setup +from torch.distributed.checkpoint.default_planner import DefaultSavePlanner +from torch.distributed.checkpoint.logger import _dcp_method_logger +from torch.distributed.checkpoint.metadata import Metadata, STATE_DICT_TYPE +from torch.distributed.checkpoint.planner import SavePlan, SavePlanner +from torch.distributed.checkpoint.staging import AsyncStager +from torch.distributed.checkpoint.stateful import Stateful +from torch.distributed.checkpoint.storage import StorageWriter +from torch.distributed.distributed_c10d import _get_default_group + +from .utils import _api_bc_check, _DistWrapper, _profile + + +__all__ = ["save_state_dict", "save", "async_save"] + + +@deprecated( + "`save_state_dict` is deprecated and will be removed in future versions. " + "Please use `save` instead.", + category=FutureWarning, +) +def save_state_dict( + state_dict: STATE_DICT_TYPE, + storage_writer: StorageWriter, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + planner: Optional[SavePlanner] = None, +) -> Metadata: + """This method is deprecated. Please switch to 'save'.""" + storage_writer.reset() + + # TODO: test returning `save` here instead. + with _profile(): + return _save_state_dict( + state_dict, + storage_writer, + process_group, + coordinator_rank, + no_dist, + planner, + ) + + +@_dcp_method_logger(log_exceptions=True) # type: ignore[arg-type] +@_api_bc_check +def save( + state_dict: STATE_DICT_TYPE, + *, + checkpoint_id: Union[str, os.PathLike, None] = None, + storage_writer: Optional[StorageWriter] = None, + planner: Optional[SavePlanner] = None, + process_group: Optional[dist.ProcessGroup] = None, +) -> Metadata: + """ + Save a distributed model in SPMD style. + + This function is different from ``torch.save()`` as it handles + ``ShardedTensor`` and ``DTensor`` by having each rank save only its local shards. + + For each ``Stateful`` object (having both a ``state_dict`` and a ``load_state_dict``), + save will call ``state_dict`` before serialization. + + .. warning:: + There are no guarantees of backwards compatibility across PyTorch versions + for saved state_dicts. + + ..
warning:: + If using the `process_group` argument, make sure that only its ranks + call `save_state_dict` and that all data in state_dict belong to it. + + .. note:: + When saving a checkpoint for FSDP's `ShardingStrategy.HYBRID_SHARD`, only one of + the shard_groups should be calling `save_state_dict` and the corresponding process + group needs to be passed in. + + .. note:: + If no process group is available, this function assumes the intention is to save the + state_dict in the local process. + + .. note:: + Rank 0 is assumed to be the coordinator rank. + + + Args: + state_dict (Dict[str, Any]): The state_dict to save. + checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is a key-value store. + (Default: ``None``) + storage_writer (Optional[StorageWriter]): + Instance of StorageWriter used to perform writes. If this is not + specified, DCP will automatically infer the writer based on the + checkpoint_id. If checkpoint_id is also None, an exception will + be raised. (Default: ``None``) + planner (Optional[SavePlanner]): + Instance of SavePlanner. If this is not specified, the default + planner will be used. (Default: ``None``) + process_group (Optional[ProcessGroup]): + ProcessGroup to be used for cross-rank synchronization. + (Default: ``None``) + + Returns: + Metadata: Metadata object for the saved checkpoint. + + Example: + >>> # xdoctest: +SKIP + >>> my_model = MyModule() + + >>> state_dict = {"model": my_model} + + >>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1") + >>> torch.distributed.checkpoint.save( + >>> state_dict=state_dict, + >>> storage_writer=fs_storage_writer, + >>> ) + + .. note:: + save_state_dict uses collectives to coordinate writes across ranks. + For NCCL-based process groups, internal tensor representations of + objects must be moved to the GPU device before communication takes place. + In this case, the device used is given by ``torch.cuda.current_device()`` + and it is the user's responsibility to ensure that this is set so that + each rank has an individual GPU, via ``torch.cuda.set_device()``. + """ + torch._C._log_api_usage_once("torch.distributed.checkpoint.save") + + no_dist = not (dist.is_available() and dist.is_initialized()) + if no_dist: + warnings.warn( + "torch.distributed is unavailable or uninitialized, assuming the intent is to save in a single process." + ) + + with _profile(): + storage_writer = cast( + StorageWriter, _storage_setup(storage_writer, checkpoint_id, reader=False) + ) + + return _save_state_dict( + state_dict=_stateful_to_state_dict(state_dict), + storage_writer=storage_writer, + process_group=process_group, + no_dist=no_dist, + planner=planner, + ) + + +@_dcp_method_logger(log_exceptions=True) +def async_save( + state_dict: STATE_DICT_TYPE, + *, + checkpoint_id: Union[str, os.PathLike, None] = None, + storage_writer: Optional[StorageWriter] = None, + planner: Optional[SavePlanner] = None, + process_group: Optional[dist.ProcessGroup] = None, +) -> Future: + """Asynchronous version of ``save``. This code first stages the state_dict onto the + staging storage (defaults to CPU memory), and then calls ``save`` in a separate thread. + + .. warning:: + This feature is experimental and subject to change. + + Args: + state_dict (Dict[str, Any]): The state_dict to save.
+ checkpoint_id (Union[str, os.PathLike, None]): + The ID of this checkpoint instance. The meaning of the checkpoint_id + depends on the storage. It can be a path to a folder or to a file. + It can also be a key if the storage is a key-value store. + (Default: ``None``) + storage_writer (Optional[StorageWriter]): + Instance of StorageWriter used to perform 'stage' and 'save'. If + this is not specified, DCP will automatically infer the writer based on the + checkpoint_id. If checkpoint_id is also None, an exception will + be raised. (Default: ``None``) + planner (Optional[SavePlanner]): + Instance of SavePlanner. If this is not specified, the default + planner will be used. (Default: ``None``) + process_group (Optional[ProcessGroup]): + ProcessGroup to be used for cross-rank synchronization. + (Default: ``None``) + + Returns: + Future: A future holding the resultant Metadata object from `save`. + + Example: + >>> # xdoctest: +SKIP + >>> my_model = MyModule() + + >>> state_dict = {"model": my_model} + + >>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1") + >>> checkpoint_future = torch.distributed.checkpoint.async_save( + >>> state_dict=state_dict, + >>> storage_writer=fs_storage_writer, + >>> ) + >>> + >>> # ... do some work ... + >>> + >>> checkpoint_future.result() + + """ + torch._C._log_api_usage_once("torch.distributed.checkpoint.async_save") + + if dist.is_available() and dist.is_initialized(): + pg = process_group or _get_default_group() + assert ( + torch.device("cpu") in pg._device_types # type: ignore[attr-defined] + ), "A CPU backend must be enabled for async save; try initializing process group with 'cpu:gloo,cuda:nccl'" + + storage_writer = cast( + StorageWriter, _storage_setup(storage_writer, checkpoint_id, reader=False) + ) + + state_dict = _stateful_to_state_dict(state_dict) + if isinstance(storage_writer, AsyncStager): + staged_state_dict = storage_writer.stage(state_dict) + else: # provides backwards compatibility for storage_writers not implementing AsyncStager + staged_state_dict = _offload_state_dict_to_cpu(state_dict, type_check=False) + + executor = ThreadPoolExecutor(max_workers=1) + f: Future = executor.submit( + save, + staged_state_dict, + checkpoint_id=checkpoint_id, + storage_writer=storage_writer, + planner=planner, + process_group=process_group, + ) + f.add_done_callback(lambda f: executor.shutdown(wait=False)) + + if ( + isinstance(storage_writer, AsyncStager) + and storage_writer.should_synchronize_after_execute + ): + storage_writer.synchronize_staging() + + return f + + +def _stateful_to_state_dict(state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE: + """Creates a shallow copy of `state_dict` where `state_dict` is called for each Stateful object.""" + stateful_state_dict = {} + for key, elem in state_dict.items(): + stateful_state_dict[key] = ( + elem.state_dict() if isinstance(elem, Stateful) else elem + ) + return stateful_state_dict + + +def _save_state_dict( + state_dict: STATE_DICT_TYPE, + storage_writer: StorageWriter, + process_group: Optional[dist.ProcessGroup] = None, + coordinator_rank: int = 0, + no_dist: bool = False, + planner: Optional[SavePlanner] = None, +) -> Metadata: + torch._C._log_api_usage_once("torch.distributed.checkpoint.save_state_dict") + + distW = _DistWrapper(process_group, not no_dist, coordinator_rank) + if planner is None: + planner = DefaultSavePlanner() + assert planner is not None + + global_metadata = None + + ckpt_kwargs = {} + if (ckpt_id := getattr(storage_writer, "checkpoint_id", None)) is not None: +
ckpt_kwargs["checkpoint_id"] = ckpt_id + + @_dcp_method_logger(**ckpt_kwargs) + def local_step(): + assert planner is not None + storage_meta = storage_writer.storage_meta() + if "storage_meta" not in inspect.signature(planner.set_up_planner).parameters: + warnings.warn( + "The function definition for SavePlanner.set_up_planner has been updated" + " to include the storage_meta argument. Please update your implementation" + " to include this parameter." + ) + planner.set_up_planner(state_dict, distW.is_coordinator) # type: ignore[call-arg, arg-type] + else: + planner.set_up_planner( + state_dict=state_dict, + storage_meta=storage_meta, + is_coordinator=distW.is_coordinator, + ) + storage_writer.set_up_storage_writer(distW.is_coordinator) + + local_plan = planner.create_local_plan() + local_plan = storage_writer.prepare_local_plan(local_plan) + return local_plan + + @_dcp_method_logger(**ckpt_kwargs) + def global_step(all_local_plans): + nonlocal global_metadata + + assert planner is not None + all_local_plans, global_metadata = planner.create_global_plan(all_local_plans) + all_local_plans = storage_writer.prepare_global_plan(all_local_plans) + return all_local_plans + + central_plan: SavePlan = distW.reduce_scatter("plan", local_step, global_step) + + @_dcp_method_logger(**ckpt_kwargs) + def write_data(): + assert planner is not None + final_local_plan = planner.finish_plan(central_plan) + all_writes = storage_writer.write_data(final_local_plan, planner) + + all_writes.wait() + return all_writes.value() + + @_dcp_method_logger(**ckpt_kwargs) + def finish_checkpoint(all_results): + assert global_metadata is not None + storage_writer.finish(metadata=global_metadata, results=all_results) + return global_metadata + + return distW.all_reduce("write", write_data, finish_checkpoint) diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/stateful.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/stateful.py new file mode 100644 index 0000000000000000000000000000000000000000..97d83ddbdf15d564b4724f59fdd97ac3368de0b1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/stateful.py @@ -0,0 +1,42 @@ +from typing import Any, Dict, runtime_checkable, TypeVar +from typing_extensions import Protocol + + +__all__ = ["Stateful", "StatefulT"] + + +@runtime_checkable +class Stateful(Protocol): + """ + Stateful protocol for objects that can be checkpointed and restored. + """ + + def state_dict(self) -> Dict[str, Any]: + """ + Objects should return their state_dict representation as a dictionary. + The output of this function will be checkpointed, and later restored in + `load_state_dict()`. + + .. warning:: + Because of the inplace nature of restoring a checkpoint, this function + is also called during `torch.distributed.checkpoint.load`. + + + Returns: + Dict: The objects state dict + """ + + ... + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + """ + Restore the object's state from the provided state_dict. + + Args: + state_dict: The state dict to restore from + """ + + ... 
+
+
+StatefulT = TypeVar("StatefulT", bound=Stateful)
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/storage.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..985e91df9e739d1033725ce6173e35ea25296907
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/storage.py
@@ -0,0 +1,284 @@
+import abc
+import os
+from dataclasses import dataclass
+from typing import Any, List, Optional, Union
+
+from torch.distributed.checkpoint.metadata import Metadata, MetadataIndex, StorageMeta
+from torch.distributed.checkpoint.planner import (
+    LoadPlan,
+    LoadPlanner,
+    SavePlan,
+    SavePlanner,
+)
+from torch.futures import Future
+
+
+__all__ = ["WriteResult", "StorageWriter", "StorageReader"]
+
+
+@dataclass(frozen=True)
+class WriteResult:
+    index: MetadataIndex
+
+    size_in_bytes: int
+    storage_data: Any
+
+
+class StorageWriter(abc.ABC):
+    """
+    Interface used by ``save_state_dict`` to write to storage.
+
+    One StorageWriter instance acts as both the coordinator and the follower
+    in a distributed checkpoint. As part of initialization, each instance
+    is told its role.
+
+    A subclass should expect the following sequence of calls.
+
+    0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id.
+    1) (all ranks) set_up_storage_writer()
+    2) (all ranks) prepare_local_plan()
+    3) (coordinator) prepare_global_plan()
+    4) (all ranks) write_data()
+    5) (coordinator) finish()
+    """
+
+    @abc.abstractmethod
+    def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
+        """
+        Called to indicate that a brand-new checkpoint write is about to happen.
+        A checkpoint_id may be present if users set the checkpoint_id for
+        this checkpoint write. The meaning of the checkpoint_id is
+        storage-dependent. It can be a path to a folder/file or a key for
+        a key-value storage.
+
+        Args:
+            checkpoint_id (Union[str, os.PathLike, None]):
+                The ID of this checkpoint instance. The meaning of the checkpoint_id
+                depends on the storage. It can be a path to a folder or to a file.
+                It can also be a key if the storage is a key-value store.
+                (Default: ``None``)
+        """
+        ...
+
+    @abc.abstractmethod
+    def set_up_storage_writer(self, is_coordinator: bool) -> None:
+        """
+        Initialize this instance.
+
+        Args:
+            is_coordinator (bool): Whether this instance is responsible for coordinating
+              the checkpoint.
+        """
+
+    @abc.abstractmethod
+    def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
+        """
+        Perform storage-specific local planning.
+
+        While this method can produce a completely different plan, the recommended
+        way is to store storage-specific data in SavePlan::storage_data.
+
+        Args:
+            plan (SavePlan): The local plan from the ``SavePlanner`` in use.
+
+        Returns:
+            A transformed ``SavePlan`` after storage local planning
+        """
+
+    @abc.abstractmethod
+    def prepare_global_plan(self, plans: List[SavePlan]) -> List[SavePlan]:
+        """
+        Perform centralized planning of storage.
+
+        This method is only called on the coordinator instance.
+
+        While this method can produce a completely different plan, the preferred
+        way is to store storage-specific data in SavePlan::storage_data.
+
+        Args:
+            plans: A list of ``SavePlan`` instances, one for each rank.
+
+        Returns:
+            A list of transformed ``SavePlan`` after storage global planning
+        """
+
+    @abc.abstractmethod
+    def write_data(
+        self, plan: SavePlan, planner: SavePlanner
+    ) -> Future[List[WriteResult]]:
+        """
+        Write all items from ``plan`` using ``planner`` to resolve the data.
+
+        A subclass should call ``SavePlanner::resolve_data`` on each item
+        from the plan to get access to the underlying object to write.
+
+        Subclasses should lazily call `resolve_data` as it can allocate memory.
+        In case of tensors, make the following assumptions:
+
+        - They might be on any device, including not matching the one on ``WriteItem::tensor_data``
+        - They might be views or not contiguous. Only the projection needs to be saved.
+
+        Args:
+            plan (SavePlan): The save plan to execute.
+            planner (SavePlanner): Planner object to be used to resolve items to data.
+
+        Returns:
+            A future that completes to a list of WriteResult
+        """
+
+    @abc.abstractmethod
+    def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None:
+        """
+        Write the metadata and mark the current checkpoint as successful.
+
+        The actual format/schema used for serializing `metadata` is an
+        implementation detail. The only requirement is that it's recoverable
+        into the same object graph.
+
+        Args:
+            metadata (Metadata): metadata for the new checkpoint
+            results: A list of WriteResults from all ranks.
+
+        Returns:
+            None
+        """
+
+    @classmethod
+    @abc.abstractmethod
+    def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
+        """
+        Check if the given checkpoint_id is supported by the storage. This allows
+        us to enable automatic storage selection.
+        """
+        ...
+
+    def storage_meta(self) -> Optional[StorageMeta]:
+        """
+        Return the storage-specific metadata. This is used to store additional information
+        in a checkpoint that can be useful for providing request-level observability. StorageMeta
+        is passed to the ``SavePlanner`` during save calls. Returns None by default.
+
+        TODO: provide an example
+        """
+        return None
+
+
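# A compact, hedged sketch of the StorageWriter contract above. `NoOpWriter` is
# hypothetical and performs no real I/O; the step comments map to the call
# sequence (0-5) documented in the class docstring.
import os
from typing import List, Union

from torch.distributed.checkpoint.metadata import Metadata
from torch.distributed.checkpoint.planner import SavePlan, SavePlanner
from torch.distributed.checkpoint.storage import StorageWriter, WriteResult
from torch.futures import Future


class NoOpWriter(StorageWriter):
    def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
        self.checkpoint_id = checkpoint_id  # step 0: a new write begins

    def set_up_storage_writer(self, is_coordinator: bool) -> None:
        self.is_coordinator = is_coordinator  # step 1: learn our role

    def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
        return plan  # step 2: per rank; could stash data in plan.storage_data

    def prepare_global_plan(self, plans: List[SavePlan]) -> List[SavePlan]:
        return plans  # step 3: coordinator only

    def write_data(
        self, plan: SavePlan, planner: SavePlanner
    ) -> Future[List[WriteResult]]:
        # step 4: a real writer resolves items lazily and persists them
        fut: Future[List[WriteResult]] = Future()
        fut.set_result([])
        return fut

    def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None:
        pass  # step 5: coordinator commits the metadata

    @classmethod
    def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
        return True  # accept any id in this sketch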
+ + """ + + @abc.abstractmethod + def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None: + """ + Initialize this instance. + + Args: + metadata (Metadata): The metadata schema to use. + is_coordinator (bool): Whether this instance is responsible for coordinating + the checkpoint. + """ + + @abc.abstractmethod + def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan: + """ + Perform storage-specific local planning. + + While this method can produce a completely different plan, the recommended + way is to store storage specific data in LoadPlan::storage_data. + + Args: + plan (LoadPlan): The local plan from the ``LoadPlan`` in use. + + Returns: + A transformed ``LoadPlan`` after storage local planning + """ + + @abc.abstractmethod + def prepare_global_plan(self, plans: List[LoadPlan]) -> List[LoadPlan]: + """ + Perform centralized planning of storage loading. + + This method is only called on the coordinator instance. + + While this method can produce a completely different plan, the preferred + way is to store storage specific data in LoadPlan::storage_data. + + Args: + plans: A list of ``LoadPlan`` instances, one for each rank. + + Returns: + A list of transformed ``LoadPlan`` after storage global planning + """ + + @abc.abstractmethod + def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]: + """ + Read all items from ``plan`` using ``planner`` to resolve the data. + + A subclass should call ``LoadPlanner::load_bytes`` to deserialize a BytesIO + object into the right place. + + A subclass should call ``LoadPlanner::resolve_tensor`` to get access to the + tensors that in should load data into. + + It's the StorageLayer responsibility to properly schedule any cross device copies + required. + + Args: + plan (LoadPlan): The local plan to execute on + planner (LoadPlanner): The planner object to use to resolve items. + + Returns: + A future that completes once all reads are finished. + """ + + @classmethod + @abc.abstractmethod + def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: + """ + Check if the given checkpoint_id is supported by the stroage. This allow + us to enable automatic storage selection. + """ + ... 
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/utils.py b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb90dd119121593f5df5bf355f30df98361f5764
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/utils.py
@@ -0,0 +1,431 @@
+# mypy: allow-untyped-defs
+import cProfile
+import inspect
+import io
+import itertools
+import os
+import warnings
+from contextlib import contextmanager
+from functools import wraps
+from pstats import Stats
+from typing import Any, Callable, cast, Dict, List, Optional, Sequence, TypeVar, Union
+
+import torch
+import torch.distributed as dist
+from torch.distributed._shard.sharded_tensor import ShardedTensor
+from torch.distributed._shard.sharded_tensor.shard import Shard
+
+from .api import (
+    _is_wrapped_exception,
+    _wrap_exception,
+    CheckpointException,
+    WRAPPED_EXCEPTION,
+)
+from .metadata import MetadataIndex, STATE_DICT_TYPE
+
+
+__all__ = ["find_tensor_shard", "find_state_dict_object"]
+
+T = TypeVar("T")
+R = TypeVar("R")
+
+
+def _get_failure_dict(
+    results: List[Union[T, WRAPPED_EXCEPTION]]
+) -> Dict[int, WRAPPED_EXCEPTION]:
+    return cast(
+        Dict[int, WRAPPED_EXCEPTION],
+        {i: err for i, err in enumerate(results) if _is_wrapped_exception(err)},
+    )
+
+
+def _all_gather_keys(
+    local_dict: Dict[Any, Any], group: Optional[dist.ProcessGroup] = None
+) -> List[Any]:
+    """Gathers all keys, and returns them sorted."""
+    keys = list(local_dict.keys())
+    gathered_keys: List[List[Any]] = [None] * dist.get_world_size(group)  # type: ignore[list-item]
+
+    dist.all_gather_object(gathered_keys, keys, group=group)
+    return sorted(set(itertools.chain.from_iterable(gathered_keys)))
+
+
+class _DistWrapper:
+    """
+    This is a wrapper around PG that provides a series of features around object collectives.
+
+    It also works when distributed is not initialized, in which case most collectives turn into no-ops.
+
+    All variants that take functions are exception robust, meaning that if one or more
+    ranks raise errors, all ranks will observe those.
+ """ + + def __init__( + self, + group: Optional[dist.ProcessGroup], + use_dist: bool, + coordinator_rank: int, + ): + self.group = group + self.use_dist = use_dist + self.coordinator_rank = coordinator_rank + if self.use_dist: + self.rank = dist.get_rank(group) + self.is_coordinator = self.rank == coordinator_rank + else: + self.rank = 0 + self.is_coordinator = True + + def get_rank(self) -> int: + return self.rank + + def get_world_size(self) -> int: + if self.use_dist: + return dist.get_world_size(self.group) + return 1 + + def broadcast_object(self, object: Optional[T]) -> T: + """Implement functionality similar to c10d::broadcast_object_list but without distributed enabled.""" + object_list = [object] + if self.use_dist: + dist.broadcast_object_list( + object_list=object_list, + group=self.group, + src=self.coordinator_rank, + ) + return cast(T, object_list[0]) + + def gather_object(self, object: T) -> Optional[List[T]]: + """Implement functionality similar to c10d::gather_object but without distributed enabled.""" + if self.use_dist: + gather_objs = ( + cast(List[T], [None] * dist.get_world_size(self.group)) + if self.is_coordinator + else None + ) + + dist.gather_object( + obj=object, + object_gather_list=gather_objs if self.is_coordinator else None, + dst=self.coordinator_rank, + group=self.group, + ) + result = gather_objs + else: + result = [object] + return result + + def all_gather_object(self, object: T) -> List[T]: + """Implement functionality similar to c10d::all_gather_object but without distributed enabled.""" + if self.use_dist: + gather_objs = cast(List[T], [None] * dist.get_world_size(self.group)) + + dist.all_gather_object( + object_list=gather_objs, obj=object, group=self.group + ) + else: + gather_objs = [object] + return gather_objs + + def scatter_object(self, object_list: Optional[List[T]]) -> T: + """Implement functionality similar to c10d::scatter_object but without distributed enabled.""" + if self.use_dist: + gather_result = cast(List[T], [None]) + dist.scatter_object_list( + scatter_object_output_list=gather_result, + scatter_object_input_list=object_list if self.is_coordinator else None, + src=self.coordinator_rank, + group=self.group, + ) + + local_reply = gather_result[0] + else: + assert object_list is not None + local_reply = object_list[0] + return local_reply + + def reduce_scatter( + self, + step: str, + map_fun: Callable[[], T], + reduce_fun: Callable[[List[T]], List[R]], + ) -> R: + """ + Compute a value on each rank, then do centralized reduce on a single rank, followed by a scatter. + + This method operates in the following way: + Run ``map_fun`` on all ranks + Gather results on rank 0 + Call ``reduce_fun`` on all those values + Scatter to each rank part of the result. + """ + local_data: Union[WRAPPED_EXCEPTION, T] + try: + local_data = map_fun() + except BaseException as e: + local_data = _wrap_exception(e) + + all_data = self.gather_object(local_data) + all_results: Optional[List[Union[R, CheckpointException]]] = None + if self.is_coordinator: + assert all_data is not None + node_failures = _get_failure_dict(all_data) + + if len(node_failures) == 0: + try: + # N.B. why can't mypy cast List[R] to List[Union[R, WRAPPED_EXCEPTION]]? 
+                    all_results = cast(
+                        List[Union[R, CheckpointException]],
+                        reduce_fun(cast(List[T], all_data)),
+                    )
+                except BaseException as e:
+                    node_failures[self.rank] = _wrap_exception(e)
+
+            if len(node_failures) > 0:
+                all_results = [
+                    CheckpointException(step, node_failures)
+                ] * self.get_world_size()
+
+        result = self.scatter_object(all_results)
+        if isinstance(result, CheckpointException):
+            raise result
+        return result
+
+    def all_reduce(
+        self,
+        step: str,
+        map_fun: Callable[[], T],
+        reduce_fun: Callable[[List[T]], R],
+    ) -> R:
+        """
+        Compute a value on each rank, then do centralized reduce on a single rank, followed by a broadcast.
+
+        This method operates in the following way:
+            Run ``map_fun`` on all ranks
+            Gather results on rank 0
+            Call ``reduce_fun`` on all those values
+            Broadcast the reduced value to all ranks.
+        """
+        local_data: Union[T, WRAPPED_EXCEPTION]
+        try:
+            local_data = map_fun()
+        except BaseException as e:
+            local_data = _wrap_exception(e)
+
+        all_data = self.gather_object(local_data)
+        result: Optional[Union[R, CheckpointException]] = None
+        if self.is_coordinator:
+            assert all_data is not None
+            node_failures = _get_failure_dict(all_data)
+            if len(node_failures) == 0:
+                try:
+                    result = reduce_fun(cast(List[T], all_data))
+                except BaseException as e:
+                    node_failures[self.rank] = _wrap_exception(e)
+
+            if len(node_failures) > 0:
+                result = CheckpointException(step, node_failures)
+
+        final_result = self.broadcast_object(result)
+        if isinstance(final_result, CheckpointException):
+            raise final_result
+        return cast(R, final_result)
+
+    def all_gather(
+        self,
+        step: str,
+        map_fun: Callable[[], T],
+    ) -> List[T]:
+        """
+        Compute a value on each rank, then all_gather them.
+
+        This method operates in the following way:
+            Run ``map_fun`` on all ranks
+            all_gather the values to all ranks
+        """
+        result: Union[T, WRAPPED_EXCEPTION]
+        try:
+            result = map_fun()
+        except BaseException as e:
+            result = _wrap_exception(e)
+
+        all_results = self.all_gather_object(result)
+
+        node_failures = _get_failure_dict(all_results)
+        if len(node_failures) > 0:
+            raise CheckpointException(step, node_failures)
+        return cast(List[T], all_results)
+
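# A small, hedged sketch (not part of this module) of how the collectives above
# compose. reduce_scatter() is the planning primitive used by _save_state_dict:
# every rank produces a local value, the coordinator reduces the gathered list,
# and each rank gets its slice back. In no-dist mode (use_dist=False) the whole
# exchange degenerates to plain function calls on a single process.
from torch.distributed.checkpoint.utils import _DistWrapper

dw = _DistWrapper(group=None, use_dist=False, coordinator_rank=0)


def local_step() -> int:
    return 2  # per-rank local "plan"


def global_step(all_plans):  # runs on the coordinator only
    return [p * 10 for p in all_plans]  # one entry scattered back per rank


print(dw.reduce_scatter("plan", local_step, global_step))  # -> 20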
+    def broadcast(
+        self,
+        step: str,
+        map_fun: Callable[[], T],
+    ) -> T:
+        """
+        Compute a value on rank 0 and broadcast it.
+
+        This method operates in the following way:
+            Run ``map_fun`` on rank 0
+            broadcast the value
+        """
+        result: Optional[Union[T, CheckpointException]] = None
+        if self.is_coordinator:
+            try:
+                result = map_fun()
+            except BaseException as e:
+                result = CheckpointException(step, {self.rank: _wrap_exception(e)})
+        final_result = self.broadcast_object(result)
+        if isinstance(final_result, CheckpointException):
+            raise final_result
+        return cast(T, final_result)
+
+
+def _find_shard(tensor: ShardedTensor, index: MetadataIndex) -> Shard:
+    if index.offset is None:
+        raise ValueError(
+            f"Cannot lookup {index.fqn} since it's a ShardedTensor and no offset was provided"
+        )
+
+    shards = tensor.local_shards()
+    # index fast path
+    if index.index is not None:
+        if (
+            len(shards) > index.index
+            and torch.Size(shards[index.index].metadata.shard_offsets) == index.offset
+        ):
+            return shards[index.index]
+
+    for shard in shards:
+        if torch.Size(shard.metadata.shard_offsets) == index.offset:
+            return shard
+    raise ValueError(f"Could not find shard at '{index.offset}' for FQN: '{index.fqn}'")
+
+
+def find_tensor_shard(tensor: torch.Tensor, index: MetadataIndex) -> torch.Tensor:
+    if hasattr(tensor, "__get_tensor_shard__"):
+        # DTensor implements _Checkpointable
+        return tensor.__get_tensor_shard__(index)  # type: ignore[attr-defined]
+    if isinstance(tensor, ShardedTensor):
+        return _find_shard(tensor, index).tensor
+    if index.offset is not None:
+        # special case looking up a tensor by origin
+        if index.offset == torch.Size([0] * len(tensor.size())):
+            return tensor
+        raise ValueError(
+            f"FQN: '{index.fqn}' is not a ShardedTensor, can't find by offset: '{index.offset}'"
+        )
+    return tensor
+
+
+def find_state_dict_object(state_dict: STATE_DICT_TYPE, index: MetadataIndex) -> Any:
+    if index.fqn not in state_dict:
+        raise ValueError(f"Could not find FQN: '{index.fqn}'")
+    obj = state_dict[index.fqn]
+
+    if isinstance(obj, torch.Tensor):
+        return find_tensor_shard(obj, index)
+    elif index.offset is not None:
+        raise ValueError(
+            f"FQN: '{index.fqn}' is not a ShardedTensor, can't find by offset: '{index.offset}'"
+        )
+    return obj
+
+
+def _element_wise_add(a: Sequence[int], b: Sequence[int]) -> List[int]:
+    return [i_a + i_b for i_a, i_b in zip(a, b)]
+
+
+def _element_wise_sub(a: Sequence[int], b: Sequence[int]) -> List[int]:
+    return [i_a - i_b for i_a, i_b in zip(a, b)]
+
+
+class _ReaderView(io.IOBase):
+    def __init__(self, base_stream: io.IOBase, offset: int, len: int):
+        super().__init__()
+        self.offset = offset
+        self.len = len
+        self.base_stream = base_stream
+        self.seek(0)
+
+    def seek(self, __offset: int, __whence: int = os.SEEK_SET) -> int:
+        if __whence == os.SEEK_SET:
+            __offset = self.offset + __offset
+        elif __whence == os.SEEK_END:
+            __whence = os.SEEK_SET
+            __offset = (self.offset + self.len) - __offset
+        return self.base_stream.seek(__offset, __whence)
+
+    def tell(self) -> int:
+        return self.base_stream.tell() - self.offset
+
+    def readable(self) -> bool:
+        return self.base_stream.readable()
+
+    def seekable(self) -> bool:
+        return self.base_stream.seekable()
+
+    def readinto(self, b):
+        return self.base_stream.readinto(b)  # type: ignore[attr-defined]
+
+    def read(self, size=-1):
+        return self.base_stream.read(size)
+
+
+def _create_file_view(file: io.IOBase, offset: int, length: int) -> io.IOBase:
+    # FIXME (kumpera) torch.load fails if we wrap with io.BufferedReader
+    return _ReaderView(file, offset, length)
+
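# A quick, hedged sketch of the _ReaderView helper above: it exposes the byte
# range [offset, offset + length) of a larger stream as its own read-only file
# object, which lets one consolidated checkpoint file serve per-item reads.
# Note that read() delegates to the base stream without clamping to the view's
# length, so callers pass explicit sizes.
import io

from torch.distributed.checkpoint.utils import _create_file_view

base = io.BytesIO(b"headerPAYLOADtrailer")
view = _create_file_view(base, offset=6, length=7)
print(view.read(7))  # b'PAYLOAD'
print(view.tell())   # 7 -- positions are relative to the view's offset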
"""Device info normalization.""" + if device_type == "cpu": + return "cpu" + return f"{device_type}:{device_id}" + + +# TODO: integrate with distributed logging flag +ENABLE_PROFILE = False + + +@contextmanager +def _profile(): + # Only log the profiling when it is enable and is on rank0 or dist is not + # avaiable. + if ENABLE_PROFILE and (not dist.is_available() or dist.get_rank() == 0): + profiler = cProfile.Profile() + profiler.enable() + try: + yield + finally: + profiler.disable() + stats = Stats(profiler) + stats.sort_stats("time").print_stats(10) + else: + yield + + +def _api_bc_check(func): + @wraps(func) + def inner_func(*args, **kwargs) -> Any: + if len(args) == 2: + warnings.warn( + f"The argument order of {func.__name__} has been changed. " + "Please check the document to avoid future breakages." + ) + sig = inspect.signature(func) + kwonlyargs = [ + p.name for p in sig.parameters.values() if p.kind == p.KEYWORD_ONLY + ] + if "storage_writer" in kwonlyargs: + assert "storage_writer" not in kwargs, (args, kwargs) + kwargs["storage_writer"] = args[1] + elif "storage_reader" in kwonlyargs: + assert "storage_reader" not in kwargs, (args, kwargs) + kwargs["storage_reader"] = args[1] + else: + raise RuntimeError(f"Unexpected kwonlyargs = {kwonlyargs}") + return func(args[0], **kwargs) + else: + return func(*args, **kwargs) + + return inner_func diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c9b29a750593a812907ce2cf4c800d7d1435bb --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py @@ -0,0 +1,77 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" + +Torchelastic agent and user worker failover contract: + +**TL;DR;**: + +* TE(torchelastic) expects user workers to finish with the 5 minutes drift +* It is better to design DDP app to fail for all workers, rather than a single one. +* TE does not synchronize number of restarts between agents +* TE re-rendezvous does not trigger restart decrease +* When a single agent finishes its job(successfully or not), it will close rendezvous. + If other agents still have workers in progress, they will be terminated. +* Based on above, scale down does not work if at least single agent finishes the job. +* When Scale up is detected by agents, it will not decrease ``max_restarts`` + + +In general TE(torchelastic) can launch arbitrary user code, but there is some +clarifications need to be done around what failover mechanism torchelastic +provides and what failover mechanism it expects from user workers. + +Torchelastic currently supports DDP style applications. That means that +TE expects *ALL* workers finish approximately at the same time. In practice, +it is nearly to impossible to guarantee that all workers in arbitrary +DDP application finish at the time, so TE provides a finalization barrier +that waits for TIMEOUT(5 minutes) for worker finalization. + +**Worker Failure** + +When worker fails, TE will check the number of restarts +available, if there is more than 0 restarts, TE will start a new rendezvous +round and restart the worker process. New rendezvous round will other +TE agents to terminate their workers. + +.. 
+.. note:: The TE agent does not synchronize restarts between themselves.
+          When a single agent performs a restart, it will trigger a local ``max_restarts``
+          decrease; other agents will not decrease their ``max_restarts``.
+
+A single worker failure can cause the whole cluster to fail:
+If a single worker is constantly failing, it will cause the TE agent
+``max_restarts`` to go to zero. This will cause an agent to finish its
+work and close rendezvous. If there are any other workers on different
+agents, they will be terminated.
+
+
+**Re-Rendezvous**
+
+Re-rendezvous occurs when TE agents detect a new node
+trying to join a cluster. TE will not decrease ``max_restarts``. TE agents
+will terminate their workers and start a new rendezvous round.
+
+Note about DynamicRendezvous (etcd-v2, c10d-experimental): If the rendezvous
+already has max_nodes, the new node won't be added to the wait list right
+away since there is no need to tear down a rendezvous that is already fully
+utilized. The new node will wait until its timeout (600 secs by default)
+and periodically check the number of participants. If the number becomes
+less than max_nodes, it will be added to the wait list; otherwise, it will time out after 600 secs.
+
+*Scale up event*. When a scale up event happens, the torchelastic rendezvous
+will detect that there are new nodes trying to join. The torchelastic agent
+will stop all workers and perform re-rendezvous. Note: when a scale up event
+happens, *``max_restarts``* will *not* decrease.
+
+*Scale down event*. When a scale down event happens, the rendezvous will not
+notify the torchelastic agent about it. If the TE agent is launched with ``max_restarts=0``,
+it relies on the underlying scheduler to handle the job restart. If ``max_restarts>0``,
+the TE agent will terminate workers and start a new rdzv round, which is a *Scale up event*.
+
+"""
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/control_plane.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/control_plane.py
new file mode 100644
index 0000000000000000000000000000000000000000..e778c0868384771e02e3b9df9e04a738a9e04043
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/control_plane.py
@@ -0,0 +1,52 @@
+import os
+from contextlib import contextmanager, ExitStack
+from typing import Generator
+
+from torch.distributed.elastic.multiprocessing.errors import record
+
+
+__all__ = [
+    "worker_main",
+]
+
+TORCH_WORKER_SERVER_SOCKET = "TORCH_WORKER_SERVER_SOCKET"
+
+
+@contextmanager
+def _worker_server(socket_path: str) -> Generator[None, None, None]:
+    from torch._C._distributed_c10d import _WorkerServer
+
+    server = _WorkerServer(socket_path)
+    try:
+        yield
+    finally:
+        server.shutdown()
+
+
+@contextmanager
+@record
+def worker_main() -> Generator[None, None, None]:
+    """
+    This is a context manager that wraps your main entry function. This combines
+    the existing ``errors.record`` logic as well as a new ``_WorkerServer`` that
+    exposes handlers via a unix socket specified by
+    ``TORCH_WORKER_SERVER_SOCKET``.
+
+    Example
+
+    ::
+
+     @worker_main()
+     def main():
+         pass
+
+     if __name__=="__main__":
+        main()
+
+    """
+    with ExitStack() as stack:
+        socket_path = os.environ.get(TORCH_WORKER_SERVER_SOCKET)
+        if socket_path is not None:
+            stack.enter_context(_worker_server(socket_path))
+
+        yield
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..21cb5e47d4419216212c6c609a515643593e95b7
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py
@@ -0,0 +1,233 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Library that launches and manages ``n`` copies of worker subprocesses either specified by a function or a binary.
+
+For functions, it uses ``torch.multiprocessing`` (and therefore python
+``multiprocessing``) to spawn/fork worker processes. For binaries it uses python
+``subprocess.Popen`` to create worker processes.
+
+
+Usage 1: Launching two trainers as a function
+
+::
+
+ from torch.distributed.elastic.multiprocessing import Std, start_processes
+
+ def trainer(a, b, c):
+     pass # train
+
+
+ # runs two trainers
+ # LOCAL_RANK=0 trainer(1,2,3)
+ # LOCAL_RANK=1 trainer(4,5,6)
+ ctx = start_processes(
+         name="trainer",
+         entrypoint=trainer,
+         args={0: (1,2,3), 1: (4,5,6)},
+         envs={0: {"LOCAL_RANK": 0}, 1: {"LOCAL_RANK": 1}},
+         log_dir="/tmp/foobar",
+         redirects=Std.ALL, # write all worker stdout/stderr to a log file
+         tee={0: Std.ERR}, # tee only local rank 0's stderr to console
+       )
+
+ # waits for all copies of trainer to finish
+ ctx.wait()
+
+Usage 2: Launching 2 echo workers as a binary
+
+::
+
+ # same as invoking
+ # echo hello
+ # echo world > stdout.log
+ ctx = start_processes(
+         name="echo",
+         entrypoint="echo",
+         log_dir="/tmp/foobar",
+         args={0: "hello", 1: "world"},
+         redirects={1: Std.OUT},
+        )
+
+Just like ``torch.multiprocessing``, the return value of the function
+:func:`start_processes` is a process context (:class:`api.PContext`). If a function
+was launched, a :class:`api.MultiprocessContext` is returned and if a binary
+was launched a :class:`api.SubprocessContext` is returned. Both are specific
+implementations of the parent :class:`api.PContext` class.
+"""
+
+from typing import Callable, Dict, Optional, Tuple, Union
+
+from torch.distributed.elastic.multiprocessing.api import (  # noqa: F401
+    _validate_full_rank,
+    DefaultLogsSpecs,
+    LogsDest,
+    LogsSpecs,
+    MultiprocessContext,
+    PContext,
+    ProcessFailure,
+    RunProcsResult,
+    SignalException,
+    Std,
+    SubprocessContext,
+    to_map,
+)
+from torch.distributed.elastic.utils.logging import get_logger
+
+
+__all__ = [
+    "start_processes",
+    "MultiprocessContext",
+    "PContext",
+    "ProcessFailure",
+    "RunProcsResult",
+    "SignalException",
+    "Std",
+    "LogsDest",
+    "LogsSpecs",
+    "DefaultLogsSpecs",
+    "SubprocessContext",
+    "to_map",
+]
+
+
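# A hedged usage sketch (not part of this module) for the start_processes API
# defined below: launching two copies of a function, where log handling is
# supplied via DefaultLogsSpecs rather than a bare log_dir. Names and paths
# are illustrative.
from torch.distributed.elastic.multiprocessing import (
    DefaultLogsSpecs,
    Std,
    start_processes,
)


def trainer(tag: str) -> str:
    return f"done-{tag}"  # stand-in for real training work


if __name__ == "__main__":
    ctx = start_processes(
        name="trainer",
        entrypoint=trainer,
        args={0: ("a",), 1: ("b",)},
        envs={0: {"LOCAL_RANK": "0"}, 1: {"LOCAL_RANK": "1"}},
        logs_specs=DefaultLogsSpecs(log_dir="/tmp/trainer_logs", redirects=Std.ALL),
    )
    result = ctx.wait()  # blocks until all replicas finish
    if result is not None and not result.is_failed():
        print(result.return_values)  # {0: 'done-a', 1: 'done-b'}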
+def start_processes(
+    name: str,
+    entrypoint: Union[Callable, str],
+    args: Dict[int, Tuple],
+    envs: Dict[int, Dict[str, str]],
+    logs_specs: LogsSpecs,
+    log_line_prefixes: Optional[Dict[int, str]] = None,
+    start_method: str = "spawn",
+) -> PContext:
+    """
+    Start ``n`` copies of ``entrypoint`` processes with the provided options.
+
+    ``entrypoint`` is either a ``Callable`` (function) or a ``str`` (binary).
+    The number of copies is determined by the number of entries for ``args`` and
+    ``envs`` arguments, which need to have the same key set.
+
+    ``args`` and ``envs`` parameters are the arguments and environment variables
+    to pass down to the entrypoint mapped by the replica index (local rank).
+    All local ranks must be accounted for.
+    That is, the keyset should be ``{0,1,...,(nprocs-1)}``.
+
+    .. note:: When the ``entrypoint`` is a binary (``str``), ``args`` can only be strings.
+              If any other type is given, then it is cast to a string representation
+              (e.g. ``str(arg1)``). Furthermore, a binary failure will only write
+              an ``error.json`` error file if the main function is annotated with
+              ``torch.distributed.elastic.multiprocessing.errors.record``. For function launches,
+              this is done by default and there is no need to manually annotate
+              with the ``@record`` annotation.
+
+    ``redirects`` and ``tee`` are bitmasks specifying which std stream(s) to redirect
+    to a log file in the ``log_dir``. Valid mask values are defined in ``Std``.
+    To redirect/tee only certain local ranks, pass ``redirects`` as a map with the key as
+    the local rank to specify the redirect behavior for.
+    Any missing local ranks will default to ``Std.NONE``.
+
+    ``tee`` acts like the unix "tee" command in that it both redirects and prints to console.
+    To avoid worker stdout/stderr from printing to console, use the ``redirects`` parameter.
+
+    For each process, the ``log_dir`` will contain:
+
+    #. ``{local_rank}/error.json``: if the process failed, a file with the error info
+    #. ``{local_rank}/stdout.log``: if ``redirect & STDOUT == STDOUT``
+    #. ``{local_rank}/stderr.log``: if ``redirect & STDERR == STDERR``
+
+    .. note:: It is expected that the ``log_dir`` exists, is empty, and is a directory.
+
+    Example:
+    ::
+
+     log_dir = "/tmp/test"
+
+     # ok; two copies of foo: foo("bar0"), foo("bar1")
+     start_processes(
+        name="trainer",
+        entrypoint=foo,
+        args={0: ("bar0",), 1: ("bar1",)},
+        envs={0: {}, 1: {}},
+        log_dir=log_dir
+     )
+
+     # invalid; envs missing for local rank 1
+     start_processes(
+        name="trainer",
+        entrypoint=foo,
+        args={0: ("bar0",), 1: ("bar1",)},
+        envs={0: {}},
+        log_dir=log_dir
+     )
+
+     # ok; two copies of /usr/bin/touch: touch file1, touch file2
+     start_processes(
+        name="trainer",
+        entrypoint="/usr/bin/touch",
+        args={0: ("file1",), 1: ("file2",)},
+        envs={0: {}, 1: {}},
+        log_dir=log_dir
+     )
+
+     # caution; arguments cast to string, runs:
+     # echo "1" "2" "3" and echo "[1, 2, 3]"
+     start_processes(
+        name="trainer",
+        entrypoint="/usr/bin/echo",
+        args={0: (1, 2, 3), 1: ([1, 2, 3],)},
+        envs={0: {}, 1: {}},
+        log_dir=log_dir
+     )
+
+    Args:
+        name: a human readable short name that describes what the processes are
+              (used as header when tee'ing stdout/stderr outputs)
+        entrypoint: either a ``Callable`` (function) or ``cmd`` (binary)
+        args: arguments to each replica
+        envs: env vars to each replica
+        logs_specs: defines the log directory, which std streams to redirect to a
+              log file, which streams to additionally tee + print to console, and
+              which ranks' logs to print to console
+        log_line_prefixes: optional per-rank prefixes for tee'd log lines
+        start_method: multiprocessing start method (spawn, fork, forkserver)
+                      ignored for binaries
+
+    """
+
+    nprocs = len(args)
+    _validate_full_rank(args, nprocs, "args")
+    _validate_full_rank(envs, nprocs, "envs")
+
+    context: PContext
+    if isinstance(entrypoint, str):
+        context = SubprocessContext(
+            name=name,
+            entrypoint=entrypoint,
+            args=args,
+            envs=envs,
+            logs_specs=logs_specs,
+            log_line_prefixes=log_line_prefixes,
+        )
+    else:
+        context = MultiprocessContext(
+            name=name,
+            entrypoint=entrypoint,
+            args=args,
+            envs=envs,
+            log_line_prefixes=log_line_prefixes,
+            start_method=start_method,
+            logs_specs=logs_specs,
+        )
+
+    try:
+        context.start()
+        return context
+    except Exception:
+        context.close()
+        raise
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec7985fd4f41e428d770d726ffcbcdd979a442de
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/api.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5164582c8c248f9e1c07f7ffee2e94cbc45c0466
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/redirects.cpython-310.pyc differ
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..679ace8fcdb8162e1e5c18b9b6f428fabd9f1bf0
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/tail_log.cpython-310.pyc differ
diff
--git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py new file mode 100644 index 0000000000000000000000000000000000000000..5befbffe49573db01280a9c7c8999aed67ad4365 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/api.py @@ -0,0 +1,923 @@ +#!/usr/bin/env python3 +# mypy: allow-untyped-defs + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import abc +import logging +import os +import re +import shutil +import signal +import subprocess +import sys +import tempfile +import threading +import time +from abc import ABC, abstractmethod +from contextlib import nullcontext +from dataclasses import dataclass, field +from enum import IntFlag +from multiprocessing import synchronize +from types import FrameType +from typing import Any, Callable, Dict, Optional, Set, Tuple, Union + +import torch.multiprocessing as mp +from torch.distributed.elastic.multiprocessing.errors import ProcessFailure, record +from torch.distributed.elastic.multiprocessing.redirects import ( + redirect_stderr, + redirect_stdout, +) +from torch.distributed.elastic.multiprocessing.subprocess_handler import ( + get_subprocess_handler, + SubprocessHandler, +) +from torch.distributed.elastic.multiprocessing.tail_log import TailLog + + +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" + + +logger = logging.getLogger(__name__) + +__all__ = [ + "DefaultLogsSpecs", + "SignalException", + "Std", + "to_map", + "RunProcsResult", + "PContext", + "get_std_cm", + "MultiprocessContext", + "SubprocessContext", + "LogsDest", + "LogsSpecs", +] + + +class SignalException(Exception): + """ + Exception is raised inside the torchelastic agent process by the termination handler + if the death signal got received by the process. + """ + + def __init__(self, msg: str, sigval: signal.Signals) -> None: + super().__init__(msg) + self.sigval = sigval + + +def _terminate_process_handler(signum: int, frame: Optional[FrameType]) -> None: + """Termination handler that raises exceptions on the main process. + + When the process receives death signal(SIGTERM, SIGINT), this termination handler will + be invoked. It raises the ``SignalException`` exception that should be processed by the + user code. Python does not terminate process after the termination handler is finished, + so the exception should not be silently ignored, otherwise the process will never + be terminated. + """ + sigval = signal.Signals(signum) + raise SignalException(f"Process {os.getpid()} got signal: {sigval}", sigval=sigval) + + +def _get_kill_signal() -> signal.Signals: + """Get the kill signal. SIGKILL for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGKILL + + +def _get_default_signal() -> signal.Signals: + """Get the default termination signal. 
SIGTERM for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGTERM + + +def _validate_full_rank(d: Dict[int, Any], nprocs: int, what: str): + actual_keys = set(d.keys()) + expected_keys = set(range(nprocs)) + + if actual_keys != expected_keys: + raise RuntimeError( + f"{what}, local rank mapping mismatch," + f" expected: {expected_keys}, actual: {actual_keys}" + ) + + +_MAPPING_REGEX = r"^(\d:[0123],)*(\d:[0123])$" +_VALUE_REGEX = r"^[0123]$" + + +class Std(IntFlag): + NONE = 0 + OUT = 1 + ERR = 2 + ALL = OUT | ERR + + @classmethod + def from_str(cls, vm: str) -> Union["Std", Dict[int, "Std"]]: + """ + Example: + :: + + from_str("0") -> Std.NONE + from_str("1") -> Std.OUT + from_str("0:3,1:0,2:1,3:2") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR} + + Any other input raises an exception + """ + + def to_std(v: str) -> Std: # type: ignore[return] + s = Std(int(v)) + if s in Std: + return s + # return None -> should NEVER reach here since we regex check input + + if re.match(_VALUE_REGEX, vm): # vm is a number (e.g. 0) + return to_std(vm) + elif re.match(_MAPPING_REGEX, vm): # vm is a mapping (e.g. 0:1,1:2) + d: Dict[int, Std] = {} + for m in vm.split(","): + i, v = m.split(":") + d[int(i)] = to_std(v) + return d + else: + raise ValueError( + f"{vm} does not match: <{_VALUE_REGEX}> or <{_MAPPING_REGEX}>" + ) + + +def to_map( + val_or_map: Union[Std, Dict[int, Std]], local_world_size: int +) -> Dict[int, Std]: + """ + Certain APIs take redirect settings either as a single value (e.g. apply to all + local ranks) or as an explicit user-provided mapping. This method is a convenience + method that converts a value or mapping into a mapping. + + Example: + :: + + to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} + to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT} + to_map({0: Std.OUT, 1: Std.OUT}, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} + """ + if isinstance(val_or_map, Std): + return dict.fromkeys(range(local_world_size), val_or_map) + else: + map = {} + for i in range(local_world_size): + map[i] = val_or_map.get(i, Std.NONE) + return map + + +@dataclass +class LogsDest: + """ + For each log type, holds mapping of local rank ids to file paths. + """ + + stdouts: Dict[int, str] = field(default_factory=dict) + stderrs: Dict[int, str] = field(default_factory=dict) + tee_stdouts: Dict[int, str] = field(default_factory=dict) + tee_stderrs: Dict[int, str] = field(default_factory=dict) + error_files: Dict[int, str] = field(default_factory=dict) + + +class LogsSpecs(ABC): + """ + Defines logs processing and redirection for each worker process. + + Args: + log_dir: + Base directory where logs will be written. + redirects: + Streams to redirect to files. Pass a single ``Std`` + enum to redirect for all workers, or a mapping keyed + by local_rank to selectively redirect. + tee: + Streams to duplicate to stdout/stderr. + Pass a single ``Std`` enum to duplicate streams for all workers, + or a mapping keyed by local_rank to selectively duplicate. 
+ """ + + def __init__( + self, + log_dir: Optional[str] = None, + redirects: Union[Std, Dict[int, Std]] = Std.NONE, + tee: Union[Std, Dict[int, Std]] = Std.NONE, + local_ranks_filter: Optional[Set[int]] = None, + ) -> None: + self._root_log_dir = log_dir + self._redirects = redirects + self._tee = tee + self._local_ranks_filter = local_ranks_filter + + @abstractmethod + def reify( + self, + envs: Dict[int, Dict[str, str]], + ) -> LogsDest: + """ + Given the environment variables, builds destination of log files for each of the local ranks. + + Envs parameter contains env variables dict for each of the local ranks, where entries are defined in: + :func:`~torchelastic.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent._start_workers`. + """ + + @property + @abstractmethod + def root_log_dir(self) -> str: + pass + + +class DefaultLogsSpecs(LogsSpecs): + """ + Default LogsSpecs implementation: + + - `log_dir` will be created if it doesn't exist + - Generates nested folders for each attempt and rank. + """ + + def __init__( + self, + log_dir: Optional[str] = None, + redirects: Union[Std, Dict[int, Std]] = Std.NONE, + tee: Union[Std, Dict[int, Std]] = Std.NONE, + local_ranks_filter: Optional[Set[int]] = None, + ) -> None: + if log_dir != os.devnull: + if not log_dir: + log_dir = tempfile.mkdtemp(prefix="torchelastic_") + elif not os.path.exists(log_dir): + os.makedirs(log_dir, exist_ok=True) + else: + if os.path.isfile(log_dir): + raise NotADirectoryError(f"log_dir: {log_dir} is a file") + super().__init__(log_dir, redirects, tee, local_ranks_filter) + # initialized only once + self._run_log_dir = None + + @property + def root_log_dir(self) -> str: + return str(self._root_log_dir) + + def _make_log_dir(self, log_dir: Optional[str], rdzv_run_id: str): + base_log_dir = log_dir or tempfile.mkdtemp(prefix="torchelastic_") + os.makedirs(base_log_dir, exist_ok=True) + dir = tempfile.mkdtemp(prefix=f"{rdzv_run_id}_", dir=base_log_dir) + logger.info("log directory set to: %s", dir) + return dir + + def reify( + self, + envs: Dict[int, Dict[str, str]], + ) -> LogsDest: + """ + Uses following scheme to build log destination paths: + + - `//attempt_//stdout.log` + - `//attempt_//stderr.log` + - `//attempt_//error.json` + """ + nprocs = len(envs) + global_env = {} # use only to query properies that are not dependent on a rank + if nprocs > 0: + global_env = envs[0] + else: + logger.warning( + "Empty envs map provided when defining logging destinations." + ) + # Keys are always defined, but values can be missing in unit tests + run_id = global_env.get("TORCHELASTIC_RUN_ID", "test_run_id") + restart_count = global_env.get("TORCHELASTIC_RESTART_COUNT", "0") + + attempt_log_dir: str = "" + if self._root_log_dir != os.devnull: + if not self._run_log_dir: + self._run_log_dir = self._make_log_dir(self._root_log_dir, run_id) + + attempt_log_dir = os.path.join(self._run_log_dir, f"attempt_{restart_count}") # type: ignore[call-overload] + shutil.rmtree(attempt_log_dir, ignore_errors=True) + os.makedirs(attempt_log_dir) + + if self._root_log_dir == os.devnull: + attempt_log_dir = os.devnull + + # create subdirs for each local rank in the logs_dir + # logs_dir + # |- 0 + # |- error.json + # |- stdout.log + # |- stderr.log + # |- ... 
+ # |- (nprocs-1) + redirs = to_map(self._redirects, nprocs) + ts = to_map(self._tee, nprocs) + + # to tee stdout/stderr we first redirect into a file + # then tail -f stdout.log/stderr.log so add tee settings to redirects + for local_rank, tee_std in ts.items(): + redirect_std = redirs[local_rank] + redirs[local_rank] = redirect_std | tee_std + + SYS_STREAM = "" # special case to indicate to output to console + stdouts = dict.fromkeys(range(nprocs), SYS_STREAM) + stderrs = dict.fromkeys(range(nprocs), SYS_STREAM) + tee_stdouts: Dict[int, str] = {} + tee_stderrs: Dict[int, str] = {} + error_files = {} + + for local_rank in range(nprocs): + if attempt_log_dir == os.devnull: + tee_stdouts[local_rank] = os.devnull + tee_stderrs[local_rank] = os.devnull + error_files[local_rank] = os.devnull + envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = "" + else: + clogdir = os.path.join(attempt_log_dir, str(local_rank)) + os.mkdir(clogdir) + + rd = redirs[local_rank] + if (rd & Std.OUT) == Std.OUT: + stdouts[local_rank] = os.path.join(clogdir, "stdout.log") + if (rd & Std.ERR) == Std.ERR: + stderrs[local_rank] = os.path.join(clogdir, "stderr.log") + + t = ts[local_rank] + if t & Std.OUT == Std.OUT: + tee_stdouts[local_rank] = stdouts[local_rank] + if t & Std.ERR == Std.ERR: + tee_stderrs[local_rank] = stderrs[local_rank] + + if ( + self._local_ranks_filter + and local_rank not in self._local_ranks_filter + ): + # If stream is tee'd, only write to file, but don't tail + if local_rank in tee_stdouts: + tee_stdouts.pop(local_rank, None) + if local_rank in tee_stderrs: + tee_stderrs.pop(local_rank, None) + + # If stream is not redirected, don't print + if stdouts[local_rank] == SYS_STREAM: + stdouts[local_rank] = os.devnull + if stderrs[local_rank] == SYS_STREAM: + stderrs[local_rank] = os.devnull + + error_file = os.path.join(clogdir, "error.json") + error_files[local_rank] = error_file + logger.info( + "Setting worker%s reply file to: %s", local_rank, error_file + ) + envs[local_rank]["TORCHELASTIC_ERROR_FILE"] = error_file + + return LogsDest(stdouts, stderrs, tee_stdouts, tee_stderrs, error_files) + + def __repr__(self) -> str: + return ( + f"DefaultLogsSpecs(root_log_dir={self._root_log_dir}, redirects={self._redirects}, " + f"tee={self._tee}, local_ranks_filter={self._local_ranks_filter})" + ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DefaultLogsSpecs): + return False + + return ( + self._root_log_dir == other._root_log_dir + and self._redirects == other._redirects + and self._tee == other._tee + and self._local_ranks_filter == other._local_ranks_filter + ) + + +@dataclass +class RunProcsResult: + """ + Results of a completed run of processes started with ``start_processes()``. Returned by ``PContext``. + + Note the following: + + 1. All fields are mapped by local rank + 2. ``return_values`` - only populated for functions (not the binaries). + 3. ``stdouts`` - path to stdout.log (empty string if no redirect) + 4. ``stderrs`` - path to stderr.log (empty string if no redirect) + + """ + + return_values: Dict[int, Any] = field(default_factory=dict) + failures: Dict[int, ProcessFailure] = field(default_factory=dict) + stdouts: Dict[int, str] = field(default_factory=dict) + stderrs: Dict[int, str] = field(default_factory=dict) + + def is_failed(self) -> bool: + return len(self.failures) > 0 + + +class PContext(abc.ABC): + """ + The base class that standardizes operations over a set of processes that are launched via different mechanisms. 
+
+    The name ``PContext`` is intentional to disambiguate with ``torch.multiprocessing.ProcessContext``.
+
+    .. warning:: stdouts and stderrs should ALWAYS be a superset of
+                 tee_stdouts and tee_stderrs (respectively); this is because
+                 tee is implemented as a redirect + tail -f
+    """
+
+    def __init__(
+        self,
+        name: str,
+        entrypoint: Union[Callable, str],
+        args: Dict[int, Tuple],
+        envs: Dict[int, Dict[str, str]],
+        logs_specs: LogsSpecs,
+        log_line_prefixes: Optional[Dict[int, str]] = None,
+    ):
+        self.name = name
+        # validate that all mappings have the same number of keys and
+        # all local ranks are accounted for
+        nprocs = len(args)
+
+        # TODO log_line_prefixes can be expanded too
+        logs_dest = logs_specs.reify(envs)
+
+        _validate_full_rank(logs_dest.stdouts, nprocs, "stdouts")
+        _validate_full_rank(logs_dest.stderrs, nprocs, "stderrs")
+
+        self.entrypoint = entrypoint
+        self.args = args
+        self.envs = envs
+        self.stdouts = logs_dest.stdouts
+        self.stderrs = logs_dest.stderrs
+        self.error_files = logs_dest.error_files
+        self.nprocs = nprocs
+
+        self._stdout_tail = TailLog(
+            name, logs_dest.tee_stdouts, sys.stdout, log_line_prefixes
+        )
+        self._stderr_tail = TailLog(
+            name, logs_dest.tee_stderrs, sys.stderr, log_line_prefixes
+        )
+
+    def start(self) -> None:
+        """Start processes using parameters defined in the constructor."""
+        if threading.current_thread() is threading.main_thread():
+            signal.signal(signal.SIGTERM, _terminate_process_handler)
+            signal.signal(signal.SIGINT, _terminate_process_handler)
+            if not IS_WINDOWS:
+                signal.signal(signal.SIGHUP, _terminate_process_handler)
+                signal.signal(signal.SIGQUIT, _terminate_process_handler)
+        else:
+            logger.warning(
+                "Failed to register signal handlers since torchelastic is running on a child thread. "
+                "This could lead to orphaned worker processes if torchrun is terminated."
+            )
+        self._start()
+        self._stdout_tail.start()
+        self._stderr_tail.start()
+
+    @abc.abstractmethod
+    def _start(self) -> None:
+        """Start processes using strategy defined in a particular context."""
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def _poll(self) -> Optional[RunProcsResult]:
+        """
+        Poll the run status of the processes running under this context.
+        This method follows an "all-or-nothing" policy and returns
+        a ``RunProcessResults`` object if either all processes complete
+        successfully or any process fails. Returns ``None`` if
+        all processes are still running.
+        """
+        raise NotImplementedError
+
+    def wait(self, timeout: float = -1, period: float = 1) -> Optional[RunProcsResult]:
+        """
+        Wait for the specified ``timeout`` seconds, polling every ``period`` seconds
+        for the processes to be done. Returns ``None`` if the processes are still running
+        on timeout expiry. Negative timeout values are interpreted as "wait-forever".
+        A timeout value of zero simply queries the status of the processes (e.g. equivalent
+        to a poll).
+
+        .. note:: The multiprocessing library registers SIGTERM and SIGINT signal handlers that raise
+                  ``SignalException`` when the signals are received. It is up to the consumer of the code
+                  to properly handle the exception. It is important not to swallow the exception, otherwise
+                  the process will never terminate. An example of the typical workflow:
+
+        .. code-block:: python
+
+            pc = start_processes(...)
+            try:
+                pc.wait(1)
+                .. do some other work
+            except SignalException as e:
+                pc.shutdown(e.sigval, timeout=30)
+
+        If SIGTERM or SIGINT occurs, the code above will try to shutdown child processes by propagating
+        the received signal.
If child processes will not terminate in the timeout time, the process will send + the SIGKILL. + """ + if timeout == 0: + return self._poll() + + if timeout < 0: + timeout = sys.maxsize + + expiry = time.time() + timeout + while time.time() < expiry: + pr = self._poll() + if pr: + return pr + time.sleep(period) + + return None + + @abc.abstractmethod + def pids(self) -> Dict[int, int]: + """Return pids of processes mapped by their respective local_ranks.""" + raise NotImplementedError + + @abc.abstractmethod + def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: + r""" + Terminates all processes managed by this context and cleans up any + meta resources (e.g. redirect, error_file files). + """ + raise NotImplementedError + + def close( + self, death_sig: Optional[signal.Signals] = None, timeout: int = 30 + ) -> None: + r""" + Terminates all processes managed by this context and cleans up any + meta resources (e.g. redirect, error_file files). + + Args: + death_sig: Death signal to terminate processes. + timeout: Time to wait for processes to finish, if process is + still alive after this time, it will be terminated via SIGKILL. + """ + if not death_sig: + death_sig = _get_default_signal() + self._close(death_sig=death_sig, timeout=timeout) + if self._stdout_tail: + self._stdout_tail.stop() + if self._stderr_tail: + self._stderr_tail.stop() + + +def get_std_cm(std_rd: str, redirect_fn): + if IS_WINDOWS or IS_MACOS or not std_rd: + return nullcontext() + else: + return redirect_fn(std_rd) + + +def _wrap( + local_rank: int, + fn: Callable, + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + stdout_redirects: Dict[int, str], # redirect file for stdout (to console if None) + stderr_redirects: Dict[int, str], # redirect file for stderr (to console if None) + ret_vals: Dict[int, mp.SimpleQueue], + queue_finished_reading_event: synchronize.Event, +) -> None: + # get the per-rank params up front so we fail fast if no mapping is found + args_ = args[local_rank] + env_ = envs[local_rank] + ret_val_ = ret_vals[local_rank] + + stdout_rd = stdout_redirects[local_rank] + stderr_rd = stderr_redirects[local_rank] + + stdout_cm = get_std_cm(stdout_rd, redirect_stdout) + stderr_cm = get_std_cm(stderr_rd, redirect_stderr) + + for k, v in env_.items(): + os.environ[k] = v + + with stdout_cm, stderr_cm: + ret = record(fn)(*args_) + ret_val_.put(ret) + queue_finished_reading_event.wait() + + +class MultiprocessContext(PContext): + """``PContext`` holding worker processes invoked as a function.""" + + def __init__( + self, + name: str, + entrypoint: Callable, + args: Dict[int, Tuple], + envs: Dict[int, Dict[str, str]], + start_method: str, + logs_specs: LogsSpecs, + log_line_prefixes: Optional[Dict[int, str]] = None, + ): + super().__init__( + name, + entrypoint, + args, + envs, + logs_specs, + log_line_prefixes, + ) + + self.start_method = start_method + # each ret_val queue will always contain a single element. + self._ret_vals = { + local_rank: mp.get_context(self.start_method).SimpleQueue() + for local_rank in range(self.nprocs) + } + + # see comments in ``join()`` for what this is + self._return_values: Dict[int, Any] = {} + self._pc: Optional[mp.ProcessContext] = None + # Note: set method should ONLY be invoked for the use case when all processes finished + # successfully. If any process died on event.wait() calling set() method will deadlock. 
+        self._worker_finished_event = mp.get_context(self.start_method).Event()
+
+    def _start(self):
+        if self._pc:
+            raise ValueError(
+                "The process context has already been initialized."
+                " Most likely the start method was called twice."
+            )
+        self._pc = mp.start_processes(
+            fn=_wrap,
+            args=(
+                self.entrypoint,
+                self.args,
+                self.envs,
+                self.stdouts,
+                self.stderrs,
+                self._ret_vals,
+                self._worker_finished_event,
+            ),
+            nprocs=self.nprocs,
+            join=False,
+            daemon=False,
+            start_method=self.start_method,
+        )
+
+    def _is_done(self) -> bool:
+        return len(self._return_values) == self.nprocs
+
+    def _poll(self) -> Optional[RunProcsResult]:
+        assert self._pc is not None  # assertion for mypy type checker
+
+        try:
+            # torch.mp.ProcessContext throws an Exception if some/all of
+            # worker processes failed
+            # timeout < 0 checks worker status and return immediately
+            # Join will never return success since we use synchronize.Event to wait
+            # for all processes to finish.
+            self._pc.join(-1)
+
+            # IMPORTANT: we use multiprocessing.Queue to carry worker return values
+            # back to the parent, the worker process will wait before terminating
+            # until all the buffered items are fed by the feeder thread to the underlying
+            # pipe. Hence to prevent deadlocks on large return values,
+            # we opportunistically try queue.get on each join call
+            # See: https://docs.python.org/2/library/multiprocessing.html#all-platforms
+            for local_rank in range(0, self.nprocs):
+                return_queue = self._ret_vals[local_rank]
+                if not return_queue.empty():
+                    # save the return values temporarily into a member var
+                    self._return_values[local_rank] = return_queue.get()
+
+            if self._is_done():
+                # we should ALWAYS have ALL the return values when all the processes are done
+                self._worker_finished_event.set()
+
+                # At this point workers finished running the user function
+                # But the child process might still have not exited. Wait for them.
+                # pc.join() blocks [forever] until "a" proc exits. Loop until all of them exits.
+                while not self._pc.join():
+                    logger.debug(
+                        "entrypoint fn finished, waiting for all child procs to exit..."
+                    )
+
+                _validate_full_rank(
+                    self._return_values, self.nprocs, "return_value queue"
+                )
+                self.close()
+                return RunProcsResult(
+                    return_values=self._return_values,
+                    stdouts=self.stdouts,
+                    stderrs=self.stderrs,
+                )
+            else:
+                return None
+        except (mp.ProcessRaisedException, mp.ProcessExitedException) as e:
+            failed_local_rank = e.error_index
+
+            # entrypoint for MultiprocessContext will always be a Callable
+            fn_name = self.entrypoint.__qualname__  # type: ignore[union-attr]
+            failed_proc = self._pc.processes[failed_local_rank]
+            error_filepath = self.error_files[failed_local_rank]
+
+            logger.exception(
+                "failed (exitcode: %s)"
+                " local_rank: %s (pid: %s)"
+                " of fn: %s (start_method: %s)",
+                failed_proc.exitcode,
+                failed_local_rank,
+                e.pid,
+                fn_name,
+                self.start_method,
+            )
+
+            self.close()
+            return RunProcsResult(
+                failures={
+                    failed_local_rank: ProcessFailure(
+                        local_rank=failed_local_rank,
+                        pid=e.pid,
+                        exitcode=failed_proc.exitcode,
+                        error_file=error_filepath,
+                    )
+                },
+                stdouts=self.stdouts,
+                stderrs=self.stderrs,
+            )
+
+    def pids(self) -> Dict[int, int]:
+        assert self._pc is not None  # assertion for mypy type checking
+        return dict(enumerate(self._pc.pids()))
+
+    def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
+        if not self._pc:
+            return
+        for proc in self._pc.processes:
+            if proc.is_alive():
+                logger.warning(
+                    "Closing process %s via signal %s", proc.pid, death_sig.name
+                )
+                try:
+                    os.kill(proc.pid, death_sig)
+                except ProcessLookupError:
+                    # If the process already exited for some reason,
+                    # `ProcessLookupError` will be raised; it is safe to ignore it.
+                    pass
+        end = time.monotonic() + timeout
+        for proc in self._pc.processes:
+            time_to_wait = end - time.monotonic()
+            if time_to_wait <= 0:
+                break
+            proc.join(time_to_wait)
+        for proc in self._pc.processes:
+            if proc.is_alive():
+                logger.warning(
+                    "Unable to shutdown process %s via %s, forcefully exiting via %s",
+                    proc.pid,
+                    death_sig,
+                    _get_kill_signal(),
+                )
+                try:
+                    os.kill(proc.pid, _get_kill_signal())
+                except ProcessLookupError:
+                    # If the process already exited for some reason,
+                    # `ProcessLookupError` will be raised; it is safe to ignore it.
+                    pass
+                proc.join()
+
+
+class SubprocessContext(PContext):
+    """``PContext`` holding worker processes invoked as a binary."""
+
+    def __init__(
+        self,
+        name: str,
+        entrypoint: str,
+        args: Dict[int, Tuple],
+        envs: Dict[int, Dict[str, str]],
+        logs_specs: LogsSpecs,
+        log_line_prefixes: Optional[Dict[int, str]] = None,
+    ):
+        super().__init__(
+            name,
+            entrypoint,
+            args,
+            envs,
+            logs_specs,
+            log_line_prefixes,
+        )
+
+        # state vector; local_ranks are removed from this set as they finish
+        self._running_local_ranks: Set[int] = set(range(self.nprocs))
+        self._failures: Dict[int, ProcessFailure] = {}
+        self.subprocess_handlers: Dict[int, SubprocessHandler] = {}
+
+    def _start(self):
+        if self.subprocess_handlers:
+            raise ValueError(
+                "The subprocess handlers are already initialized. Most likely the start method got called twice."
+ ) + self.subprocess_handlers = { + local_rank: get_subprocess_handler( + entrypoint=self.entrypoint, # type: ignore[arg-type] # entrypoint is always a str + args=self.args[local_rank], + env=self.envs[local_rank], + stdout=self.stdouts[local_rank], + stderr=self.stderrs[local_rank], + local_rank_id=local_rank, + ) + for local_rank in range(self.nprocs) + } + + def _poll(self) -> Optional[RunProcsResult]: + done_local_ranks = set() + for local_rank in self._running_local_ranks: + handler = self.subprocess_handlers[local_rank] + exitcode = handler.proc.poll() + if exitcode is not None: + done_local_ranks.add(local_rank) + if exitcode != 0: # failed or signaled + self._failures[local_rank] = ProcessFailure( + local_rank=local_rank, + pid=handler.proc.pid, + exitcode=exitcode, + error_file=self.error_files[local_rank], + ) + # else: --> succeeded; nothing to do + + self._running_local_ranks.difference_update(done_local_ranks) + + # if ALL procs are finished or ANY have failed + if not self._running_local_ranks or self._failures: + self.close() # terminate all running procs + result = RunProcsResult( + failures=self._failures, + stdouts=self.stdouts, + stderrs=self.stderrs, + ) + if result.is_failed(): + first_failure = min(result.failures.values(), key=lambda f: f.timestamp) + logger.error( + "failed (exitcode: %s)" + " local_rank: %s (pid: %s)" + " of binary: %s", + first_failure.exitcode, + first_failure.local_rank, + first_failure.pid, + self.entrypoint, + ) + else: + # Populate return with dummy values. This provides consistency with MultiprocessingHandler + result.return_values = dict.fromkeys(range(self.nprocs)) + + return result + else: # there are no failures and procs still running + return None + + def pids(self) -> Dict[int, int]: + return { + local_rank: sh.proc.pid + for local_rank, sh in self.subprocess_handlers.items() + } + + def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: + if not self.subprocess_handlers: + return + for handler in self.subprocess_handlers.values(): + if handler.proc.poll() is None: + logger.warning( + "Sending process %s closing signal %s", + handler.proc.pid, + death_sig.name, + ) + handler.close(death_sig=death_sig) + end = time.monotonic() + timeout + for handler in self.subprocess_handlers.values(): + time_to_wait = end - time.monotonic() + if time_to_wait <= 0: + break + try: + handler.proc.wait(time_to_wait) + except subprocess.TimeoutExpired: + # Ignore the timeout expired exception, since + # the child process will be forcefully terminated via SIGKILL + pass + for handler in self.subprocess_handlers.values(): + if handler.proc.poll() is None: + logger.warning( + "Unable to shutdown process %s via %s, forcefully exiting via %s", + handler.proc.pid, + death_sig, + _get_kill_signal(), + ) + handler.close(death_sig=_get_kill_signal()) + handler.proc.wait() diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2f5ed2d1ab0b81dbcf635245fbd49acc7ed02f79 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py @@ -0,0 +1,383 @@ +#!/usr/bin/env python3 +# mypy: allow-untyped-defs + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Each host in a distributed PyTorch job runs with a single TorchElastic agent,
+and multiple workers (as child processes of the TorchElastic agent).
+Since the workers are user-provided (your PyTorch script/job), TorchElastic
+has a way to propagate errors on the trainers through the agent and up to the
+scheduler, which ultimately informs the end-user about the state of the job
+and applies any retry policies.
+
+TorchElastic categorizes errors into 3 categories:
+
++----------------+----------------+--------------------------------------------------------------+
+| Category       | Sub-Category   | Description                                                  |
++================+================+==============================================================+
+| User Error     | Input Error    | invalid inputs to TorchElastic APIs (e.g. min > max nodes)   |
+|                +----------------+--------------------------------------------------------------+
+|                | Worker Failure | any failures on the worker child process                     |
++----------------+----------------+--------------------------------------------------------------+
+| Platform Error | n/a            | failures caused by the agent                                 |
++----------------+----------------+--------------------------------------------------------------+
+| Infra Error    | n/a            | failures outside the domain of the agent and workers         |
+|                |                | (e.g. host failures)                                         |
++----------------+----------------+--------------------------------------------------------------+
+
+All errors other than "Worker Failure" are either raised canonically from the
+agent process or implicitly or explicitly crash the agent process, so the
+standard exception-handling strategies provided by the language (Python) apply.
+
+Worker Failures are special because the exception/failure originates on a different
+process from the agent, so the error needs to be propagated inter-process
+(e.g. the agent cannot simply ``try-catch`` an exception raised on the worker process).
+
+TorchElastic agents use :func:`torch.distributed.elastic.multiprocessing.start_processes`
+to launch the workers, which has simple file-based inter-process error propagation
+built in.
+
+Any function or binary entrypoint decorated with :func:`record`
+will write uncaught exceptions (with the trace information) to a file specified by the
+environment variable ``TORCHELASTIC_ERROR_FILE``. The parent process (e.g. agent)
+sets this env var on each child it launches, then aggregates the error files for all
+children, and propagates the one with the **smallest** timestamp (i.e. the **first** error).
+"""
+
+import json
+import os
+import signal
+import socket
+import time
+import warnings
+from dataclasses import dataclass, field
+from datetime import datetime
+from functools import wraps
+from string import Template
+from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar
+
+from torch.distributed.elastic.utils.logging import get_logger
+
+from .error_handler import ErrorHandler  # noqa: F401
+from .handlers import get_error_handler  # noqa: F401
+
+
+__all__ = [
+    "ProcessFailure",
+    "ChildFailedError",
+    "record",
+    "ErrorHandler",
+    "get_error_handler",
+]
+
+logger = get_logger(__name__)
+
+
+JSON = Dict
+
+_EMPTY_ERROR_DATA = {"message": ""}
+_NOT_AVAILABLE = "<N/A>"
+
+T = TypeVar("T")
+
+
+@dataclass
+class ProcessFailure:
+    """
+    Represents the result of a failed process. When the worker process fails, it may record the failure root cause into the file.
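+
+    For example, the agent constructs one of these per failed worker
+    (illustrative values only)::
+
+        failure = ProcessFailure(
+            local_rank=0, pid=997, exitcode=1, error_file="/tmp/0/error.json"
+        )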
+ + Tries to read the failure timestamp from the provided ``error_file``, + if the ``error_file`` does not exist, the timestamp is the current + timestamp (seconds since epoch). + + The ``message`` field is a concise explanation of the failure. If + the error file exists then the message is obtained from the error file. + Otherwise one is generated based on the failure signature. + + .. note:: It is assumed that the ``error_file`` is written by + ``torch.distributed.elastic.multiprocessing.errors.error_handler.ErrorHandler``. + Otherwise the behavior is undefined. + + """ + + local_rank: int + pid: int + exitcode: int + error_file: str + error_file_data: JSON = field(init=False) + message: str = field(init=False) + timestamp: int = field(init=False) + + def __post_init__(self): + self.error_file_data = _EMPTY_ERROR_DATA + if os.path.isfile(self.error_file): + try: + with open(self.error_file) as fp: + self.error_file_data = json.load(fp) + logger.debug( + "User process failed with error data: %s", + json.dumps(self.error_file_data, indent=2), + ) + self.message, self.timestamp = self._get_error_data( + self.error_file_data + ) + except Exception: + logger.exception("Failed to parse reply file: %s", self.error_file) + raise + else: + self._set_no_reply_file() + + # make up an informative message if not already present + if not self.message: + # signals typically do not generate an error file message + if self.exitcode < 0: + self.message = ( + f"Signal {-self.exitcode} ({self.signal_name()})" + f" received by PID {self.pid}" + ) + else: + self.message = "To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html" + + def _get_error_data(self, error_file_data: Dict[str, Any]) -> Tuple[str, int]: + message = error_file_data["message"] + if isinstance(message, str): + timestamp = int(error_file_data.get("timestamp", 0)) + else: + timestamp = int(message["extraInfo"]["timestamp"]) + return (message, timestamp) + + def _set_no_reply_file(self): + self.error_file = _NOT_AVAILABLE + self.error_file_data = _EMPTY_ERROR_DATA + self.message = "" + self.timestamp = int(time.time()) + + def signal_name(self) -> str: + if self.exitcode < 0: + # We don't want to kill the parent process trying to find the signal name. + # if the signal doesn't map to a known name, use not available. + try: + return signal.Signals(-self.exitcode).name + except Exception: + return _NOT_AVAILABLE + else: + return _NOT_AVAILABLE + + def timestamp_isoformat(self): + """Return timestamp in ISO format (YYYY-MM-DD_HH:MM:SS).""" + return datetime.fromtimestamp(self.timestamp).isoformat(sep="_") + + +GlobalRank = int + +_FAILURE_FORMAT_TEMPLATE = """[${idx}]: + time : ${time} + host : ${hostname} + rank : ${rank} (local_rank: ${local_rank}) + exitcode : ${exitcode} (pid: ${pid}) + error_file: ${error_file} + traceback : ${message}""" + +# extra new lines before and after are intentional +_MSG_FORMAT_TEMPLATE = """ +${boarder} +${title} +${section} +Failures: +${other_failures} +${section} +Root Cause (first observed failure): +${root_failure} +${boarder}""" + + +class ChildFailedError(Exception): + """ + Special exception type that can be raised from a function annotated with the + ``@record`` decorator to have the child process' (root exception) propagate + up the stack as-is (e.g. without being wrapped in the parent's traceback). + + Useful in cases where the parent is a simple nanny process + and the child (worker) processes are actually doing meaningful compute. 
+    In this case, errors typically occur on the child process as the parent
+    is not doing anything non-trivial, and child errors should be propagated
+    to the scheduler for accurate root cause diagnostics.
+
+    .. note:: The propagation relies on error files rather than exception handling to
+              support both function and binary launches.
+
+    Example:
+    ::
+
+        # process tree on a host (container)
+        0: scheduler-init-process:
+                   |- 1: torchelastic_agent:
+                            |- 2: trainer_0 (ok)
+                            |- 3: trainer_1 (fail) -> error.json
+                            |- ...
+                            |- n+2: trainer_n (ok)
+                   |- n+3: other processes
+                   |- ...
+
+    In the example above, trainer 1's failure (written into error.json) is
+    the root cause and should be reported to the scheduler's init process.
+    The torchelastic agent raises a ``ChildFailedError("trainer", {1: "trainer_1/error.json"})``
+    upon detecting trainer 1's failure which would propagate the contents
+    of trainer 1's error file to the scheduler's init process.
+    """
+
+    def __init__(self, name: str, failures: Dict[GlobalRank, ProcessFailure]):
+        self.name = name
+        self.failures = failures
+        assert (
+            self.failures
+        )  # does not make sense to create a ChildFailedError with no failures
+        super().__init__(self.format_msg())
+
+    def get_first_failure(self) -> Tuple[GlobalRank, ProcessFailure]:
+        rank = min(self.failures.keys(), key=lambda r: self.failures[r].timestamp)
+        return rank, self.failures[rank]
+
+    def format_msg(self, boarder_delim="=", section_delim="-"):
+        title = f"{self.name} FAILED"
+        root_rank, root_failure = self.get_first_failure()
+
+        root_failure_fmt: str = ""
+        other_failures_fmt: List[str] = []
+        width = len(title)
+        for idx, (rank, failure) in enumerate(self.failures.items()):
+            fmt, w = self._format_failure(idx, rank, failure)
+            width = max(width, w)
+            if rank == root_rank:
+                root_failure_fmt = fmt
+            else:
+                other_failures_fmt.append(fmt)
+
+        # upper boundary on width
+        width = min(width, 60)
+
+        return Template(_MSG_FORMAT_TEMPLATE).substitute(
+            boarder=boarder_delim * width,
+            title=title,
+            section=section_delim * width,
+            root_failure=root_failure_fmt,
+            other_failures="\n".join(other_failures_fmt or ["  "]),
+        )
+
+    def _format_failure(
+        self, idx: int, rank: int, failure: ProcessFailure
+    ) -> Tuple[str, int]:
+        # failure.message is either a str (when the failure does not generate a traceback - e.g. signals)
+        # or a dict (json) of the form
+        # {"message": $ERROR_MSG, "extraInfo": {"py_callstack": $TRACEBACK, timestamp: $TS}}
+        # so the display logic is:
+        # 1. if failure.message is not a dict (it is a str) just show it as is
+        # 2. else try to get the traceback (py_callstack)
+        # 3. if the traceback is not there, use the message
+        # 4. if the message is not there, show <N/A>
+        msg = failure.message
+        if isinstance(failure.message, dict):
+            msg = (
+                failure.message.get("extraInfo", {})
+                .get("py_callstack", failure.message.get("message", ""))
+                .replace("\n", "\n  ")  # to properly indent the traceback
+            )
+
+        fmt = Template(_FAILURE_FORMAT_TEMPLATE).substitute(
+            idx=idx,
+            time=failure.timestamp_isoformat(),
+            hostname=socket.getfqdn(),
+            rank=rank,
+            local_rank=failure.local_rank,
+            exitcode=failure.exitcode,
+            pid=failure.pid,
+            error_file=failure.error_file,
+            message=msg,
+        )
+        width = 0
+        for line in fmt.split("\n"):
+            width = max(width, len(line))
+        return fmt, width
+
+
+def record(
+    fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None
+) -> Callable[..., T]:
+    """
+    Syntactic sugar to record errors/exceptions that happened in the decorated
+    function using the provided ``error_handler``.
+
+    Using this decorator is equivalent to:
+
+    ::
+
+        error_handler = get_error_handler()
+        error_handler.initialize()
+        try:
+            foobar()
+        except ChildFailedError as e:
+            _, failure = e.get_first_failure()
+            error_handler.dump_error_file(failure.error_file, failure.exitcode)
+            raise
+        except Exception as e:
+            error_handler.record_exception(e)
+            raise
+
+    .. important:: use this decorator once per process at the top-level method;
+                   typically this is the main method.
+
+    Example
+
+    ::
+
+        @record
+        def main():
+            pass
+
+        if __name__=="__main__":
+            main()
+
+    """
+    if not error_handler:
+        error_handler = get_error_handler()
+
+    def wrap(f):
+        @wraps(f)
+        def wrapper(*args, **kwargs):
+            assert error_handler is not None  # assertion for mypy type checker
+            error_handler.initialize()
+            try:
+                return f(*args, **kwargs)
+            except SystemExit as se:
+                # For run_path based entrypoints, SystemExit with code = 0 will never exit.
+                # Handling it here by returning a value:
+                if se.code == 0:
+                    return None
+                else:
+                    raise
+            except ChildFailedError as e:
+                rank, failure = e.get_first_failure()
+                if failure.error_file != _NOT_AVAILABLE:
+                    error_handler.dump_error_file(failure.error_file, failure.exitcode)
+                else:
+                    logger.info(
+                        "local_rank %s FAILED with no error file."
+                        " Decorate your entrypoint fn with @record for traceback info."
+ " See: https://pytorch.org/docs/stable/elastic/errors.html", + rank, + ) + ) + raise + except Exception as e: + error_handler.record_exception(e) + raise + + return wrapper + + return wrap(fn) diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..278ed9df873a6f2f4c21465cb90fad639e0e8b3b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..935dd7b28c6afcd65ddd532f97f6f4c5ac1c1d71 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75b4cd855be7e4907fe3b54f1ffe8d8921336e20 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..89e7fffdd5c7dc664dd4ee096a0338a3004a6dfa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# mypy: allow-untyped-defs + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import faulthandler +import json +import logging +import os +import time +import traceback +import warnings +from typing import Any, Dict, Optional + + +__all__ = ["ErrorHandler"] + +logger = logging.getLogger(__name__) + + +class ErrorHandler: + """ + Write the provided exception object along with some other metadata about + the error in a structured way in JSON format to an error file specified by the + environment variable: ``TORCHELASTIC_ERROR_FILE``. If this environment + variable is not set, then simply logs the contents of what would have been + written to the error file. + + This handler may be subclassed to customize the handling of the error. + Subclasses should override ``initialize()`` and ``record_exception()``. + """ + + def _get_error_file_path(self) -> Optional[str]: + """ + Return the error file path. + + May return ``None`` to have the structured error be logged only. 
+ """ + return os.environ.get("TORCHELASTIC_ERROR_FILE", None) + + def initialize(self) -> None: + """ + Call prior to running code that we wish to capture errors/exceptions. + + Typically registers signal/fault handlers. Users can override this + function to add custom initialization/registrations that aid in + propagation/information of errors/signals/exceptions/faults. + """ + try: + faulthandler.enable(all_threads=True) + except Exception as e: + warnings.warn(f"Unable to enable fault handler. {type(e).__name__}: {e}") + + def _write_error_file(self, file_path: str, error_msg: str) -> None: + """Write error message to the file.""" + try: + with open(file_path, "w") as fp: + fp.write(error_msg) + except Exception as e: + warnings.warn(f"Unable to write error to file. {type(e).__name__}: {e}") + + def record_exception(self, e: BaseException) -> None: + """ + Write a structured information about the exception into an error file in JSON format. + + If the error file cannot be determined, then logs the content + that would have been written to the error file. + """ + file = self._get_error_file_path() + if file: + data = { + "message": { + "message": f"{type(e).__name__}: {e}", + "extraInfo": { + "py_callstack": traceback.format_exc(), + "timestamp": str(int(time.time())), + }, + } + } + with open(file, "w") as fp: + json.dump(data, fp) + + def override_error_code_in_rootcause_data( + self, + rootcause_error_file: str, + rootcause_error: Dict[str, Any], + error_code: int = 0, + ): + """Modify the rootcause_error read from the file, to correctly set the exit code.""" + if "message" not in rootcause_error: + logger.warning( + "child error file (%s) does not have field `message`. \n" + "cannot override error code: %s", + rootcause_error_file, + error_code, + ) + elif isinstance(rootcause_error["message"], str): + logger.warning( + "child error file (%s) has a new message format. \n" + "skipping error code override", + rootcause_error_file, + ) + else: + rootcause_error["message"]["errorCode"] = error_code + + def dump_error_file(self, rootcause_error_file: str, error_code: int = 0): + """Dump parent error file from child process's root cause error and error code.""" + with open(rootcause_error_file) as fp: + rootcause_error = json.load(fp) + # Override error code since the child process cannot capture the error code if it + # is terminated by signals like SIGSEGV. + if error_code: + self.override_error_code_in_rootcause_data( + rootcause_error_file, rootcause_error, error_code + ) + logger.debug( + "child error file (%s) contents:\n" "%s", + rootcause_error_file, + json.dumps(rootcause_error, indent=2), + ) + + my_error_file = self._get_error_file_path() + if my_error_file: + # Guard against existing error files + # This can happen when the child is created using multiprocessing + # and the same env var (TORCHELASTIC_ERROR_FILE) is used on the + # parent and child to specify the error files (respectively) + # because the env vars on the child is set in the wrapper function + # and by default the child inherits the parent's env vars, if the child + # process receives a signal before the wrapper function kicks in + # and the signal handler writes to the error file, then the child + # will write to the parent's error file. In this case just log the + # original error file contents and overwrite the error file. 
+ self._rm(my_error_file) + self._write_error_file(my_error_file, json.dumps(rootcause_error)) + logger.info("dumped error file to parent's %s", my_error_file) + else: + logger.error( + "no error file defined for parent, to copy child error file (%s)", + rootcause_error_file, + ) + + def _rm(self, my_error_file): + if os.path.isfile(my_error_file): + # Log the contents of the original file. + with open(my_error_file) as fp: + try: + original = json.dumps(json.load(fp), indent=2) + logger.warning( + "%s already exists" + " and will be overwritten." + " Original contents:\n%s", + my_error_file, + original, + ) + except json.decoder.JSONDecodeError: + logger.warning( + "%s already exists" + " and will be overwritten." + " Unable to load original contents:\n", + my_error_file, + ) + os.remove(my_error_file) diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..b8a78e73702fd10a7a69b0b9c599b9128e3d12d2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python3 +# mypy: allow-untyped-defs + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +# Multiprocessing error-reporting module + + +from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler + + +__all__ = ["get_error_handler"] + + +def get_error_handler(): + return ErrorHandler() diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py new file mode 100644 index 0000000000000000000000000000000000000000..057013fbb9e5b8a2aeca69b41d7679cbe75c0e28 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py @@ -0,0 +1,104 @@ +# mypy: allow-untyped-defs +# !/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Taken and modified from original source: +# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/ +import ctypes +import logging +import os +import sys +from contextlib import contextmanager +from functools import partial + + +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" + + +logger = logging.getLogger(__name__) + + +def get_libc(): + if IS_WINDOWS or IS_MACOS: + logger.warning( + "NOTE: Redirects are currently not supported in Windows or MacOs." + ) + return None + else: + return ctypes.CDLL("libc.so.6") + + +libc = get_libc() + + +def _c_std(stream: str): + return ctypes.c_void_p.in_dll(libc, stream) + + +def _python_std(stream: str): + return {"stdout": sys.stdout, "stderr": sys.stderr}[stream] + + +_VALID_STD = {"stdout", "stderr"} + + +@contextmanager +def redirect(std: str, to_file: str): + """ + Redirect ``std`` (one of ``"stdout"`` or ``"stderr"``) to a file in the path specified by ``to_file``. + + This method redirects the underlying std file descriptor (not just python's ``sys.stdout|stderr``). + See usage for details. 
+
+    The directory of ``to_file`` is assumed to exist and the destination file
+    is overwritten if it already exists.
+
+    .. note:: Due to buffering, cross-source writes are not guaranteed to
+              appear in wall-clock order. For instance in the example below
+              it is possible for the C-outputs to appear before the python
+              outputs in the log file.
+
+    Usage:
+
+    ::
+
+        # syntactic-sugar for redirect("stdout", "/tmp/stdout.log")
+        with redirect_stdout("/tmp/stdout.log"):
+            print("python stdouts are redirected")
+            libc = ctypes.CDLL("libc.so.6")
+            libc.printf(b"c stdouts are also redirected")
+            os.system("echo system stdouts are also redirected")
+
+        print("stdout restored")
+
+    """
+    if std not in _VALID_STD:
+        raise ValueError(
+            f"unknown standard stream <{std}>, must be one of {_VALID_STD}"
+        )
+
+    c_std = _c_std(std)
+    python_std = _python_std(std)
+    std_fd = python_std.fileno()
+
+    def _redirect(dst):
+        libc.fflush(c_std)
+        python_std.flush()
+        os.dup2(dst.fileno(), std_fd)
+
+    with os.fdopen(os.dup(std_fd)) as orig_std, open(to_file, mode="w+b") as dst:
+        _redirect(dst)
+        try:
+            yield
+        finally:
+            _redirect(orig_std)
+
+
+redirect_stdout = partial(redirect, "stdout")
+redirect_stderr = partial(redirect, "stderr")
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f56d423ce080fd7c331dc9b43eda58e5370678fc
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
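+
+# A minimal usage sketch (illustrative only; "echo" stands in for a real
+# worker binary and the log paths are placeholders):
+#
+#   handler = get_subprocess_handler(
+#       entrypoint="echo", args=("hello",), env={},
+#       stdout="/tmp/out.log", stderr="/tmp/err.log", local_rank_id=0,
+#   )
+#   handler.proc.wait()  # underlying subprocess.Popen of the worker
+#   handler.close()      # signals the worker's process group (SIGTERM by default on unix)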
+from torch.distributed.elastic.multiprocessing.subprocess_handler.handlers import ( + get_subprocess_handler, +) +from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import ( + SubprocessHandler, +) + + +__all__ = ["SubprocessHandler", "get_subprocess_handler"] diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c159aa6138cc0d233ac42c43ce16d461b1d1135 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a47fb0e71394a1b787b8598ea9ae2499d71b058 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/handlers.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cddc246adf357898057c3d90b4a0cc17edd1f65d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__pycache__/subprocess_handler.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..2660be5af399a4969edd64589757f918686c3f4e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# mypy: allow-untyped-defs + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+from typing import Dict, Tuple + +from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import ( + SubprocessHandler, +) + + +__all__ = ["get_subprocess_handler"] + + +def get_subprocess_handler( + entrypoint: str, + args: Tuple, + env: Dict[str, str], + stdout: str, + stderr: str, + local_rank_id: int, +): + return SubprocessHandler( + entrypoint=entrypoint, + args=args, + env=env, + stdout=stdout, + stderr=stderr, + local_rank_id=local_rank_id, + ) diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..f4b8d582f1c8161f88821b438e4e94052180f2f2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import os +import signal +import subprocess +import sys +from typing import Any, Dict, Optional, Tuple + + +__all__ = ["SubprocessHandler"] + +IS_WINDOWS = sys.platform == "win32" + + +def _get_default_signal() -> signal.Signals: + """Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGTERM + + +class SubprocessHandler: + """ + Convenience wrapper around python's ``subprocess.Popen``. Keeps track of + meta-objects associated to the process (e.g. stdout and stderr redirect fds). + """ + + def __init__( + self, + entrypoint: str, + args: Tuple, + env: Dict[str, str], + stdout: Optional[str], + stderr: Optional[str], + local_rank_id: int, + ): + self._stdout = open(stdout, "w") if stdout else None + self._stderr = open(stderr, "w") if stderr else None + # inherit parent environment vars + env_vars = os.environ.copy() + env_vars.update(env) + + args_str = (entrypoint, *[str(e) for e in args]) + self.local_rank_id = local_rank_id + self.proc: subprocess.Popen = self._popen(args_str, env_vars) + + def _popen(self, args: Tuple, env: Dict[str, str]) -> subprocess.Popen: + kwargs: Dict[str, Any] = {} + if not IS_WINDOWS: + kwargs["start_new_session"] = True + return subprocess.Popen( + # pyre-fixme[6]: Expected `Union[typing.Sequence[Union[_PathLike[bytes], + # _PathLike[str], bytes, str]], bytes, str]` for 1st param but got + # `Tuple[str, *Tuple[Any, ...]]`. 
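+            # start_new_session=True (set above on POSIX) makes the child a
+            # session leader whose pid doubles as its process-group id, which
+            # is what lets close() signal the whole worker tree via os.killpg.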
+            args=args,
+            env=env,
+            stdout=self._stdout,
+            stderr=self._stderr,
+            **kwargs,
+        )
+
+    def close(self, death_sig: Optional[signal.Signals] = None) -> None:
+        if not death_sig:
+            death_sig = _get_default_signal()
+        if IS_WINDOWS:
+            self.proc.send_signal(death_sig)
+        else:
+            os.killpg(self.proc.pid, death_sig)
+        if self._stdout:
+            self._stdout.close()
+        if self._stderr:
+            self._stderr.close()
diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c814ffb7be9980333bb3b6500aeaff349e52716
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/tail_log.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python3
+# mypy: allow-untyped-defs
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import logging
+import os
+import time
+from concurrent.futures.thread import ThreadPoolExecutor
+from threading import Event
+from typing import Dict, List, Optional, TextIO, TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+    from concurrent.futures._base import Future
+
+__all__ = ["tail_logfile", "TailLog"]
+
+logger = logging.getLogger(__name__)
+
+
+def tail_logfile(
+    header: str, file: str, dst: TextIO, finished: Event, interval_sec: float
+):
+    while not os.path.exists(file):
+        if finished.is_set():
+            return
+        time.sleep(interval_sec)
+
+    with open(file, errors="replace") as fp:
+        while True:
+            line = fp.readline()
+
+            if line:
+                dst.write(f"{header}{line}")
+            else:  # reached EOF
+                if finished.is_set():
+                    # log line producer is finished
+                    break
+                else:
+                    # log line producer is still going
+                    # wait for a bit before looping again
+                    time.sleep(interval_sec)
+
+
+class TailLog:
+    """
+    Tail the given log files.
+
+    The log files do not have to exist when the ``start()`` method is called. The tailer will gracefully wait until
+    the log files are created by the producer and will tail the contents of the
+    log files until the ``stop()`` method is called.
+
+    .. warning:: ``TailLog`` will wait indefinitely for the log file to be created!
+
+    Each log file's line will be prefixed with a header of the form: ``[{name}{idx}]:``,
+    where the ``name`` is user-provided and ``idx`` is the index of the log file
+    in the ``log_files`` mapping. ``log_line_prefixes`` can be used to override the
+    header for each log file.
+
+    Usage:
+
+    ::
+
+        log_files = {0: "/tmp/0_stdout.log", 1: "/tmp/1_stdout.log"}
+        tailer = TailLog("trainer", log_files, sys.stdout).start()
+        # actually run the trainers to produce 0_stdout.log and 1_stdout.log
+        run_trainers()
+        tailer.stop()
+
+        # once run_trainers() starts writing the ##_stdout.log files
+        # the tailer will print to sys.stdout:
+        # >>> [trainer0]:log_line1
+        # >>> [trainer1]:log_line1
+        # >>> [trainer0]:log_line2
+        # >>> [trainer0]:log_line3
+        # >>> [trainer1]:log_line2
+
+    .. note:: Due to buffering, log lines between files may not necessarily
+              be printed out in order. You should configure your application's
+              logger to suffix each log line with a proper timestamp.
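+
+    To override the default headers, pass ``log_line_prefixes`` (an
+    illustrative sketch)::
+
+        tailer = TailLog(
+            "trainer",
+            log_files,
+            sys.stdout,
+            log_line_prefixes={0: "[host0/rank0]:", 1: "[host0/rank1]:"},
+        ).start()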
+ + """ + + def __init__( + self, + name: str, + log_files: Dict[int, str], + dst: TextIO, + log_line_prefixes: Optional[Dict[int, str]] = None, + interval_sec: float = 0.1, + ): + n = len(log_files) + self._threadpool = None + if n > 0: + self._threadpool = ThreadPoolExecutor( + max_workers=n, + thread_name_prefix=f"{self.__class__.__qualname__}_{name}", + ) + + self._name = name + self._dst = dst + self._log_files = log_files + self._log_line_prefixes = log_line_prefixes + self._finished_events: Dict[int, Event] = { + local_rank: Event() for local_rank in log_files.keys() + } + self._futs: List[Future] = [] + self._interval_sec = interval_sec + self._stopped = False + + def start(self) -> "TailLog": + if not self._threadpool: + return self + + for local_rank, file in self._log_files.items(): + header = f"[{self._name}{local_rank}]:" + if self._log_line_prefixes and local_rank in self._log_line_prefixes: + header = self._log_line_prefixes[local_rank] + self._futs.append( + self._threadpool.submit( + tail_logfile, + header=header, + file=file, + dst=self._dst, + finished=self._finished_events[local_rank], + interval_sec=self._interval_sec, + ) + ) + return self + + def stop(self) -> None: + for finished in self._finished_events.values(): + finished.set() + + for local_rank, f in enumerate(self._futs): + try: + f.result() + except Exception as e: + logger.error( + "error in log tailor for %s%s. %s: %s", + self._name, + local_rank, + e.__class__.__qualname__, + e, + ) + + if self._threadpool: + self._threadpool.shutdown(wait=True) + + self._stopped = True + + def stopped(self) -> bool: + return self._stopped diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py b/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fb744a2b93615b703eb0dafb7c8e6c71bc1ad5d2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ + +from torch.distributed.launcher.api import ( # noqa: F401 + elastic_launch, + launch_agent, + LaunchConfig, +) diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d41ae1eb30a94809396b7a70eaaa37610936a89 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdcfc145fe17657f866b4745b86f117b6448407a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/launcher/api.py b/vllm/lib/python3.10/site-packages/torch/distributed/launcher/api.py new file mode 100644 index 0000000000000000000000000000000000000000..a3bcd4073c9bafab05668e8f91acdc181001a0d2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/launcher/api.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python3 +# mypy: allow-untyped-defs + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import sys +import uuid +from dataclasses import dataclass, field +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch.distributed.elastic.rendezvous.registry as rdzv_registry +from torch.distributed.elastic import events, metrics +from torch.distributed.elastic.agent.server.api import WorkerSpec +from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent +from torch.distributed.elastic.multiprocessing import ( + DefaultLogsSpecs, + LogsSpecs, + SignalException, +) +from torch.distributed.elastic.multiprocessing.errors import ChildFailedError +from torch.distributed.elastic.rendezvous import RendezvousParameters +from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint +from torch.distributed.elastic.utils.logging import get_logger + + +__all__ = ["LaunchConfig", "elastic_launch", "launch_agent"] + +logger = get_logger(__name__) + + +@dataclass +class LaunchConfig: + """ + Creates a rendezvous config. + + Args: + min_nodes: Minimum amount of nodes that the user function will + be launched on. Elastic agent ensures that the user + function start only when the min_nodes amount enters + the rendezvous. + max_nodes: Maximum amount of nodes that the user function + will be launched on. + nproc_per_node: On each node the elastic agent will launch + this amount of workers that will execute user + defined function. + rdzv_backend: rdzv_backend to use in the rendezvous (zeus-adapter, etcd). + rdzv_endpoint: The endpoint of the rdzv sync. storage. + rdzv_configs: Key, value pair that specifies rendezvous specific configuration. + rdzv_timeout: Legacy argument that specifies timeout for the rendezvous. It is going + to be removed in future versions, see the note below. The default timeout is 900 seconds. 
+        run_id: The unique run id of the job (if not passed, a unique one will be
+                deduced from the run environment - flow workflow id in flow - or auto-generated).
+        role: User defined role of the worker (defaults to "trainer").
+        max_restarts: The maximum number of restarts that the elastic agent will conduct
+                    on workers before failure.
+        monitor_interval: The interval in seconds at which the elastic agent
+                        monitors workers.
+        start_method: The method used by the elastic agent to start the
+                    workers (spawn, fork, forkserver).
+        metrics_cfg: configuration to initialize metrics.
+        local_addr: address of the local node if any. If not set, a lookup on the local
+                machine's FQDN will be performed.
+        local_ranks_filter: ranks for which to show logs in console. If not set, show from all.
+
+    .. note::
+        `rdzv_timeout` is a legacy argument that will be removed in future.
+        Set the timeout via `rdzv_configs['timeout']`
+
+    """
+
+    min_nodes: int
+    max_nodes: int
+    nproc_per_node: int
+    logs_specs: Optional[LogsSpecs] = None
+    run_id: str = ""
+    role: str = "default_role"
+    rdzv_endpoint: str = ""
+    rdzv_backend: str = "etcd"
+    rdzv_configs: Dict[str, Any] = field(default_factory=dict)
+    rdzv_timeout: int = -1
+    max_restarts: int = 3
+    monitor_interval: float = 0.1
+    start_method: str = "spawn"
+    log_line_prefix_template: Optional[str] = None
+    metrics_cfg: Dict[str, str] = field(default_factory=dict)
+    local_addr: Optional[str] = None
+
+    def __post_init__(self):
+        default_timeout = 900
+        if self.rdzv_timeout != -1:
+            self.rdzv_configs["timeout"] = self.rdzv_timeout
+        elif "timeout" not in self.rdzv_configs:
+            self.rdzv_configs["timeout"] = default_timeout
+
+        # Post-processing to enable refactoring to introduce logs_specs due to non-torchrun API usage
+        if self.logs_specs is None:
+            self.logs_specs = DefaultLogsSpecs()
+
+
+class elastic_launch:
+    """
+    Launches a torchelastic agent on the container that invoked the entrypoint.
+
+    1. Pass the ``entrypoint`` arguments as non ``kwargs`` (e.g. no named parameters).
+       ``entrypoint`` can be a function or a command.
+    2. The return value is a map of each worker's output mapped
+       by their respective global rank.
+
+    Usage
+
+    ::
+
+        def worker_fn(foo):
+            # ...
+
+        def main():
+            # entrypoint is a function.
+            outputs = elastic_launch(LaunchConfig, worker_fn)(foo)
+            # return rank 0's output
+            return outputs[0]
+
+            # entrypoint is a command and ``script.py`` is the python module.
+            outputs = elastic_launch(LaunchConfig, "script.py")(args)
+            outputs = elastic_launch(LaunchConfig, "python")("script.py")
+    """
+
+    def __init__(
+        self,
+        config: LaunchConfig,
+        entrypoint: Union[Callable, str, None],
+    ):
+        self._config = config
+        self._entrypoint = entrypoint
+
+    def __call__(self, *args):
+        return launch_agent(self._config, self._entrypoint, list(args))
+
+
+def _get_entrypoint_name(
+    entrypoint: Union[Callable, str, None], args: List[Any]
+) -> str:
+    """Retrieve entrypoint name with the rule:
+    1. If entrypoint is a function, use ``entrypoint.__name__``.
+    2. If entrypoint is a string, check its value:
+        2.1 if entrypoint equals ``sys.executable`` (like "python"), use the first element from ``args``
+            which does not start with a hyphen (for example, "-u" will be skipped).
+        2.2 otherwise, use the ``entrypoint`` value.
+    3. Otherwise, return an empty string.
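+
+    For example (illustrative)::
+
+        _get_entrypoint_name(sys.executable, ["-u", "train.py"])  # -> "train.py"
+        _get_entrypoint_name("my_trainer_bin", [])                # -> "my_trainer_bin"
+        _get_entrypoint_name(None, [])                            # -> ""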
+ """ + if isinstance(entrypoint, Callable): # type: ignore[arg-type] + return entrypoint.__name__ # type: ignore[union-attr] + elif isinstance(entrypoint, str): + if entrypoint == sys.executable: + return next((arg for arg in args if arg[0] != "-"), "") + else: + return entrypoint + else: + return "" + + +def _get_addr_and_port( + rdzv_parameters: RendezvousParameters, +) -> Tuple[Optional[str], Optional[int]]: + if rdzv_parameters.backend != "static": + return (None, None) + endpoint = rdzv_parameters.endpoint + endpoint = endpoint.strip() + if not endpoint: + raise ValueError( + "Endpoint is missing in endpoint. Try to add --master-addr and --master-port" + ) + master_addr, master_port = parse_rendezvous_endpoint(endpoint, default_port=-1) + if master_port == -1: + raise ValueError( + f"port is missing in endpoint: {endpoint}. Try to specify --master-port" + ) + return (master_addr, master_port) + + +def launch_agent( + config: LaunchConfig, + entrypoint: Union[Callable, str, None], + args: List[Any], +) -> Dict[int, Any]: + if not config.run_id: + run_id = str(uuid.uuid4().int) + logger.warning("config has no run_id, generated a random run_id: %s", run_id) + config.run_id = run_id + + entrypoint_name = _get_entrypoint_name(entrypoint, args) + + logger.info( + "Starting elastic_operator with launch configs:\n" + " entrypoint : %(entrypoint)s\n" + " min_nodes : %(min_nodes)s\n" + " max_nodes : %(max_nodes)s\n" + " nproc_per_node : %(nproc_per_node)s\n" + " run_id : %(run_id)s\n" + " rdzv_backend : %(rdzv_backend)s\n" + " rdzv_endpoint : %(rdzv_endpoint)s\n" + " rdzv_configs : %(rdzv_configs)s\n" + " max_restarts : %(max_restarts)s\n" + " monitor_interval : %(monitor_interval)s\n" + " log_dir : %(log_dir)s\n" + " metrics_cfg : %(metrics_cfg)s\n", + { + "entrypoint": entrypoint_name, + "min_nodes": config.min_nodes, + "max_nodes": config.max_nodes, + "nproc_per_node": config.nproc_per_node, + "run_id": config.run_id, + "rdzv_backend": config.rdzv_backend, + "rdzv_endpoint": config.rdzv_endpoint, + "rdzv_configs": config.rdzv_configs, + "max_restarts": config.max_restarts, + "monitor_interval": config.monitor_interval, + "log_dir": config.logs_specs.root_log_dir, # type: ignore[union-attr] + "metrics_cfg": config.metrics_cfg, + }, + ) + + rdzv_parameters = RendezvousParameters( + backend=config.rdzv_backend, + endpoint=config.rdzv_endpoint, + run_id=config.run_id, + min_nodes=config.min_nodes, + max_nodes=config.max_nodes, + local_addr=config.local_addr, + **config.rdzv_configs, + ) + + master_addr, master_port = _get_addr_and_port(rdzv_parameters) + + spec = WorkerSpec( + role=config.role, + local_world_size=config.nproc_per_node, + entrypoint=entrypoint, + args=tuple(args), + rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters), + max_restarts=config.max_restarts, + monitor_interval=config.monitor_interval, + master_addr=master_addr, + master_port=master_port, + local_addr=config.local_addr, + ) + + agent = LocalElasticAgent( + spec=spec, + logs_specs=config.logs_specs, # type: ignore[arg-type] + start_method=config.start_method, + log_line_prefix_template=config.log_line_prefix_template, + ) + + shutdown_rdzv = True + try: + metrics.initialize_metrics(metrics.MetricsConfig(config.metrics_cfg)) + + result = agent.run() + # records that agent.run() has succeeded NOT that workers have succeeded + events.record(agent.get_event_succeeded()) + + if result.is_failed(): + # ChildFailedError is treated specially by @record + # if the error files for the failed children exist + # 
@record will copy the first error (root cause) + # to the error file of the launcher process. + raise ChildFailedError( + name=entrypoint_name, + failures=result.failures, + ) + + return result.return_values + except ChildFailedError: + raise + except SignalException: + # when the agent dies with a signal do NOT shutdown the rdzv_handler + # since this closes the rendezvous on this rdzv_id permanently and + # prevents any additional scaling events + shutdown_rdzv = False + events.record(agent.get_event_failed()) + raise + except Exception: + events.record(agent.get_event_failed()) + raise + finally: + if shutdown_rdzv: + spec.rdzv_handler.shutdown() diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/__init__.py b/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01a12ca3cc8e1e362a44d9113c7d21b0d757659c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/remote_module.py b/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/remote_module.py new file mode 100644 index 0000000000000000000000000000000000000000..4e18fe3245e183d49bba8e9b862ae8c3fe7402dc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/distributed/nn/api/remote_module.py @@ -0,0 +1,762 @@ +#!/usr/bin/python3 +# mypy: allow-untyped-defs +import collections +import io +import sys +import types +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Mapping, + Optional, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +import torch +import torch.distributed.rpc as rpc +from torch import device, dtype, nn, Tensor +from torch.distributed import _remote_device +from torch.distributed.nn.jit import instantiator +from torch.distributed.rpc.internal import _internal_rpc_pickler +from torch.nn import Module +from torch.nn.parameter import Parameter +from torch.utils.hooks import RemovableHandle + + +__all__ = ["RemoteModule"] + +_grad_t = Union[Tuple[Tensor, ...], Tensor] +# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use +# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be +# the type of the subclass, not the looser type of `Module`. +T = TypeVar("T", bound="Module") + +_NON_SCRIPTABLE_REMOTE_MODULE_MODULE = ( + instantiator.instantiate_non_scriptable_remote_module_template() +) + +_REMOTE_MODULE_PICKLED_ATTRIBUTES = ( + "on", + "device", + "is_device_map_set", + "is_scriptable", + "generated_methods", + "module_rref", +) + +_SerializedRemoteModule = collections.namedtuple("_SerializedRemoteModule", _REMOTE_MODULE_PICKLED_ATTRIBUTES) # type: ignore[misc] + +# These attributes are mostly from RemoteModule's parent class and are intentionally not pickled. +# A new attribute of RemoteModule should be either in _REMOTE_MODULE_PICKLED_ATTRIBUTES +# or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING. +# Otherwise, it will not be pickled. 
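+# (For instance, a hypothetical new attribute ``self._foo`` set in __init__
+# would need to be listed in one of the two tuples above/below; see
+# _check_attribute_picklability() invoked at the end of __init__.)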
+_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING = (
+    "training",
+    "_parameters",
+    "_buffers",
+    "_non_persistent_buffers_set",
+    "_backward_hooks",
+    "_backward_pre_hooks",
+    "_is_full_backward_hook",
+    "_forward_hooks",
+    "_forward_hooks_with_kwargs",
+    "_forward_hooks_always_called",
+    "_forward_pre_hooks",
+    "_forward_pre_hooks_with_kwargs",
+    "_state_dict_hooks",
+    "_state_dict_pre_hooks",
+    "_load_state_dict_pre_hooks",
+    "_load_state_dict_post_hooks",
+    "_modules",
+    # The two attributes below are generated methods, not available at pickling time.
+    "forward_async",
+    "forward",
+)
+
+
+# RPC handler.
+def _instantiate_template(module_interface_cls, enable_moving_cpu_tensors_to_cuda):
+    instantiator.instantiate_scriptable_remote_module_template(
+        module_interface_cls, enable_moving_cpu_tensors_to_cuda
+    )
+
+
+def _create_module(module_cls, args, kwargs, device):
+    module = module_cls(*args, **kwargs)
+    if not isinstance(module, nn.Module):
+        raise ValueError(
+            "Expect `module_cls(*args, **kwargs)` returns an instance of <class nn.Module>, "
+            f"but it returns an instance of {type(module)}."
+        )
+    module.to(device)
+    return module
+
+
+def _create_module_with_interface(
+    module_cls, args, kwargs, device, module_interface_cls
+):
+    module = _create_module(module_cls, args, kwargs, device)
+    if module_interface_cls is not None:
+        module = torch.jit.script(module)
+    return rpc.RRef(module, module_interface_cls)
+
+
+def _param_rrefs(module_rref, recurse) -> List[rpc.RRef[Parameter]]:
+    ret: List[rpc.RRef[Parameter]] = []
+    for param in module_rref.local_value().parameters(recurse):
+        ret.append(rpc.RRef(param))
+    return ret
+
+
+def _raise_not_supported(name: str) -> None:
+    raise ValueError(f"Method ``{name}`` not supported for RemoteModule")
+
+
+class _RemoteModule(nn.Module):
+    def __new__(cls, *args, **kwargs):
+        # Use __new__ for logging purposes.
+        torch._C._log_api_usage_once("torch.distributed.nn.api.remote_module")
+        return super().__new__(cls)
+
+    def __init__(
+        self,
+        remote_device: str,
+        module_cls: Type[nn.Module],
+        args: Optional[Tuple] = None,
+        kwargs: Optional[Dict[str, Any]] = None,
+        _module_interface_cls: Any = None,
+    ):
+        """
+        A RemoteModule instance can only be created after RPC initialization.
+
+        It creates a user-specified module on a specified remote node.
+        It behaves like a regular ``nn.Module`` except that the ``forward`` method is
+        executed on the remote node.
+        It takes care of autograd recording to ensure the backward pass propagates
+        gradients back to the corresponding remote module.
+        It can be shared across processes using the `RPC framework <https://pytorch.org/docs/stable/rpc.html>`__,
+        without incurring any overheads of copying the actual module,
+        which is equivalent to an :class:`~torch.distributed.rpc.RRef`
+        pointing to the remote module.
+
+        The arguments of ``forward_async`` and ``forward`` are the same as
+        the ``forward`` method of the module returned by the ``module_cls``.
+
+        Apart from ``forward_async`` and ``forward``, no other methods are supported from nn.Module for now.
+
+        Particularly, to create a hybrid model, typically the local modules should be
+        created outside of remote modules, rather than as submodules of any remote module (by calling ``add_module``).
+        Hybrid Example:
+        >>> class HybridModel(nn.Module):
+        >>>     def __init__(self) -> None:
+        >>>         nn.Module.__init__(self)
+        >>>         self.remote_embedding = RemoteModule(...)
+        >>>         self.local_linear = nn.Linear(...)
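+        >>>
+        >>>     # An illustrative forward pass for such a hybrid model might
+        >>>     # mix the blocking remote call with local compute:
+        >>>     def forward(self, x):
+        >>>         return self.local_linear(self.remote_embedding.forward(x))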
+ + For example, if ``module_cls`` returns an instance of ``nn.Linear``, + that has a ``forward`` method with the signature ``def forward(input: Tensor) -> Tensor:``, + the generated ``RemoteModule`` will have 2 methods with the signatures + ``def forward(input: Tensor) -> Tensor:`` and + ``def forward_async(input: Tensor) -> Future[Tensor]:``. + + .. note:: + If the remote module is placed on a cuda device, + any input CPU tensors will be automatically moved to the same cuda device, + and GPU tensors are returned over the wire according to the device map of the remote worker on the TensorPipe RPC backend. + + Args: + remote_device (str): Device on the destination worker where we'd like to place this module. + The device can be a local device or a remote device specified by one of the following remote + formats: + + 1. "rank:<rank>/<device>" (ex: "rank:0/cuda:0"). + 2. "<worker_name>/<device>" (ex: "trainer0/cuda:0"). + + In addition, the device field is optional; the default value is "cpu". + module_cls (nn.Module): Class for the module to be created remotely. For example, + >>> class MyModule(nn.Module): + >>> def forward(input): + >>> return input + 1 + >>> + >>> module_cls = MyModule + args (Sequence, optional): args to be passed to ``module_cls``. + kwargs (Dict, optional): kwargs to be passed to ``module_cls``. + _module_interface_cls (type, optional): The TorchScript interface type for the module + to be created. The type object should be decorated by @torch.jit.interface. + If not provided, the generated RemoteModule is not torchscript-able. + Warning: this is an experimental API and susceptible to frequent changes. + + Returns: + A remote module instance which wraps the :class:`~nn.Module` created by the + user-provided ``module_cls``; it has a blocking ``forward`` method and an + asynchronous ``forward_async`` method that returns a future of the ``forward`` call + on the user-provided module on the remote side. + + Example:: + Run the following code in two different processes: + + >>> # xdoctest: +SKIP("distributed") + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> from torch import nn, Tensor + >>> from torch.distributed.nn.api.remote_module import RemoteModule + >>> + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> remote_linear_module = RemoteModule( + >>> "worker1/cpu", nn.Linear, args=(20, 30), + >>> ) + >>> input = torch.randn(128, 20) + >>> ret_fut = remote_linear_module.forward_async(input) + >>> ret = ret_fut.wait() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + """ + super().__init__() + + enable_moving_cpu_tensors_to_cuda = self._prepare_init(remote_device) + + # Default arguments preparation. + args = args if args is not None else () + kwargs = kwargs if kwargs is not None else {} + + if _module_interface_cls is not None: + # Users rely on this field to know if this generated RemoteModule is TorchScript-able. + self.is_scriptable = True + + # Instantiate template on remote side. + fut = rpc.rpc_async( + self.on, + _instantiate_template, + (_module_interface_cls, enable_moving_cpu_tensors_to_cuda), + ) + + self._init_template( + _module_interface_cls, enable_moving_cpu_tensors_to_cuda + ) + + # Create the module on the remote side. + fut.wait() # Ensure remote_module_cls is available on remote side.
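In the scriptable branch above, ``_module_interface_cls`` is a TorchScript interface describing the remote module's ``forward``. A hedged sketch of such an interface (``MyModuleInterface`` is an illustrative name, not part of torch); it would be passed as the ``_module_interface_cls`` argument of ``_RemoteModule`` to make the generated wrapper TorchScript-able:

    import torch
    from torch import Tensor

    @torch.jit.interface
    class MyModuleInterface:
        def forward(self, input: Tensor) -> Tensor:
            pass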
+ + # TODO: We need to change this to rpc.remote, and make it async (see the else branch below). + # For that we need to be able to apply _module_interface_cls to the RRef returned by rpc.remote + # See https://github.com/pytorch/pytorch/issues/58098 for more context. + self.module_rref = rpc.rpc_sync( + self.on, + _create_module_with_interface, + (module_cls, args, kwargs, self.device, _module_interface_cls), + ) + else: + self.is_scriptable = False + self.generated_methods = ( + _NON_SCRIPTABLE_REMOTE_MODULE_MODULE._generated_methods + ) + # Create the module on the remote side. + self.module_rref = rpc.remote( + self.on, + _create_module, + (module_cls, args, kwargs, self.device), + ) + + self._install_generated_methods() + self._check_attribute_picklability() + + def remote_parameters(self, recurse: bool = True) -> List[rpc.RRef[Parameter]]: + """ + Return a list of :class:`~torch.distributed.rpc.RRef` pointing to the remote module's parameters. + + This can typically be used in conjunction + with :class:`~torch.distributed.optim.DistributedOptimizer`. + + Args: + recurse (bool): if True, then returns parameters of the remote + module and all submodules of the remote module. Otherwise, + returns only parameters that are direct members of the + remote module. + + Returns: + A list of :class:`~torch.distributed.rpc.RRef` (``List[RRef[nn.Parameter]]``) + to remote module's parameters. + """ + return rpc.rpc_sync(self.on, _param_rrefs, args=(self.module_rref, recurse)) + + def get_module_rref(self) -> rpc.RRef[nn.Module]: + """Return an :class:`~torch.distributed.rpc.RRef` (``RRef[nn.Module]``) pointing to the remote module.""" + return self.module_rref + + @torch.jit.export + def __getstate__(self): + raise RuntimeError( + "Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC" + ) + + @torch.jit.export + def __setstate__(self, state): + raise RuntimeError( + "Cannot unpickle RemoteModule in python pickler. 
RemoteModule can only be unpickled when using RPC" + ) + + def register_buffer( + self, name: str, tensor: Optional[Tensor], persistent: bool = True + ) -> None: + _raise_not_supported(self.register_buffer.__name__) + + def register_parameter(self, name: str, param: Optional[Parameter]) -> None: + _raise_not_supported(self.register_parameter.__name__) + + def add_module(self, name: str, module: Optional[Module]) -> None: + _raise_not_supported(self.add_module.__name__) + + def apply(self: T, fn: Callable[[Module], None]) -> T: # type: ignore[return] + _raise_not_supported(self.apply.__name__) + + def cuda(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return] + _raise_not_supported(self.cuda.__name__) + + def ipu(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return] + _raise_not_supported(self.ipu.__name__) + + def xpu(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return] + _raise_not_supported(self.xpu.__name__) + + def cpu(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.cpu.__name__) + + def type(self: T, dst_type: Union[dtype, str]) -> T: # type: ignore[return] + _raise_not_supported(self.type.__name__) + + def float(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.float.__name__) + + def double(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.double.__name__) + + def half(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.half.__name__) + + def bfloat16(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.bfloat16.__name__) + + def to(self, *args, **kwargs) -> T: # type: ignore[misc, return, type-var] + _raise_not_supported(self.to.__name__) + + def register_backward_hook( # type: ignore[return] + self, hook: Callable[[Module, _grad_t, _grad_t], Union[None, _grad_t]] + ) -> RemovableHandle: + _raise_not_supported(self.register_backward_hook.__name__) + + def register_forward_pre_hook( # type: ignore[return] + self, + hook: Union[ + Callable[[T, Tuple[Any, ...]], Optional[Any]], + Callable[ + [T, Tuple[Any, ...], Dict[str, Any]], + Optional[Tuple[Any, Dict[str, Any]]], + ], + ], + prepend: bool = False, + with_kwargs: bool = False, + ) -> RemovableHandle: + _raise_not_supported(self.register_forward_pre_hook.__name__) + + def register_forward_hook( # type: ignore[return, override] + self, + hook: Union[ + Callable[[T, Tuple[Any, ...], Any], Optional[Any]], + Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]], + ], + prepend: bool = False, + with_kwargs: bool = False, + ) -> RemovableHandle: + _raise_not_supported(self.register_forward_hook.__name__) + + def state_dict(self, *args, **kwargs): + _raise_not_supported(self.state_dict.__name__) + + def load_state_dict( + self, + state_dict: Mapping[str, Any], + strict: bool = True, + assign: bool = False, + ): + _raise_not_supported(self.load_state_dict.__name__) + + def parameters(self, recurse: bool = True) -> Iterator[Parameter]: + raise ValueError( + "Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead." 
+ ) + + def named_parameters( # type: ignore[return] + self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True + ) -> Iterator[Tuple[str, Parameter]]: + _raise_not_supported(self.named_parameters.__name__) + + def buffers(self, recurse: bool = True) -> Iterator[Tensor]: # type: ignore[return] + _raise_not_supported(self.buffers.__name__) + + def named_buffers( # type: ignore[return] + self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True + ) -> Iterator[Tuple[str, Tensor]]: + _raise_not_supported(self.named_buffers.__name__) + + def children(self) -> Iterator[Module]: # type: ignore[return] + _raise_not_supported(self.children.__name__) + + def named_children(self) -> Iterator[Tuple[str, Module]]: # type: ignore[return] + _raise_not_supported(self.named_children.__name__) + + def modules(self) -> Iterator[Module]: # type: ignore[return] + _raise_not_supported(self.modules.__name__) + + def named_modules( + self, + memo: Optional[Set[Module]] = None, + prefix: str = "", + remove_duplicate: bool = True, + ): + _raise_not_supported(self.named_modules.__name__) + + def train(self: T, mode: bool = True) -> T: + return self.module_rref.rpc_sync().train(mode) # type: ignore[operator, union-attr] + + def eval(self: T) -> T: + return self.module_rref.rpc_sync().eval() # type: ignore[operator, union-attr] + + def requires_grad_(self: T, requires_grad: bool = True) -> T: # type: ignore[return] + _raise_not_supported(self.requires_grad_.__name__) + + def zero_grad(self, set_to_none: bool = True) -> None: + _raise_not_supported(self.zero_grad.__name__) + + def share_memory(self: T) -> T: # type: ignore[return] + _raise_not_supported(self.share_memory.__name__) + + def extra_repr(self) -> str: # type: ignore[return] + _raise_not_supported(self.extra_repr.__name__) + + def _prepare_init(self, remote_device_str: str) -> bool: + """Prepare the initialization and return whether to enable automatically moving CPU tensors to CUDA devices.""" + # Sanity check. + assert rpc._is_current_rpc_agent_set(), "RemoteModule only works in RPC." + + remote_device = _remote_device(remote_device_str) + self.on = ( + remote_device.worker_name() + if remote_device.worker_name() is not None + else remote_device.rank() + ) + self.device = str(remote_device.device()) + agent = rpc._get_current_rpc_agent() + # If the device map of the remote worker is set, + # then enable moving any input CPU tensors to the same cuda device. + self.is_device_map_set = bool( + agent._get_device_map(agent.get_worker_info(self.on)) # type: ignore[arg-type] + ) + # ``enable_moving_cpu_tensors_to_cuda`` is less strict than ``is_device_map_set``: + # If ``enable_moving_cpu_tensors_to_cuda`` is true, but the device map is not set, + # then any CPU tensors can still be moved to a cuda device to run forward, + # but the output must be moved back to CPU before being sent over the wire.
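The device-map check above (``agent._get_device_map(...)``) reflects configuration done before ``init_rpc``. A minimal sketch of enabling it on the TensorPipe backend, assuming two workers named "worker0" and "worker1" with one GPU each:

    import torch.distributed.rpc as rpc

    options = rpc.TensorPipeRpcBackendOptions()
    # Route tensors on our cuda:0 to cuda:0 on "worker1"; once a device map is
    # set, is_device_map_set above becomes True and CUDA tensors can be sent
    # over the wire directly instead of being copied back to CPU first.
    options.set_device_map("worker1", {0: 0})
    rpc.init_rpc("worker0", rank=0, world_size=2, rpc_backend_options=options)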
+ enable_moving_cpu_tensors_to_cuda = torch.device(self.device).type == "cuda" + return enable_moving_cpu_tensors_to_cuda + + def _init_template(self, module_interface_cls, enable_moving_cpu_tensors_to_cuda): + """Instantiate template on local side.""" + generated_module = instantiator.instantiate_scriptable_remote_module_template( + module_interface_cls, enable_moving_cpu_tensors_to_cuda + ) + self.generated_methods = generated_module._generated_methods + + def _check_attribute_picklability(self): + """Check that every attribute has explicitly declared picklability, i.e., appears in one of the two attribute tuples above.""" + for k in self.__dict__.keys(): + if ( + k not in _REMOTE_MODULE_PICKLED_ATTRIBUTES + and k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING + ): + raise AttributeError( + f"Attribute {k} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or " + "``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``." + ) + + def _install_generated_methods(self): + for method in self.generated_methods: + method_name = method.__name__ + method = torch.jit.export(method) + setattr(self, method_name, types.MethodType(method, self)) + + @staticmethod + def init_from_module_rref( + remote_device: str, + module_rref: rpc.RRef[nn.Module], + _module_interface_cls: Any = None, + ): + """ + Besides the constructor, a RemoteModule instance can also be initialized given a module RRef. + + This alternate initialization method can be particularly useful if we want to create multiple + RemoteModule instances that share the same underlying module and reduce memory consumption. + + Moreover, this also provides a workaround for passing script RemoteModule over RPC, + which is not supported. The recommended way is as follows: + + 1. the sender creates a RemoteModule; + 2. the sender sends its ``module_rref`` over RPC; + 3. the receiver calls this method to initialize another RemoteModule using the same ``module_rref``. + + Example:: + Run the following code in two different processes: + + >>> # xdoctest: +SKIP("distributed") + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> from torch import nn, Tensor + >>> from torch.distributed.nn.api.remote_module import RemoteModule + >>> + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> remote_module = RemoteModule( + >>> "worker1/cpu", nn.Linear, args=(20, 30), + >>> ) + >>> + >>> remote_module1 = rpc.rpc_sync( + >>> "worker1", + >>> RemoteModule.init_from_module_rref, + >>> ("worker1/cpu", remote_module.get_module_rref()), + >>> ) + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Args: + remote_device (str): Device on the destination worker where we'd like to place this module. + The device can be a local device or a remote device specified by one of the following remote + formats: + + 1. "rank:<rank>/<device>" (ex: "rank:0/cuda:0"). + 2. "<worker_name>/<device>" (ex: "trainer0/cuda:0"). + + In addition, the device field is optional; the default value is "cpu". + module_rref (RRef[nn.Module]): The module reference shared by both the caller and + the created remote module. + _module_interface_cls (type, optional): The TorchScript interface type for the module + to be created. The type object should be decorated by @torch.jit.interface. + If not provided, the generated RemoteModule is not torchscript-able. + Warning: this is an experimental API and susceptible to frequent changes.
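Complementing the docstring example above, a local-reuse sketch under the same assumptions (worker names are illustrative): a single process can hold several lightweight wrappers over one remote module:

    import torch.distributed.rpc as rpc
    from torch import nn
    from torch.distributed.nn.api.remote_module import RemoteModule

    rpc.init_rpc("worker0", rank=0, world_size=2)
    rm = RemoteModule("worker1/cpu", nn.Linear, args=(20, 30))
    # A second handle to the very same remote nn.Linear; no new module is created.
    rm2 = RemoteModule.init_from_module_rref("worker1/cpu", rm.get_module_rref())
    rpc.shutdown()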
+ + Returns: + A remote module instance which wraps the :class:`~nn.Module` created by the + user-provided ``module_rref``; it has a blocking ``forward`` method and an + asynchronous ``forward_async`` method that returns a future of the ``forward`` call + on the user-provided module on the remote side. + """ + # NOTE: if a new attribute is added to this class, it also needs to be added + # to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` for pickling/unpickling. + + remote_module = object.__new__(RemoteModule) + + enable_moving_cpu_tensors_to_cuda = remote_module._prepare_init(remote_device) + + if _module_interface_cls is not None: + # Users rely on this field to know if this generated RemoteModule is TorchScript-able. + remote_module.is_scriptable = True + + remote_module._init_template( + _module_interface_cls, enable_moving_cpu_tensors_to_cuda + ) + else: + remote_module.is_scriptable = False + remote_module.generated_methods = ( + _NON_SCRIPTABLE_REMOTE_MODULE_MODULE._generated_methods + ) + remote_module.module_rref = module_rref + + remote_module._install_generated_methods() + remote_module._check_attribute_picklability() + + return remote_module + + +class RemoteModule(_RemoteModule): + """ + A RemoteModule instance can only be created after RPC initialization. + + It creates a user-specified module on a specified remote node. + It behaves like a regular ``nn.Module`` except that the ``forward`` method is + executed on the remote node. + It takes care of autograd recording to ensure the backward pass propagates + gradients back to the corresponding remote module. + + It generates two methods ``forward_async`` and ``forward`` based on the + signature of the ``forward`` method of ``module_cls``. ``forward_async`` + runs asynchronously and returns a Future. The arguments of ``forward_async`` + and ``forward`` are the same as the ``forward`` method of the module + returned by the ``module_cls``. + + For example, if ``module_cls`` returns an instance of ``nn.Linear``, + that has a ``forward`` method with the signature ``def forward(input: Tensor) -> Tensor:``, + the generated ``RemoteModule`` will have 2 methods with the signatures: + + | ``def forward(input: Tensor) -> Tensor:`` + | ``def forward_async(input: Tensor) -> Future[Tensor]:`` + + Args: + remote_device (str): Device on the destination worker where we'd like to place this module. + The format should be "<workername>/<device>", where the device field can be parsed as torch.device type. + E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0". + In addition, the device field is optional; the default value is "cpu". + module_cls (nn.Module): Class for the module to be created remotely. For example, + + >>> class MyModule(nn.Module): + >>> def forward(input): + >>> return input + 1 + >>> + >>> module_cls = MyModule + + args (Sequence, optional): args to be passed to ``module_cls``. + kwargs (Dict, optional): kwargs to be passed to ``module_cls``. + + Returns: + A remote module instance which wraps the :class:`~nn.Module` created by the + user-provided ``module_cls``; it has a blocking ``forward`` method and an + asynchronous ``forward_async`` method that returns a future of the ``forward`` call + on the user-provided module on the remote side.
+ + Example:: + Run the following code in two different processes: + + >>> # xdoctest: +SKIP("distributed") + >>> # On worker 0: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> from torch import nn, Tensor + >>> from torch.distributed.nn.api.remote_module import RemoteModule + >>> + >>> rpc.init_rpc("worker0", rank=0, world_size=2) + >>> remote_linear_module = RemoteModule( + >>> "worker1/cpu", nn.Linear, args=(20, 30), + >>> ) + >>> input = torch.randn(128, 20) + >>> ret_fut = remote_linear_module.forward_async(input) + >>> ret = ret_fut.wait() + >>> rpc.shutdown() + + >>> # On worker 1: + >>> import torch + >>> import torch.distributed.rpc as rpc + >>> + >>> rpc.init_rpc("worker1", rank=1, world_size=2) + >>> rpc.shutdown() + + Furthermore, a more practical example that is combined with + `DistributedDataParallel <https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel>`__ (DDP) + can be found in this `tutorial <https://pytorch.org/tutorials/advanced/rpc_ddp_tutorial.html>`__. + """ + + def __init__( + self, + remote_device: str, + module_cls: Type[nn.Module], + args: Optional[Tuple] = None, + kwargs: Optional[Dict[str, Any]] = None, + ): + super().__init__(remote_device, module_cls, args, kwargs) + + +def _remote_module_receiver( + *remote_module_pickled_attrs, +): + """Deserialize a RemoteModule.""" + serialized_remote_module = _SerializedRemoteModule._make( + remote_module_pickled_attrs + ) + m = object.__new__(RemoteModule) + m.__dict__.update(serialized_remote_module._asdict()) + + # Unpickling the attribute `module_rref` must invoke RRef's `_deserialize()` method. + m.module_rref = rpc.PyRRef._deserialize(m.module_rref) + + # Install generated methods when unpickled. + for method in m.generated_methods: + method_name = method.__name__ + method = torch.jit.export(method) + setattr(m, method_name, types.MethodType(method, m)) + + return m + + +def _remote_module_reducer(remote_module): + """Serialize a RemoteModule.""" + pickled_attrs = {} + for k, v in remote_module.__dict__.items(): + # Pickling the attribute `module_rref` must invoke RRef's `_serialize()` method. + if k == "module_rref": + pickled_attrs[k] = v._serialize() + elif k in _REMOTE_MODULE_PICKLED_ATTRIBUTES: + pickled_attrs[k] = v + # Check that any attribute not being pickled is listed in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING. + elif k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING: + print( + f"The new attribute ``{k}`` of RemoteModule is ignored during RPC pickling. " + "To pickle this attribute, please add it to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES``. " + "Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.", + file=sys.stderr, + ) + + return ( + _remote_module_receiver, + tuple(pickled_attrs.values()), + ) + + +def _recursive_script_module_receiver( + recursive_script_module_serialized, +): + """Deserialize a RecursiveScriptModule that does not contain a script RemoteModule.""" + f = io.BytesIO(recursive_script_module_serialized) + m = torch.jit.load(f) + return m + + +def _recursive_script_module_reducer(recursive_script_module): + """Serialize a RecursiveScriptModule that does not contain a script RemoteModule, and raise an error otherwise.""" + if hasattr(recursive_script_module._c, "module_rref"): + raise RuntimeError( + "Passing a script RemoteModule over RPC is not supported. Please create a RemoteModule in the sender, " + "send the `module_rref` to the receiver, and create a new instance on the receiver end by passing this `module_rref`."
+ ) + + f = io.BytesIO() + torch.jit.save(recursive_script_module, f) + return (_recursive_script_module_receiver, (f.getvalue(),)) + + +_internal_rpc_pickler._register_reducer(RemoteModule, _remote_module_reducer) +_internal_rpc_pickler._register_reducer( + torch.jit.RecursiveScriptModule, _recursive_script_module_reducer +)
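Since ``parameters()`` is intentionally blocked in favor of ``remote_parameters()``, training a RemoteModule goes through distributed autograd and :class:`~torch.distributed.optim.DistributedOptimizer`. A hedged end-to-end sketch of that documented pattern; worker names, tensor shapes, and the learning rate are assumptions:

    import torch
    import torch.distributed.autograd as dist_autograd
    import torch.distributed.rpc as rpc
    from torch import nn
    from torch.distributed.nn.api.remote_module import RemoteModule
    from torch.distributed.optim import DistributedOptimizer

    rpc.init_rpc("worker0", rank=0, world_size=2)
    rm = RemoteModule("worker1/cpu", nn.Linear, args=(20, 30))
    # RRefs to the remote parameters feed the distributed optimizer.
    opt = DistributedOptimizer(torch.optim.SGD, rm.remote_parameters(), lr=0.05)
    with dist_autograd.context() as ctx:
        loss = rm.forward(torch.randn(16, 20)).sum()
        dist_autograd.backward(ctx, [loss])  # runs backward on the owning workers
        opt.step(ctx)                        # applies SGD to the remote parameters
    rpc.shutdown()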