Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_checkpointer.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_version.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logger.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logging_handlers.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/staging.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_checkpointer.py +100 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py +137 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py +70 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py +107 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_version.py +6 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py +43 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/default_planner.py +546 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/format_utils.py +280 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logger.py +103 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logging_handlers.py +15 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/optimizer.py +356 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/planner_helpers.py +386 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/resharding.py +72 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/staging.py +117 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_loader.py +316 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_saver.py +333 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/stateful.py +42 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/storage.py +284 -0
- vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/utils.py +431 -0
- vllm/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py +77 -0
- vllm/lib/python3.10/site-packages/torch/distributed/elastic/control_plane.py +52 -0
- vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py +233 -0
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (967 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_checkpointer.cpython-310.pyc
ADDED
|
Binary file (3.87 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_save_plans.cpython-310.pyc
ADDED
|
Binary file (1.97 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_dedup_tensors.cpython-310.pyc
ADDED
|
Binary file (1.92 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_fsspec_filesystem.cpython-310.pyc
ADDED
|
Binary file (5.27 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_nested_dict.cpython-310.pyc
ADDED
|
Binary file (1.89 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_sharded_tensor_utils.cpython-310.pyc
ADDED
|
Binary file (2.77 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_storage_utils.cpython-310.pyc
ADDED
|
Binary file (1.25 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_traverse.cpython-310.pyc
ADDED
|
Binary file (5.68 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/_version.cpython-310.pyc
ADDED
|
Binary file (282 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/api.cpython-310.pyc
ADDED
|
Binary file (2.01 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/default_planner.cpython-310.pyc
ADDED
|
Binary file (14.5 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/filesystem.cpython-310.pyc
ADDED
|
Binary file (24.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/format_utils.cpython-310.pyc
ADDED
|
Binary file (9.19 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logger.cpython-310.pyc
ADDED
|
Binary file (2.61 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/logging_handlers.cpython-310.pyc
ADDED
|
Binary file (450 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/metadata.cpython-310.pyc
ADDED
|
Binary file (4.67 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/optimizer.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner.cpython-310.pyc
ADDED
|
Binary file (16.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/planner_helpers.cpython-310.pyc
ADDED
|
Binary file (10 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/resharding.cpython-310.pyc
ADDED
|
Binary file (1.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/staging.cpython-310.pyc
ADDED
|
Binary file (5.29 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict.cpython-310.pyc
ADDED
|
Binary file (37.9 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_loader.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/state_dict_saver.cpython-310.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/stateful.cpython-310.pyc
ADDED
|
Binary file (1.54 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/storage.cpython-310.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (13.1 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_checkpointer.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from concurrent.futures import Future
|
| 2 |
+
from typing import Any, Dict, List, Optional
|
| 3 |
+
|
| 4 |
+
import torch.distributed as dist
|
| 5 |
+
import torch.distributed.checkpoint.state_dict_loader as loader
|
| 6 |
+
import torch.distributed.checkpoint.state_dict_saver as saver
|
| 7 |
+
from torch.distributed.checkpoint.metadata import Metadata, STATE_DICT_TYPE
|
| 8 |
+
from torch.distributed.checkpoint.storage import (
|
| 9 |
+
LoadPlanner,
|
| 10 |
+
SavePlanner,
|
| 11 |
+
StorageReader,
|
| 12 |
+
StorageWriter,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
__all__: List[str] = []
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class _Checkpointer:
|
| 20 |
+
"""This base class specefies a high level API for saving and loading
|
| 21 |
+
distributed `state_dict` 's. It provides an abstraction over the low-level APIs
|
| 22 |
+
provided by :py:mod:`torch.distributed.checkpoint.storage`, essentially calling
|
| 23 |
+
:py:meth: `torch.distributed.state_dict_saver.save` and
|
| 24 |
+
:py:meth: `torch.distributed.state_dict_loader.load` with the provided storage
|
| 25 |
+
readers and writers.
|
| 26 |
+
|
| 27 |
+
.. warning::
|
| 28 |
+
This feature is experimental and subject to removal/change.
|
| 29 |
+
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
def __init__(
|
| 33 |
+
self,
|
| 34 |
+
storage_writer: StorageWriter,
|
| 35 |
+
storage_reader: StorageReader,
|
| 36 |
+
*,
|
| 37 |
+
process_group: Optional[dist.ProcessGroup] = None,
|
| 38 |
+
coordinator_rank: int = 0,
|
| 39 |
+
no_dist: bool = False,
|
| 40 |
+
load_planner: Optional[LoadPlanner] = None,
|
| 41 |
+
save_planner: Optional[SavePlanner] = None,
|
| 42 |
+
):
|
| 43 |
+
"""Initializes the Checkpointer instance.
|
| 44 |
+
|
| 45 |
+
Args:
|
| 46 |
+
storage_writer: Instance of StorageWrite use to perform writes.
|
| 47 |
+
storage_reader: StorageReader used to load data from.
|
| 48 |
+
process_group: ProcessGroup to be used for cross-rank synchronization.
|
| 49 |
+
coordinator_rank: Rank to use to coordinate the checkpoint. rank0 is used by default.
|
| 50 |
+
no_dist: If ``True``, distributed checkpoint will not load in SPMD style. (Default: ``False``)
|
| 51 |
+
loader_planner: Instance of LoadPlanner to use when loading.
|
| 52 |
+
save_planner: Instance of SavePlanner to use when saving.
|
| 53 |
+
"""
|
| 54 |
+
self.storage_writer = storage_writer
|
| 55 |
+
self.storage_reader = storage_reader
|
| 56 |
+
self.process_group = process_group
|
| 57 |
+
self.coordinator_rank = coordinator_rank
|
| 58 |
+
self.no_dist = no_dist
|
| 59 |
+
self.load_planner = load_planner
|
| 60 |
+
self.save_planner = save_planner
|
| 61 |
+
|
| 62 |
+
def save(
|
| 63 |
+
self,
|
| 64 |
+
state_dict: STATE_DICT_TYPE,
|
| 65 |
+
) -> Metadata:
|
| 66 |
+
"""Calls :py:meth: `torch.distributed.state_dict_saver.save`. Utilizing values passed during initialization."""
|
| 67 |
+
return saver.save(
|
| 68 |
+
state_dict,
|
| 69 |
+
self.storage_writer,
|
| 70 |
+
process_group=self.process_group,
|
| 71 |
+
coordinator_rank=self.coordinator_rank,
|
| 72 |
+
no_dist=self.no_dist,
|
| 73 |
+
planner=self.save_planner,
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
def async_save(
|
| 77 |
+
self,
|
| 78 |
+
state_dict: STATE_DICT_TYPE,
|
| 79 |
+
) -> Future:
|
| 80 |
+
"""
|
| 81 |
+
Calls :py:meth: `torch.distributed.state_dict_saver._async_save`. Utilizing values passed during initialization.
|
| 82 |
+
|
| 83 |
+
Returns:
|
| 84 |
+
Future: A future holding the resultant Metadata object from `save`.
|
| 85 |
+
"""
|
| 86 |
+
return saver.async_save(
|
| 87 |
+
state_dict,
|
| 88 |
+
storage_writer=self.storage_writer,
|
| 89 |
+
process_group=self.process_group,
|
| 90 |
+
planner=self.save_planner,
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
def load(self, state_dict: Dict[str, Any]) -> None:
|
| 94 |
+
"""Calls :py:meth: `torch.distributed.state_dict_loader.load`. Utilizing values passed during initialization."""
|
| 95 |
+
loader.load(
|
| 96 |
+
state_dict,
|
| 97 |
+
storage_reader=self.storage_reader,
|
| 98 |
+
process_group=self.process_group,
|
| 99 |
+
planner=self.load_planner,
|
| 100 |
+
)
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_fsspec_filesystem.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Mypy will not try inferring the types of any 3rd party libraries installed.
|
| 2 |
+
# mypy: ignore-errors
|
| 3 |
+
|
| 4 |
+
import io
|
| 5 |
+
import os
|
| 6 |
+
from contextlib import contextmanager
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Generator, Optional, Union
|
| 9 |
+
|
| 10 |
+
import fsspec
|
| 11 |
+
from fsspec import AbstractFileSystem
|
| 12 |
+
from fsspec.core import url_to_fs
|
| 13 |
+
|
| 14 |
+
from torch.distributed.checkpoint.filesystem import (
|
| 15 |
+
FileSystemBase,
|
| 16 |
+
FileSystemReader,
|
| 17 |
+
FileSystemWriter,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
__all__ = [
|
| 22 |
+
"FsspecWriter",
|
| 23 |
+
"FsspecReader",
|
| 24 |
+
]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class FileSystem(FileSystemBase):
|
| 28 |
+
def __init__(self) -> None:
|
| 29 |
+
self.fs: Optional[AbstractFileSystem] = None
|
| 30 |
+
|
| 31 |
+
@contextmanager
|
| 32 |
+
def create_stream(
|
| 33 |
+
self, path: Union[str, os.PathLike], mode: str
|
| 34 |
+
) -> Generator[io.IOBase, None, None]:
|
| 35 |
+
assert self.fs is not None
|
| 36 |
+
with self.fs.transaction:
|
| 37 |
+
with fsspec.open(str(path), mode) as stream:
|
| 38 |
+
yield stream
|
| 39 |
+
|
| 40 |
+
def concat_path(
|
| 41 |
+
self, path: Union[str, os.PathLike], suffix: str
|
| 42 |
+
) -> Union[str, os.PathLike]:
|
| 43 |
+
return os.path.join(path, suffix)
|
| 44 |
+
|
| 45 |
+
def init_path(self, path: Union[str, os.PathLike]) -> Union[str, os.PathLike]:
|
| 46 |
+
self.fs, _ = url_to_fs(path)
|
| 47 |
+
return path
|
| 48 |
+
|
| 49 |
+
def rename(
|
| 50 |
+
self, path: Union[str, os.PathLike], new_path: Union[str, os.PathLike]
|
| 51 |
+
) -> None:
|
| 52 |
+
self.fs.rename(path, new_path)
|
| 53 |
+
|
| 54 |
+
def mkdir(self, path: [str, os.PathLike]) -> None:
|
| 55 |
+
self.fs.makedirs(path, exist_ok=True)
|
| 56 |
+
|
| 57 |
+
@classmethod
|
| 58 |
+
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
|
| 59 |
+
if isinstance(checkpoint_id, Path):
|
| 60 |
+
return False
|
| 61 |
+
|
| 62 |
+
try:
|
| 63 |
+
url_to_fs(checkpoint_id)
|
| 64 |
+
except ValueError:
|
| 65 |
+
return False
|
| 66 |
+
|
| 67 |
+
return True
|
| 68 |
+
|
| 69 |
+
def exists(self, path: Union[str, os.PathLike]) -> bool:
|
| 70 |
+
return self.fs.exists(path)
|
| 71 |
+
|
| 72 |
+
def rm_file(self, path: Union[str, os.PathLike]) -> None:
|
| 73 |
+
self.fs.rm(path)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
# TODO: add the dcp.async_save mixin
|
| 77 |
+
class FsspecWriter(FileSystemWriter):
|
| 78 |
+
"""
|
| 79 |
+
Basic implementation of StorageWriter using FFspec.
|
| 80 |
+
|
| 81 |
+
This implementation makes the following assumptions and simplifications:
|
| 82 |
+
|
| 83 |
+
* The checkpoint path is an empty or non-existing directory.
|
| 84 |
+
* File creation is atomic
|
| 85 |
+
|
| 86 |
+
The checkpoint consist of one file per write request plus
|
| 87 |
+
a `.metadata` file with the serialized metadata.
|
| 88 |
+
|
| 89 |
+
"""
|
| 90 |
+
|
| 91 |
+
def __init__(
|
| 92 |
+
self,
|
| 93 |
+
path: Union[str, os.PathLike],
|
| 94 |
+
single_file_per_rank: bool = True,
|
| 95 |
+
sync_files: bool = True,
|
| 96 |
+
thread_count: int = 1,
|
| 97 |
+
per_thread_copy_ahead: int = 10_000_000,
|
| 98 |
+
overwrite: bool = True,
|
| 99 |
+
) -> None:
|
| 100 |
+
"""
|
| 101 |
+
Initialize the writer pointing to `path`.
|
| 102 |
+
|
| 103 |
+
Args:
|
| 104 |
+
path: directory where the checkpoint will be written to.
|
| 105 |
+
single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Default to True.
|
| 106 |
+
sync_files : force files to be synced to permanent storage. Default to True.
|
| 107 |
+
thread_count: Number of IO threads to use to write. Default to 1.
|
| 108 |
+
per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving then. Default 10Mb.
|
| 109 |
+
overwrite: Whether to allow overwriting existing checkpoints. Defaults to True.
|
| 110 |
+
|
| 111 |
+
N. B. If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure.
|
| 112 |
+
"""
|
| 113 |
+
super().__init__(
|
| 114 |
+
path,
|
| 115 |
+
single_file_per_rank,
|
| 116 |
+
sync_files,
|
| 117 |
+
thread_count,
|
| 118 |
+
per_thread_copy_ahead,
|
| 119 |
+
overwrite=overwrite,
|
| 120 |
+
)
|
| 121 |
+
self.fs = FileSystem()
|
| 122 |
+
self.path = self.fs.init_path(path)
|
| 123 |
+
|
| 124 |
+
@classmethod
|
| 125 |
+
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
|
| 126 |
+
return FileSystem.validate_checkpoint_id(checkpoint_id)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class FsspecReader(FileSystemReader):
|
| 130 |
+
def __init__(self, path: Union[str, os.PathLike]) -> None:
|
| 131 |
+
super().__init__(path)
|
| 132 |
+
self.fs = FileSystem()
|
| 133 |
+
self.path = self.fs.init_path(path)
|
| 134 |
+
|
| 135 |
+
@classmethod
|
| 136 |
+
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
|
| 137 |
+
return FileSystem.validate_checkpoint_id(checkpoint_id)
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_nested_dict.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates
|
| 2 |
+
from typing import Dict, Tuple
|
| 3 |
+
|
| 4 |
+
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
|
| 5 |
+
|
| 6 |
+
from . import _version
|
| 7 |
+
from ._traverse import (
|
| 8 |
+
OBJ_PATH,
|
| 9 |
+
set_element,
|
| 10 |
+
STATE_DICT_ITEM,
|
| 11 |
+
traverse_state_dict,
|
| 12 |
+
traverse_state_dict_v_2_3,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
"""
|
| 17 |
+
TODO:
|
| 18 |
+
Need to add ability to handle tuple, OrderedDict, NamedTuple.
|
| 19 |
+
Update mappings from dict to a class.
|
| 20 |
+
Change set_element to recreate the right type for tuple, OrderedDict, and NamedTuple.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
FLATTEN_MAPPING = Dict[str, OBJ_PATH]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# TODO: Update Docstring for nested_dict.py
|
| 28 |
+
def flatten_state_dict(
|
| 29 |
+
state_dict: STATE_DICT_TYPE,
|
| 30 |
+
) -> Tuple[STATE_DICT_TYPE, FLATTEN_MAPPING]:
|
| 31 |
+
"""
|
| 32 |
+
Flatten ``state_dict`` made of nested dicts and lists into a top level dictionary.
|
| 33 |
+
|
| 34 |
+
Use ``unflatten_state_dict`` to revert this process.
|
| 35 |
+
Returns:
|
| 36 |
+
A tuple with the flatten state_dict and a mapping from original to new state_dict.
|
| 37 |
+
N.B. The new keys are derived from the object paths, joined by dot.
|
| 38 |
+
For example: ``{ 'a': {'b':...}}`` results in the key `a.b`.
|
| 39 |
+
"""
|
| 40 |
+
flattened: STATE_DICT_TYPE = {}
|
| 41 |
+
mappings: FLATTEN_MAPPING = {}
|
| 42 |
+
|
| 43 |
+
def flat_copy(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None:
|
| 44 |
+
new_fqn = ".".join(map(str, path))
|
| 45 |
+
if new_fqn in flattened:
|
| 46 |
+
raise ValueError(f"duplicated flatten key {new_fqn}")
|
| 47 |
+
flattened[new_fqn] = value
|
| 48 |
+
mappings[new_fqn] = path
|
| 49 |
+
|
| 50 |
+
# We started to flatten dictionary since v2.4. But in order to not break
|
| 51 |
+
# the checkpoints that were saved before v2.4, we need to keep the old
|
| 52 |
+
# traversal so that we can reconstruct those checkpoints.
|
| 53 |
+
use_v_2_3 = (
|
| 54 |
+
_version._derived_version is not None and _version._derived_version == "2_3"
|
| 55 |
+
)
|
| 56 |
+
if use_v_2_3:
|
| 57 |
+
traverse_state_dict_v_2_3(state_dict, flat_copy)
|
| 58 |
+
else:
|
| 59 |
+
traverse_state_dict(state_dict, flat_copy)
|
| 60 |
+
return flattened, mappings
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def unflatten_state_dict(
|
| 64 |
+
state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING
|
| 65 |
+
) -> STATE_DICT_TYPE:
|
| 66 |
+
"""Restore the original nested state_dict according to ``mapping`` and the flattened ``state_dict``."""
|
| 67 |
+
nested: STATE_DICT_TYPE = {}
|
| 68 |
+
for key, value in state_dict.items():
|
| 69 |
+
set_element(nested, mapping[key], value)
|
| 70 |
+
return nested
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_sharded_tensor_utils.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates
|
| 2 |
+
|
| 3 |
+
import copy
|
| 4 |
+
from typing import TYPE_CHECKING
|
| 5 |
+
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
from torch.distributed._shard.sharded_tensor import Shard, ShardedTensor, ShardMetadata
|
| 8 |
+
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
|
| 9 |
+
from torch.distributed.remote_device import _remote_device
|
| 10 |
+
|
| 11 |
+
from ._traverse import OBJ_PATH, set_element, STATE_DICT_ITEM, traverse_state_dict
|
| 12 |
+
from .utils import _element_wise_add, _normalize_device_info
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from torch.distributed._shard.sharded_tensor.metadata import ShardedTensorMetadata
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# TODO: We need to refactor this code.
|
| 20 |
+
def _flatten_sharded_tensors(state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
|
| 21 |
+
r"""
|
| 22 |
+
Transform ``state_dict`` by flattening all nested ShardedTensor instances found.
|
| 23 |
+
|
| 24 |
+
The resulting ShardedTensor instances are only correct regarding the local shard and
|
| 25 |
+
MUST not be used for any other purpose but checkpointing, as no operator will work with them.
|
| 26 |
+
|
| 27 |
+
This function should be used in conjunction with a state_dict produced by FSDP's
|
| 28 |
+
StateDictType.SHARDED_STATE_DICT methods.
|
| 29 |
+
"""
|
| 30 |
+
new_state_dict: STATE_DICT_TYPE = {}
|
| 31 |
+
|
| 32 |
+
def rewrite_dict(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None:
|
| 33 |
+
if not isinstance(value, ShardedTensor):
|
| 34 |
+
set_element(new_state_dict, path, value)
|
| 35 |
+
return
|
| 36 |
+
shards = value.local_shards()
|
| 37 |
+
|
| 38 |
+
if len(shards) == 0:
|
| 39 |
+
return
|
| 40 |
+
if len(shards) != 1:
|
| 41 |
+
set_element(new_state_dict, path, value)
|
| 42 |
+
return
|
| 43 |
+
|
| 44 |
+
outer_shard = shards[0]
|
| 45 |
+
|
| 46 |
+
inner_st = outer_shard.tensor
|
| 47 |
+
if not isinstance(inner_st, ShardedTensor):
|
| 48 |
+
set_element(new_state_dict, path, value)
|
| 49 |
+
return
|
| 50 |
+
|
| 51 |
+
if len(inner_st.local_shards()) != 1:
|
| 52 |
+
raise ValueError("Cannot handle inner tensor with more than 1 shard")
|
| 53 |
+
inner_shard = inner_st.local_shards()[0]
|
| 54 |
+
|
| 55 |
+
local_shards = [
|
| 56 |
+
Shard(
|
| 57 |
+
tensor=inner_shard.tensor,
|
| 58 |
+
metadata=ShardMetadata(
|
| 59 |
+
shard_offsets=_element_wise_add(
|
| 60 |
+
outer_shard.metadata.shard_offsets,
|
| 61 |
+
inner_shard.metadata.shard_offsets,
|
| 62 |
+
),
|
| 63 |
+
shard_sizes=inner_shard.metadata.shard_sizes,
|
| 64 |
+
placement=f"rank:{dist.get_rank()}/{inner_shard.tensor.device}",
|
| 65 |
+
),
|
| 66 |
+
)
|
| 67 |
+
]
|
| 68 |
+
|
| 69 |
+
st_meta: ShardedTensorMetadata = copy.deepcopy(value.metadata())
|
| 70 |
+
other_rank = 0 if dist.get_rank() > 0 else 1
|
| 71 |
+
device_info = _normalize_device_info(inner_shard.tensor.device.type, 0)
|
| 72 |
+
|
| 73 |
+
# Remove the outer ST shard the inner ST covers
|
| 74 |
+
for i, shard_md in enumerate(st_meta.shards_metadata):
|
| 75 |
+
if shard_md.shard_offsets == outer_shard.metadata.shard_offsets:
|
| 76 |
+
st_meta.shards_metadata.pop(i)
|
| 77 |
+
break
|
| 78 |
+
|
| 79 |
+
# Attribute other rank for the other shards
|
| 80 |
+
for shard_md in st_meta.shards_metadata:
|
| 81 |
+
shard_md.placement = _remote_device(f"rank:{other_rank}/{device_info}")
|
| 82 |
+
|
| 83 |
+
# Add other inner shards from the inner tensor
|
| 84 |
+
for inner_md in inner_st.metadata().shards_metadata:
|
| 85 |
+
if inner_md.shard_offsets != inner_shard.metadata.shard_offsets:
|
| 86 |
+
st_meta.shards_metadata.append(
|
| 87 |
+
ShardMetadata(
|
| 88 |
+
shard_offsets=_element_wise_add(
|
| 89 |
+
outer_shard.metadata.shard_offsets,
|
| 90 |
+
inner_md.shard_offsets,
|
| 91 |
+
),
|
| 92 |
+
shard_sizes=inner_md.shard_sizes,
|
| 93 |
+
placement=f"rank:{other_rank}/{device_info}",
|
| 94 |
+
)
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
# Finally add this shard
|
| 98 |
+
st_meta.shards_metadata.append(local_shards[0].metadata)
|
| 99 |
+
|
| 100 |
+
st = ShardedTensor._init_from_local_shards_and_global_metadata(
|
| 101 |
+
local_shards=local_shards,
|
| 102 |
+
sharded_tensor_metadata=st_meta,
|
| 103 |
+
)
|
| 104 |
+
set_element(new_state_dict, path, st)
|
| 105 |
+
|
| 106 |
+
traverse_state_dict(state_dict, rewrite_dict)
|
| 107 |
+
return new_state_dict
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/_version.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates
|
| 2 |
+
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
_derived_version: Optional[str] = None
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/api.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import traceback as tb
|
| 3 |
+
from typing import Any, Dict, Tuple
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
WRAPPED_EXCEPTION = Tuple[BaseException, tb.StackSummary]
|
| 7 |
+
|
| 8 |
+
__all__ = ["CheckpointException"]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _wrap_exception(exc: BaseException) -> WRAPPED_EXCEPTION:
|
| 12 |
+
return (exc, tb.extract_tb(exc.__traceback__))
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _is_wrapped_exception(obj: Any) -> bool:
|
| 16 |
+
if not isinstance(obj, tuple):
|
| 17 |
+
return False
|
| 18 |
+
if len(obj) != 2:
|
| 19 |
+
return False
|
| 20 |
+
return isinstance(obj[0], BaseException) and isinstance(obj[1], tb.StackSummary)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class CheckpointException(BaseException):
|
| 24 |
+
"""Exception raised if failure was detected as part of a checkpoint load or save."""
|
| 25 |
+
|
| 26 |
+
def __init__(self, msg: str, failures: Dict[int, WRAPPED_EXCEPTION]):
|
| 27 |
+
super().__init__(msg, failures)
|
| 28 |
+
self._failures = failures
|
| 29 |
+
|
| 30 |
+
@property
|
| 31 |
+
def failures(self) -> Dict[int, WRAPPED_EXCEPTION]:
|
| 32 |
+
"""Return a dictionary mapping node ranks to their associated exceptions in case of failure."""
|
| 33 |
+
return self._failures
|
| 34 |
+
|
| 35 |
+
def __str__(self):
|
| 36 |
+
str = f"CheckpointException ranks:{self._failures.keys()}\n"
|
| 37 |
+
for rank, exc_pair in self._failures.items():
|
| 38 |
+
exc, trace = exc_pair
|
| 39 |
+
str += f"Traceback (most recent call last): (RANK {rank})\n"
|
| 40 |
+
if trace is not None:
|
| 41 |
+
str += "".join(tb.format_list(trace))
|
| 42 |
+
str += "".join(tb.format_exception_only(type(exc), value=exc))
|
| 43 |
+
return str
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/default_planner.py
ADDED
|
@@ -0,0 +1,546 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates
|
| 3 |
+
|
| 4 |
+
import dataclasses
|
| 5 |
+
import io
|
| 6 |
+
import logging
|
| 7 |
+
import operator
|
| 8 |
+
from collections import ChainMap
|
| 9 |
+
from functools import reduce
|
| 10 |
+
from typing import Any, cast, Dict, List, Optional, Tuple, Union
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from torch.distributed._shard._utils import narrow_tensor_by_index
|
| 14 |
+
from torch.distributed.checkpoint._dedup_save_plans import dedup_save_plans
|
| 15 |
+
from torch.distributed.checkpoint._nested_dict import (
|
| 16 |
+
FLATTEN_MAPPING,
|
| 17 |
+
flatten_state_dict,
|
| 18 |
+
)
|
| 19 |
+
from torch.distributed.checkpoint._sharded_tensor_utils import _flatten_sharded_tensors
|
| 20 |
+
from torch.distributed.checkpoint._traverse import set_element
|
| 21 |
+
from torch.distributed.checkpoint.metadata import (
|
| 22 |
+
BytesStorageMetadata,
|
| 23 |
+
ChunkStorageMetadata,
|
| 24 |
+
Metadata,
|
| 25 |
+
MetadataIndex,
|
| 26 |
+
STATE_DICT_TYPE,
|
| 27 |
+
STORAGE_TYPES,
|
| 28 |
+
StorageMeta,
|
| 29 |
+
TensorStorageMetadata,
|
| 30 |
+
)
|
| 31 |
+
from torch.distributed.checkpoint.planner import (
|
| 32 |
+
LoadPlan,
|
| 33 |
+
LoadPlanner,
|
| 34 |
+
ReadItem,
|
| 35 |
+
SavePlan,
|
| 36 |
+
SavePlanner,
|
| 37 |
+
WriteItem,
|
| 38 |
+
WriteItemType,
|
| 39 |
+
)
|
| 40 |
+
from torch.distributed.checkpoint.planner_helpers import (
|
| 41 |
+
_create_default_metadata_only_plan,
|
| 42 |
+
_create_read_items,
|
| 43 |
+
_create_write_items,
|
| 44 |
+
_init_state_dict,
|
| 45 |
+
)
|
| 46 |
+
from torch.distributed.checkpoint.utils import find_state_dict_object
|
| 47 |
+
from torch.distributed.tensor import DTensor
|
| 48 |
+
|
| 49 |
+
from . import _version
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
logger: logging.Logger = logging.getLogger(__name__)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
__all__ = [
|
| 56 |
+
"DefaultSavePlanner",
|
| 57 |
+
"DefaultLoadPlanner",
|
| 58 |
+
"create_default_local_load_plan",
|
| 59 |
+
"create_default_global_load_plan",
|
| 60 |
+
"create_default_local_save_plan",
|
| 61 |
+
"create_default_global_save_plan",
|
| 62 |
+
]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# TODO: Update docstrings for default_planner.py
|
| 66 |
+
class DefaultSavePlanner(SavePlanner):
    """Default ``SavePlanner`` that optionally flattens nested state_dicts and
    sharded tensors before planning, and deduplicates replicated writes."""

    mappings: FLATTEN_MAPPING

    def __init__(
        self,
        flatten_state_dict: bool = True,
        flatten_sharded_tensors: bool = True,
        dedup_replicated_tensors: Optional[bool] = None,
        dedup_save_to_lowest_rank: bool = False,
    ) -> None:
        self.flatten_state_dict = flatten_state_dict
        self.flatten_sharded_tensors = flatten_sharded_tensors
        self.mappings = {}
        self.dedup_save_to_lowest_rank = dedup_save_to_lowest_rank
        # Deprecated knob: accepted for signature compatibility, otherwise ignored.
        if dedup_replicated_tensors is not None:
            logger.warning(
                "DefaultSavePlanner's `dedup_replicated_tensors` argument is being "
                "deprecated, and no longer has any effect. Please remove this argument "
                "from your call."
            )

    def set_up_planner(
        self,
        state_dict: STATE_DICT_TYPE,
        storage_meta: Optional[StorageMeta] = None,
        is_coordinator: bool = False,
    ) -> None:
        """Apply the configured flattening passes and record the inputs."""
        if self.flatten_state_dict:
            state_dict, self.mappings = flatten_state_dict(state_dict)
        if self.flatten_sharded_tensors:
            state_dict = _flatten_sharded_tensors(state_dict)
        self.state_dict = state_dict
        self.is_coordinator = is_coordinator

    def create_local_plan(self) -> SavePlan:
        """Build this rank's plan, attaching the flattening map when enabled."""
        local_plan = create_default_local_save_plan(self.state_dict, self.is_coordinator)
        if self.flatten_state_dict:
            local_plan = dataclasses.replace(local_plan, planner_data=self.mappings)
        self.plan = local_plan

        return self.plan

    def create_global_plan(
        self, all_plans: List[SavePlan]
    ) -> Tuple[List[SavePlan], Metadata]:
        """Dedup the per-rank plans, build the global metadata, and validate it."""
        all_plans = dedup_save_plans(all_plans, self.dedup_save_to_lowest_rank)

        global_plan, metadata = create_default_global_save_plan(all_plans)

        if self.flatten_state_dict:
            # Merge every plan's flattening map into a single dict via ChainMap;
            # the dict `|` merge operator is unavailable on Python 3.8 and older.
            merged_mappings = dict(ChainMap(*(p.planner_data for p in global_plan)))
            metadata = dataclasses.replace(metadata, planner_data=merged_mappings)

        if not _validate_global_plan(global_plan, metadata):
            raise ValueError("Failed to validate global plan")

        self.global_plan = global_plan
        self.metadata = metadata

        return self.global_plan, self.metadata

    def finish_plan(self, new_plan: SavePlan) -> SavePlan:
        """Adopt the (possibly rewritten) plan returned by the coordinator."""
        self.plan = new_plan
        return new_plan

    def resolve_data(self, write_item: WriteItem) -> Union[torch.Tensor, io.BytesIO]:
        """Look up the value behind ``write_item`` and convert it for storage."""
        value = self.lookup_object(write_item.index)
        return self.transform_object(write_item, value)

    def lookup_object(self, index: MetadataIndex) -> Any:
        """Extension from the planner interface to make it easy to extend the default planner."""
        return find_state_dict_object(self.state_dict, index)

    def transform_object(self, write_item: WriteItem, object: Any):
        """Extension from the planner interface to make it easy to extend the default planner."""
        if write_item.type == WriteItemType.BYTE_IO:
            # Serialize non-tensor objects into an in-memory buffer via torch.save.
            buffer = io.BytesIO()
            torch.save(object, buffer)
            object = buffer
        return object
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class DefaultLoadPlanner(LoadPlanner):
    """
    DefaultLoadPlanner that adds multiple features on top of LoadPlanner.

    In particular it adds the following:

    flatten_state_dict: Handle state_dict with nested dicts
    flatten_sharded_tensors: For FSDP in 2D parallel mode
    allow_partial_load: If False, will raise a runtime error if a key is present in state_dict, but not in the checkpoint.
    """

    # The caller's state_dict, untouched by any flattening pass (used to write
    # loaded bytes back into the original nested structure).
    original_state_dict: STATE_DICT_TYPE
    # Maps flattened FQNs back to their nested paths in original_state_dict.
    mappings: FLATTEN_MAPPING

    def __init__(
        self,
        flatten_state_dict: bool = True,
        flatten_sharded_tensors: bool = True,
        allow_partial_load: bool = False,
    ) -> None:
        self.flatten_state_dict = flatten_state_dict
        self.flatten_sharded_tensors = flatten_sharded_tensors
        self.original_state_dict = {}
        self.mappings = {}
        self.allow_partial_load = allow_partial_load

    def set_up_planner(
        self,
        state_dict: STATE_DICT_TYPE,
        metadata: Optional[Metadata] = None,
        is_coordinator: bool = False,
    ) -> None:
        """Record inputs and apply the configured flattening passes."""
        _init_state_dict(state_dict)
        self.original_state_dict = state_dict

        if self.flatten_sharded_tensors:
            state_dict = _flatten_sharded_tensors(state_dict)

        if self.flatten_state_dict:
            state_dict, self.mappings = flatten_state_dict(state_dict)

        self.state_dict = state_dict
        self.metadata = metadata
        self.is_coordinator = is_coordinator

    def create_local_plan(self) -> LoadPlan:
        """Build this rank's ``LoadPlan``, transparently handling pre-2.4 checkpoints."""
        assert self.metadata is not None
        if self.flatten_state_dict:
            # To support checkpoints that are saved before v2.4, we have to
            # differentiate if the missing keys are due to old checkpoints.
            # The contracts are:
            # 1. There are 3 cases when we found a missing key.
            #    1.1 Actual missing key, but allow_partial_load is False
            #    1.2 Actual missing key, but allow_partial load is True
            #    1.3 Old checkpoint, but allow_partial_load is False
            #    1.4 Old checkpoint, but allow_partial_load is True
            # 2. If we found a missing key, we first convert the keys back to
            #    the key format of v2.3
            # 3. If the previous missing keys are in the v2.3 keys, we assume
            #    this is a old checkpoint.
            # 4. Pass the state_dict to `create_default_local_load_plan()`,
            #    which has the logic to check missing for allow_partial_load.
            # So for 1.2 and 1.4 cases, we delegate allow_partial_load check to
            # `create_default_local_load_plan()`. The logic here is to determine
            # whether the checkpoint belong to 2.3 (or before) or 2.4 (or after).
            current_keys = set(self.state_dict.keys())
            load_keys = set(self.metadata.state_dict_metadata.keys())
            missing_keys = load_keys - current_keys
            if missing_keys:
                # Temporarily switch flatten_state_dict into v2.3 key mode to
                # re-derive the keys the old format would have produced.
                _version._derived_version = "2_3"
                old_state_dict, old_mappings = flatten_state_dict(
                    self.original_state_dict
                )
                old_keys = set(old_state_dict.keys())
                if old_keys & missing_keys:
                    self.state_dict, self.mappings = old_state_dict, old_mappings
                # _derived_version is only used by flatten_state_dict now.
                # Set it back to None so that later we can save to a new version.
                _version._derived_version = None

        return create_default_local_load_plan(
            self.state_dict, self.metadata, not self.allow_partial_load
        )

    def create_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]:
        """Coordinator-side hook; the default performs no global coordination."""
        return create_default_global_load_plan(global_plan)

    def finish_plan(self, new_plan: LoadPlan) -> LoadPlan:
        """Adopt the plan as returned by the coordinator, unchanged."""
        return new_plan

    def load_bytes(self, read_item: ReadItem, value: io.BytesIO) -> None:
        """Deserialize a byte blob and place it at the destination FQN."""
        if self.flatten_state_dict:
            # Write through the mapping so the value lands in the caller's
            # original nested dict, not the flattened working copy.
            set_element(
                self.original_state_dict,
                self.mappings[read_item.dest_index.fqn],
                torch.load(value, weights_only=False),
            )
        else:
            self.state_dict[read_item.dest_index.fqn] = torch.load(
                value, weights_only=False
            )

    def resolve_tensor(self, read_item: ReadItem):
        """Return the (narrowed) destination tensor for ``read_item``."""
        tensor = self.lookup_tensor(read_item.dest_index)
        return self.transform_tensor(read_item, tensor)

    def commit_tensor(self, read_item: ReadItem, tensor: torch.Tensor) -> None:
        # Nothing to do: resolve_tensor hands out a view, so writes land in place.
        pass

    def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor:
        """Extension from the planner interface to make it easy to extend the default planner."""
        return find_state_dict_object(self.state_dict, index)

    def transform_tensor(self, read_item: ReadItem, tensor: torch.Tensor):
        """Extension from the planner interface to make it easy to extend the default planner."""
        return narrow_tensor_by_index(tensor, read_item.dest_offsets, read_item.lengths)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
class _EmptyStateDictLoadPlanner(DefaultLoadPlanner):
    """
    Extension of DefaultLoadPlanner, which rebuilds state_dict from the saved metadata.
    Useful for loading in state_dict without first initializing a model, such as
    when converting a DCP checkpoint into a Torch save file.

    . N.B. `state_dict` must be an empty dictionary when used with this LoadPlanner

    .. warning::
        Because the entire state dict is initialized, It's recommended to only utilize
        this LoadPlanner on a single rank or process to avoid OOM.

    """

    def __init__(self, keys=None, *args, **kwargs):
        # `keys` optionally restricts which FQNs (flattened or unflattened)
        # are materialized; None means "load everything".
        self.keys = keys
        super().__init__(*args, **kwargs)

    def _should_include_key(self, key: str, metadata: Metadata) -> bool:
        """Return True if ``key`` (or any of its unflattened prefixes) was requested."""
        if self.keys is None:
            return True

        if key in self.keys:
            # Fix: the original wrote the bare expression `True` here (no
            # `return`), so exact key matches fell through and were rejected
            # unless an unflattened prefix happened to match below.
            return True

        # Build the progressively joined unflattened prefixes of `key`,
        # e.g. planner_data ["a", "b", "c"] -> ["a", "a.b", "a.b.c"].
        unflattened_keys: List[str] = []
        planner_data = metadata.planner_data.get(key)
        for unflattened_key in planner_data:
            if unflattened_keys:
                unflattened_keys.append(
                    ".".join([unflattened_keys[-1], str(unflattened_key)])
                )

            else:
                unflattened_keys.append(unflattened_key)

        if any(unflattened_key in self.keys for unflattened_key in unflattened_keys):
            return True

        return False

    def set_up_planner(
        self,
        state_dict: STATE_DICT_TYPE,
        metadata: Optional[Metadata] = None,
        is_coordinator: bool = False,
    ) -> None:
        """Materialize an empty state_dict from ``metadata`` and delegate to the base."""
        assert not state_dict
        assert metadata is not None

        # rebuild the state dict from the metadata
        for k, v in metadata.state_dict_metadata.items():
            if not self._should_include_key(k, metadata):
                continue

            if isinstance(v, TensorStorageMetadata):
                # Allocate an uninitialized tensor of the recorded size/dtype;
                # its contents are filled during the actual load.
                v = torch.empty(v.size, dtype=v.properties.dtype)  # type: ignore[assignment]
            if k in metadata.planner_data:
                # Re-nest flattened keys back into their original structure.
                set_element(state_dict, metadata.planner_data[k], v)
            else:
                state_dict[k] = v

        super().set_up_planner(state_dict, metadata, is_coordinator)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def create_default_local_load_plan(
    state_dict: Dict[str, Any], metadata: Metadata, strict: bool = True
) -> LoadPlan:
    """
    Create the ``LoadPlan`` used by DefaultLoadPlanner.

    It produces one read item per value in ``state_dict`` using the metadata in ``metadata``.

    The default behavior is to match key exactly between state_dict and metadata.
    It handles resharding by issuing multiple read requests against storage in order to match
    load requirements.

    Raises:
        RuntimeError: if ``strict`` is True and a ``state_dict`` key has no
            entry in the checkpoint metadata.
    """
    # Fix: this docstring previously appeared *after* `requests = []`, making it
    # a no-op string expression rather than the function's docstring.
    requests = []

    for fqn, obj in state_dict.items():
        # Keys absent from the checkpoint metadata are skipped when strict=False.
        if fqn not in metadata.state_dict_metadata:
            if strict:
                raise RuntimeError(f"Missing key in checkpoint state_dict: {fqn}.")
            else:
                continue

        md = metadata.state_dict_metadata[fqn]
        # Since DTensor supports submesh, adding extra check to ensure _create_read_items()
        # gets called only when the current rank is part of the mesh for the corresponding DTensor.
        if isinstance(obj, DTensor):
            if obj.device_mesh.get_coordinate() is not None:
                requests += _create_read_items(fqn, md, obj)
        else:
            requests += _create_read_items(fqn, md, obj)

    return LoadPlan(requests)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def create_default_global_load_plan(
    all_plans: List[LoadPlan],
) -> List[LoadPlan]:
    """
    Create global load plan used by DefaultLoadPlanner.

    Loading requires no cross-rank coordination by default, so the per-rank
    plans are returned exactly as received.
    """
    return all_plans
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
def create_default_local_save_plan(
    state_dict: Dict[str, Any], is_coordinator: bool
) -> SavePlan:
    """
    Create the ``SavePlan`` used by DefaultSavePlanner.

    Emits write items for every entry of ``state_dict``, except DTensor entries
    on ranks that are not part of the tensor's device (sub)mesh.  Plain tensors
    and non-tensor values are requested on every rank; the coordinator decides
    later whether to deduplicate them by key.
    """
    write_items = []
    for fqn, obj in state_dict.items():
        # A DTensor on a submesh produces writes only on ranks that belong to
        # its mesh (get_coordinate() is None off-mesh); everything else is
        # always written.
        skip_off_mesh_dtensor = (
            isinstance(obj, DTensor) and obj.device_mesh.get_coordinate() is None
        )
        if not skip_off_mesh_dtensor:
            write_items += _create_write_items(fqn, obj)

    return SavePlan(write_items)
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def create_default_global_save_plan(
    all_plans: List[SavePlan],
    rewrite_index_hints: bool = True,
) -> Tuple[List[SavePlan], Metadata]:
    """
    Create the global plan and metadata used by DefaultSavePlanner.

    Metadata is produced by concatenating the metadata of all ``WriteItem`` from the supplied plans.

    The only global planning change is to update index hints in all ``MetadataIndex`` objects if
    ``rewrite_index_hints`` is True.
    """
    # Global metadata, keyed by fully-qualified name.
    md: Dict[str, STORAGE_TYPES] = {}
    new_plans = []
    for plan in all_plans:
        new_items = []
        for item in plan.items:
            # Only sharded tensors may legitimately appear under the same FQN
            # in more than one plan; any other duplicate is a planning bug.
            if not item.type == WriteItemType.SHARD:
                assert item.index.fqn not in md

            if item.type == WriteItemType.BYTE_IO:
                md[item.index.fqn] = BytesStorageMetadata()
                new_items.append(item)
            else:
                assert item.tensor_data is not None
                # Fetch (or lazily create) the per-FQN tensor entry shared by
                # all chunks of that tensor.
                tensor_md = cast(
                    TensorStorageMetadata,
                    md.setdefault(
                        item.index.fqn,
                        TensorStorageMetadata(
                            properties=item.tensor_data.properties,
                            size=item.tensor_data.size,
                            chunks=[],
                        ),
                    ),
                )
                new_item = item
                if rewrite_index_hints:
                    # Point the hint at the chunk slot appended below, i.e. the
                    # current length of the chunk list.
                    new_index = dataclasses.replace(
                        item.index, index=len(tensor_md.chunks)
                    )
                    new_item = dataclasses.replace(item, index=new_index)
                new_items.append(new_item)

                assert (
                    item.tensor_data.chunk is not None
                ), f"""
                    Cannot create MD for tensor without bounds.
                    FQN: {item.index.fqn}
                """
                tensor_md.chunks.append(item.tensor_data.chunk)
        new_plans.append(dataclasses.replace(plan, items=new_items))
    return (new_plans, Metadata(md))
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def _create_default_local_metadata(state_dict: STATE_DICT_TYPE) -> Metadata:
    """Return the ``Metadata`` if DefaultSavePlanner was used to checkpoint ``state_dict``."""
    metadata_only_plan = _create_default_metadata_only_plan(state_dict)
    _, metadata = create_default_global_save_plan([metadata_only_plan])
    return metadata
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
def _check_box_overlap(box0: ChunkStorageMetadata, box1: ChunkStorageMetadata) -> bool:
    """Check if two boxes overlap. Tuples are (offset, lengths)."""
    # Boxes intersect iff they overlap along every dimension.  If either box
    # starts at or past the other's end in some dimension (e.g. strictly
    # above, or to the left, for a 2D shard), they are disjoint.
    return all(
        off0 < off1 + len1 and off1 < off0 + len0
        for off0, len0, off1, len1 in zip(
            box0.offsets, box0.sizes, box1.offsets, box1.sizes
        )
    )
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
def _check_box_bounds(
    outer_box_size: torch.Size, inner_box: ChunkStorageMetadata
) -> bool:
    """Return True iff ``inner_box`` lies entirely within ``outer_box_size``
    with non-negative offsets and lengths."""
    ndims = len(outer_box_size)
    return all(
        inner_box.offsets[i] >= 0
        and inner_box.sizes[i] >= 0
        and inner_box.offsets[i] + inner_box.sizes[i] <= outer_box_size[i]
        for i in range(ndims)
    )
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
def _validate_global_plan(global_plan: List[SavePlan], metadata: Metadata) -> bool:
    """Sanity-check the global ``metadata``: every tensor's chunks must be
    in-bounds, mutually non-overlapping, and together fill the tensor exactly.

    Logs a warning for each violation and returns False if any was found.
    NOTE(review): ``global_plan`` is not inspected here; validation is driven
    entirely by ``metadata``.
    """
    all_good = True
    for key, value in metadata.state_dict_metadata.items():
        if isinstance(value, BytesStorageMetadata):
            # Byte blobs carry no chunk geometry to validate.
            continue
        if len(value.size) == 0:
            # 0-d (scalar) tensors likewise have no chunk layout.
            continue
        chunks_volume = 0
        for chunk_idx, chunk0 in enumerate(value.chunks):
            # Compute the volume
            if not _check_box_bounds(value.size, chunk0):
                logger.warning(
                    """
                        key:%s has out of bounds chunk:
                        tensor-size:%s chunk: %s
                    """,
                    key,
                    value.size,
                    chunk0,
                )
                all_good = False
            chunks_volume += reduce(operator.mul, chunk0.sizes, 1)

            # Check for overlap
            for chunk1 in value.chunks[chunk_idx + 1 :]:
                if _check_box_overlap(chunk0, chunk1):
                    logger.warning(
                        "key:%s has overlapping chunks: %s %s", key, chunk0, chunk1
                    )
                    all_good = False

        # Check whether combined chunk cover the whole tensor
        tensor_volume = reduce(operator.mul, value.size, 1)
        if chunks_volume != tensor_volume:
            logger.warning(
                """
                    key:%s invalid fill tensor-volume:
                    %s chunks-volume: %s
                """,
                key,
                tensor_volume,
                chunks_volume,
            )
            all_good = False

    return all_good
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/format_utils.py
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import argparse
|
| 3 |
+
import os
|
| 4 |
+
from enum import Enum
|
| 5 |
+
from typing import cast, Dict, List, Optional, Union
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.distributed as dist
|
| 9 |
+
from torch.distributed._shard._utils import narrow_tensor_by_index
|
| 10 |
+
from torch.distributed.checkpoint import FileSystemReader, FileSystemWriter
|
| 11 |
+
from torch.distributed.checkpoint._nested_dict import flatten_state_dict
|
| 12 |
+
from torch.distributed.checkpoint.default_planner import (
|
| 13 |
+
_EmptyStateDictLoadPlanner,
|
| 14 |
+
DefaultLoadPlanner,
|
| 15 |
+
)
|
| 16 |
+
from torch.distributed.checkpoint.metadata import (
|
| 17 |
+
Metadata,
|
| 18 |
+
STATE_DICT_TYPE,
|
| 19 |
+
STORAGE_TYPES,
|
| 20 |
+
TensorProperties,
|
| 21 |
+
TensorStorageMetadata,
|
| 22 |
+
)
|
| 23 |
+
from torch.distributed.checkpoint.planner import LoadItemType, LoadPlan, LoadPlanner
|
| 24 |
+
from torch.distributed.checkpoint.planner_helpers import _create_chunk_list
|
| 25 |
+
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict
|
| 26 |
+
from torch.distributed.checkpoint.state_dict_saver import _save_state_dict
|
| 27 |
+
from torch.distributed.checkpoint.storage import StorageReader
|
| 28 |
+
from torch.futures import Future
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
__all__ = [
|
| 32 |
+
"dcp_to_torch_save",
|
| 33 |
+
"torch_save_to_dcp",
|
| 34 |
+
"BroadcastingTorchSaveReader",
|
| 35 |
+
"DynamicMetaLoadPlanner",
|
| 36 |
+
]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class BroadcastingTorchSaveReader(StorageReader):
|
| 40 |
+
"""
|
| 41 |
+
StorageReader for reading a Torch Save file. This reader will read the entire checkpoint
|
| 42 |
+
on the coordinator rank, and then broadcast and shard each tensor to all ranks.
|
| 43 |
+
|
| 44 |
+
. N.B. Intended to be used with DynamicMetaLoadPlanner
|
| 45 |
+
|
| 46 |
+
.. warning::
|
| 47 |
+
Current implementation only supports loading Tensors.
|
| 48 |
+
|
| 49 |
+
>>> # xdoctest: +SKIP("undefined vars")
|
| 50 |
+
>>> sd = {"mode": model}
|
| 51 |
+
>>> dcp.load(
|
| 52 |
+
>>> sd,
|
| 53 |
+
>>> storage_reader=BroadcastingTorchSaveReader(),
|
| 54 |
+
>>> planner=DynamicMetaLoadPlanner(),
|
| 55 |
+
>>> checkpoint_id="path_to_model.pt"
|
| 56 |
+
>>> )
|
| 57 |
+
"""
|
| 58 |
+
|
| 59 |
+
def __init__(
|
| 60 |
+
self,
|
| 61 |
+
checkpoint_id: Optional[Union[str, os.PathLike]] = None,
|
| 62 |
+
coordinator_rank: int = 0,
|
| 63 |
+
) -> None:
|
| 64 |
+
self.checkpoint_id = checkpoint_id
|
| 65 |
+
self.coordinator_rank = coordinator_rank
|
| 66 |
+
|
| 67 |
+
def read_metadata(self) -> Metadata:
|
| 68 |
+
"""Extends the default StorageReader to support building the metadata file"""
|
| 69 |
+
# Metadata is built in planner.set_up_planner, since we are not actually reading metadata from
|
| 70 |
+
# the disk
|
| 71 |
+
return Metadata(state_dict_metadata={})
|
| 72 |
+
|
| 73 |
+
def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]:
|
| 74 |
+
"""
|
| 75 |
+
Reads torch save data on the coordinator rank, and broadcast afterwards
|
| 76 |
+
this incurrs a communication cost, but avoids having to load
|
| 77 |
+
the entire checkpoint on each rank, hopefully preventing OOM issues
|
| 78 |
+
"""
|
| 79 |
+
planner = cast(DefaultLoadPlanner, planner)
|
| 80 |
+
|
| 81 |
+
# data is read in on the coordinator rank, and broadcast afterwards
|
| 82 |
+
# this incurrs a communication cost, but it avoids having to load
|
| 83 |
+
# the entire checkpoint on each rank, hopefully preventing OOM issues
|
| 84 |
+
# TODO: read on each host, instead of only the coordinator
|
| 85 |
+
if self.is_coordinator:
|
| 86 |
+
assert self.checkpoint_id is not None
|
| 87 |
+
torch_state_dict = torch.load(
|
| 88 |
+
self.checkpoint_id, map_location="cpu", weights_only=False
|
| 89 |
+
)
|
| 90 |
+
if planner.flatten_state_dict:
|
| 91 |
+
torch_state_dict, _ = flatten_state_dict(torch_state_dict)
|
| 92 |
+
else:
|
| 93 |
+
torch_state_dict = None
|
| 94 |
+
|
| 95 |
+
for req in plan.items:
|
| 96 |
+
if req.type == LoadItemType.BYTE_IO:
|
| 97 |
+
raise RuntimeError(
|
| 98 |
+
f"Non-tensor value identified at {req.storage_index.fqn}. "
|
| 99 |
+
f"At this time {type(self).__name__} only supports loading Tensors."
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
# Broadcast the tensor from the coordinator rank
|
| 103 |
+
if self.is_coordinator:
|
| 104 |
+
pg_device = dist.distributed_c10d._get_pg_default_device()
|
| 105 |
+
tensor = torch_state_dict[req.storage_index.fqn].to(pg_device)
|
| 106 |
+
else:
|
| 107 |
+
tensor = torch.empty_like(planner.state_dict[req.storage_index.fqn])
|
| 108 |
+
|
| 109 |
+
dist.broadcast(tensor, src=self.coordinator_rank, async_op=False)
|
| 110 |
+
|
| 111 |
+
tensor = narrow_tensor_by_index(tensor, req.storage_offsets, req.lengths)
|
| 112 |
+
target_tensor = planner.resolve_tensor(req).detach()
|
| 113 |
+
assert target_tensor.size() == tensor.size(), (
|
| 114 |
+
f"req {req.storage_index} mismatch sizes, "
|
| 115 |
+
f"{target_tensor.size()} vs {tensor.size()}"
|
| 116 |
+
)
|
| 117 |
+
target_tensor.copy_(tensor)
|
| 118 |
+
planner.commit_tensor(req, target_tensor)
|
| 119 |
+
|
| 120 |
+
fut: Future = Future()
|
| 121 |
+
fut.set_result(None)
|
| 122 |
+
return fut
|
| 123 |
+
|
| 124 |
+
def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:
    """Implementation of the StorageReader method.

    Records whether this rank coordinates the load and sanity-checks the
    configuration: the coordinator flag may only be set on the configured
    coordinator rank, and a checkpoint path must have been provided.
    """
    self.is_coordinator = is_coordinator
    if is_coordinator:
        # Only the configured coordinator rank may carry the flag.
        assert dist.get_rank() == self.coordinator_rank
    assert self.checkpoint_id is not None
|
| 131 |
+
|
| 132 |
+
def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan:
    """Implementation of the StorageReader method.

    No per-rank transformation is needed; the plan passes through untouched.
    """
    return plan
|
| 135 |
+
|
| 136 |
+
def prepare_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]:
    """Implementation of the StorageReader method.

    No global coordination of plans is required; return them unchanged.
    """
    return global_plan
|
| 139 |
+
|
| 140 |
+
def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
    """Implementation of the StorageReader method.

    Re-targets this reader at a (possibly new) checkpoint path; ``None``
    clears the current target.
    """
    self.checkpoint_id = checkpoint_id
|
| 143 |
+
|
| 144 |
+
@classmethod
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
    """Implementation of the StorageReader method.

    A torch.save checkpoint is a single file, so the id is valid exactly
    when it names an existing regular file.
    """
    return os.path.isfile(checkpoint_id)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class DynamicMetaLoadPlanner(DefaultLoadPlanner):
    """
    Extension of DefaultLoadPlanner, which creates a new Metadata object based on the passed in state dict,
    avoiding the need to read metadata from disk. This is useful when reading formats which don't have a
    metadata file, like Torch Save files.

    . N.B. Intended to be used with BroadcastingTorchSaveReader

    .. warning::
        Current implementation only supports loading Tensors.

    >>> # xdoctest: +SKIP("undefined vars")
    >>> sd = {"mode": model}
    >>> dcp.load(
    >>>     sd,
    >>>     storage_reader=BroadcastingTorchSaveReader(),
    >>>     planner=DynamicMetaLoadPlanner(),
    >>>     checkpoint_id="path_to_model.pt"
    >>> )
    """

    def set_up_planner(
        self,
        state_dict: STATE_DICT_TYPE,
        metadata: Optional[Metadata] = None,
        is_coordinator: bool = False,
    ) -> None:
        """Set up the planner, extending default behavior by creating the Metadata object from the state dict.

        Raises:
            RuntimeError: if any value in the state dict is not a Tensor,
                since only Tensors are supported by this planner.
        """
        super().set_up_planner(state_dict, metadata, is_coordinator)

        # Synthesize per-tensor storage metadata from the in-memory state
        # dict instead of reading a .metadata file from disk.
        state_dict_metadata: Dict[str, STORAGE_TYPES] = {}
        for key, tensor in self.state_dict.items():
            if not torch.is_tensor(tensor):
                raise RuntimeError(
                    f"Non-tensor value identified at {key}. "
                    f"At this time {type(self).__name__} only supports loading Tensors."
                )

            state_dict_metadata[key] = TensorStorageMetadata(
                TensorProperties(dtype=tensor.dtype),
                tensor.size(),
                _create_chunk_list(tensor),
            )
        self.metadata = Metadata(state_dict_metadata=state_dict_metadata)
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def dcp_to_torch_save(
    dcp_checkpoint_dir: Union[str, os.PathLike],
    torch_save_path: Union[str, os.PathLike],
):
    """
    Given a directory containing a DCP checkpoint, this function will convert it into a
    Torch save file.

    Args:
        dcp_checkpoint_dir: Directory containing the DCP checkpoint.
        torch_save_path: Filename to store the converted Torch save file.

    .. warning::
        To avoid OOM, it's recommended to only run this function on a single rank.
    """
    state_dict: STATE_DICT_TYPE = {}
    # Materialize the entire checkpoint into the (initially empty) state
    # dict without requiring an initialized process group.
    _load_state_dict(
        state_dict,
        storage_reader=FileSystemReader(dcp_checkpoint_dir),
        planner=_EmptyStateDictLoadPlanner(),
        no_dist=True,
    )
    torch.save(state_dict, torch_save_path)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def torch_save_to_dcp(
    torch_save_path: Union[str, os.PathLike],
    dcp_checkpoint_dir: Union[str, os.PathLike],
):
    """
    Given the location of a torch save file, converts it into a DCP checkpoint.

    Args:
        torch_save_path: Filename of the Torch save file.
        dcp_checkpoint_dir: Directory to store the DCP checkpoint.

    .. warning::
        To avoid OOM, it's recommended to only run this function on a single rank.
    """
    state_dict = torch.load(torch_save_path, weights_only=False)
    # Stateful handling is unnecessary: anything produced by torch.load is
    # expected to be plain data rather than stateful objects.
    _save_state_dict(
        state_dict,
        storage_writer=FileSystemWriter(dcp_checkpoint_dir),
        no_dist=True,
    )
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
if __name__ == "__main__":

    class FormatMode(Enum):
        # Conversion direction: torch.save file -> DCP directory, or back.
        TORCH_TO_DCP = "torch_to_dcp"
        DCP_TO_TORCH = "dcp_to_torch"

    # Parse command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "mode",
        type=str,
        help="Conversion mode",
        choices=[m.value for m in FormatMode],
        # NOTE(review): default is the Enum member while choices are the
        # string values; harmless because positional args ignore `default`
        # unless nargs="?" is given, but worth confirming/normalizing.
        default=FormatMode.TORCH_TO_DCP,
    )
    parser.add_argument("src", type=str, help="Path to the source model")
    parser.add_argument("dst", type=str, help="Path to the destination model")
    args = parser.parse_args()

    print(
        f"Converting checkpoint from {args.src} to {args.dst} using method: '{args.mode}'"
    )
    checkpoint_missing_warning = (
        f"No checkpoint found at {args.src}. Skipping conversion."
    )
    # A torch.save source is a single file; a DCP source is a directory.
    if args.mode == FormatMode.TORCH_TO_DCP.value:
        if os.path.isfile(args.src):
            torch_save_to_dcp(args.src, args.dst)
        else:
            print(checkpoint_missing_warning)
    elif args.mode == FormatMode.DCP_TO_TORCH.value:
        if os.path.isdir(args.src):
            dcp_to_torch_save(args.src, args.dst)
        else:
            print(checkpoint_missing_warning)
    else:
        raise ValueError(f"Unknown conversion mode: {args.mode}")
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logger.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
import time
|
| 4 |
+
from typing import Any, Callable, Dict, List, TypeVar
|
| 5 |
+
from typing_extensions import ParamSpec
|
| 6 |
+
from uuid import uuid4
|
| 7 |
+
|
| 8 |
+
import torch.distributed.c10d_logger as c10d_logger
|
| 9 |
+
from torch.distributed.checkpoint.logging_handlers import DCP_LOGGER_NAME
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
__all__: List[str] = []
|
| 13 |
+
|
| 14 |
+
global _dcp_logger
|
| 15 |
+
_dcp_logger = c10d_logger._get_or_create_logger(DCP_LOGGER_NAME)
|
| 16 |
+
|
| 17 |
+
_T = TypeVar("_T")
|
| 18 |
+
_P = ParamSpec("_P")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _msg_dict_from_dcp_method_args(*args, **kwargs) -> Dict[str, Any]:
|
| 22 |
+
"""
|
| 23 |
+
Extracts log data from dcp method args
|
| 24 |
+
"""
|
| 25 |
+
msg_dict = {}
|
| 26 |
+
|
| 27 |
+
# checkpoint ID can be passed in through the serializer or through the checkpoint id directly
|
| 28 |
+
storage_writer = kwargs.get("storage_writer", None)
|
| 29 |
+
storage_reader = kwargs.get("storage_reader", None)
|
| 30 |
+
planner = kwargs.get("planner", None)
|
| 31 |
+
|
| 32 |
+
checkpoint_id = kwargs.get("checkpoint_id", None)
|
| 33 |
+
if not checkpoint_id and (serializer := storage_writer or storage_reader):
|
| 34 |
+
checkpoint_id = getattr(serializer, "checkpoint_id", None)
|
| 35 |
+
|
| 36 |
+
msg_dict["checkpoint_id"] = (
|
| 37 |
+
str(checkpoint_id) if checkpoint_id is not None else checkpoint_id
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
# Uniquely identify a _dcp_method_logger wrapped function call.
|
| 41 |
+
msg_dict["uuid"] = str(uuid4().int)
|
| 42 |
+
|
| 43 |
+
if storage_writer:
|
| 44 |
+
msg_dict["storage_writer"] = storage_writer.__class__.__name__
|
| 45 |
+
|
| 46 |
+
if storage_reader:
|
| 47 |
+
msg_dict["storage_reader"] = storage_reader.__class__.__name__
|
| 48 |
+
|
| 49 |
+
if planner:
|
| 50 |
+
msg_dict["planner"] = planner.__class__.__name__
|
| 51 |
+
|
| 52 |
+
return msg_dict
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _get_msg_dict(func_name, *args, **kwargs) -> Dict[str, Any]:
    """Build the full log payload: DCP-specific fields merged with the
    common c10d logging fields for ``func_name``."""
    payload = _msg_dict_from_dcp_method_args(*args, **kwargs)
    payload.update(c10d_logger._get_msg_dict(func_name, **payload))
    return payload
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _dcp_method_logger(
    log_exceptions: bool = False, **wrapper_kwargs: Any
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:  # pyre-ignore
    """This method decorator logs the start, end, and exception of wrapped events.

    Args:
        log_exceptions: when True, an "exception" event is logged (at error
            level) before the exception is re-raised.
        **wrapper_kwargs: extra keyword arguments merged into the wrapped
            call's kwargs when extracting log fields (call-site kwargs win).
    """

    def decorator(func: Callable[_P, _T]):
        @functools.wraps(func)
        def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            msg_dict = _get_msg_dict(
                func.__name__, *args, **{**wrapper_kwargs, **kwargs}
            )

            # log start event
            msg_dict["event"] = "start"
            t0 = time.time_ns()
            msg_dict["time"] = t0
            msg_dict["log_exceptions"] = log_exceptions
            _dcp_logger.debug(msg_dict)

            # exceptions
            try:
                result = func(*args, **kwargs)
            except BaseException as error:
                if log_exceptions:
                    msg_dict["event"] = "exception"
                    msg_dict["error"] = f"{error}"
                    msg_dict["time"] = time.time_ns()
                    _dcp_logger.error(msg_dict)
                raise

            # end event
            msg_dict["event"] = "end"
            t1 = time.time_ns()
            # Fix: use the same timestamp for both fields. Previously
            # time.time_ns() was called a second time for "time", so it
            # disagreed slightly with the t1 used to compute "times_spent".
            msg_dict["time"] = t1
            msg_dict["times_spent"] = t1 - t0
            _dcp_logger.debug(msg_dict)

            return result

        return wrapper

    return decorator
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/logging_handlers.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
from torch.distributed.logging_handlers import _log_handlers
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
__all__: List[str] = []
|
| 8 |
+
|
| 9 |
+
# Name under which all distributed-checkpoint (DCP) log records are emitted.
DCP_LOGGER_NAME = "dcp_logger"

# Register a no-op handler for the DCP logger so that, by default, its
# records are silently discarded unless an application installs a real one.
_log_handlers.update(
    {
        DCP_LOGGER_NAME: logging.NullHandler(),
    }
)
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/optimizer.py
ADDED
|
@@ -0,0 +1,356 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates
|
| 2 |
+
|
| 3 |
+
import dataclasses
|
| 4 |
+
from typing import cast, Dict, List, Optional, Sequence, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.distributed as dist
|
| 8 |
+
from torch._utils import _get_device_module
|
| 9 |
+
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
|
| 10 |
+
from torch.distributed._shard.sharded_tensor.metadata import (
|
| 11 |
+
TensorProperties as ShardTensorProperties,
|
| 12 |
+
)
|
| 13 |
+
from torch.distributed._shard.sharded_tensor.shard import Shard
|
| 14 |
+
from torch.distributed._shard.sharding_spec.chunk_sharding_spec import ChunkShardingSpec
|
| 15 |
+
from torch.distributed.checkpoint._nested_dict import unflatten_state_dict
|
| 16 |
+
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner
|
| 17 |
+
from torch.distributed.checkpoint.metadata import (
|
| 18 |
+
BytesStorageMetadata,
|
| 19 |
+
ChunkStorageMetadata,
|
| 20 |
+
Metadata,
|
| 21 |
+
MetadataIndex,
|
| 22 |
+
STATE_DICT_TYPE,
|
| 23 |
+
TensorProperties,
|
| 24 |
+
TensorStorageMetadata,
|
| 25 |
+
)
|
| 26 |
+
from torch.distributed.checkpoint.planner import LoadPlan, LoadPlanner
|
| 27 |
+
from torch.distributed.checkpoint.planner_helpers import (
|
| 28 |
+
_create_read_items,
|
| 29 |
+
create_read_items_for_chunk_list,
|
| 30 |
+
)
|
| 31 |
+
from torch.distributed.checkpoint.state_dict_loader import load_state_dict
|
| 32 |
+
from torch.distributed.checkpoint.storage import StorageReader
|
| 33 |
+
from torch.distributed.checkpoint.utils import (
|
| 34 |
+
_element_wise_add,
|
| 35 |
+
_element_wise_sub,
|
| 36 |
+
_normalize_device_info,
|
| 37 |
+
)
|
| 38 |
+
from torch.distributed.distributed_c10d import _get_default_group
|
| 39 |
+
from torch.distributed.fsdp._shard_utils import _create_chunk_sharded_tensor
|
| 40 |
+
from torch.distributed.remote_device import _remote_device
|
| 41 |
+
from torch.distributed.tensor import DTensor
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
STATE_DICT_2D_LAYOUT = Dict[str, Tuple[Optional[Sequence[int]], Sequence[int]]]
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# TODO: Update docstrings for optimizer.py
|
| 48 |
+
__all__ = [
|
| 49 |
+
"load_sharded_optimizer_state_dict",
|
| 50 |
+
]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def _gen_rank_device(global_rank: int, device_type: str = "cuda") -> str:
|
| 54 |
+
if device_type == "cpu":
|
| 55 |
+
return "cpu"
|
| 56 |
+
device_module = _get_device_module(device_type)
|
| 57 |
+
if device_module.is_available():
|
| 58 |
+
return _normalize_device_info(
|
| 59 |
+
device_type, global_rank % device_module.device_count()
|
| 60 |
+
)
|
| 61 |
+
return "cpu"
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _create_colwise_spec(
    pg: Optional[dist.ProcessGroup] = None,
) -> ChunkShardingSpec:
    """Build a dim-0 ChunkShardingSpec with one placement per rank of ``pg``
    (or of the default world when ``pg`` is None)."""
    pg_device_type = dist.distributed_c10d._get_pg_default_device(pg).type
    if pg is None:
        placements = [
            f"rank:{r}/{_gen_rank_device(r, pg_device_type)}"
            for r in range(dist.get_world_size())
        ]
    else:
        # Group-local ranks must be translated to global ranks to pick devices.
        placements = [
            f"rank:{r}/{_gen_rank_device(dist.get_global_rank(pg, r), pg_device_type)}"
            for r in range(pg.size())
        ]
    return ChunkShardingSpec(
        dim=0,
        placements=cast(List[Union[_remote_device, str]], placements),
    )
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _is_nested_tensor(val: torch.Tensor) -> bool:
|
| 85 |
+
if type(val) is ShardedTensor:
|
| 86 |
+
if len(val.local_shards()) == 0:
|
| 87 |
+
return False
|
| 88 |
+
if type(val.local_shards()[0].tensor) is ShardedTensor:
|
| 89 |
+
return True
|
| 90 |
+
if type(val.local_shards()[0].tensor) is DTensor:
|
| 91 |
+
raise ValueError("Cannot handle DTensor nested insided ShardedTensor")
|
| 92 |
+
elif type(val) is DTensor and (
|
| 93 |
+
type(val._local_tensor) is DTensor or type(val._local_tensor) is ShardedTensor
|
| 94 |
+
):
|
| 95 |
+
raise ValueError("Cannot handle nested DTensor")
|
| 96 |
+
return False
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def _alloc_tensor(
|
| 100 |
+
props: TensorProperties, size: Sequence[int], device_type: str = "cuda"
|
| 101 |
+
) -> torch.Tensor:
|
| 102 |
+
if device_type == "cpu":
|
| 103 |
+
device = cast(torch.device, _get_device_module(device_type).current_device())
|
| 104 |
+
else:
|
| 105 |
+
device = torch.device(
|
| 106 |
+
device_type, _get_device_module(device_type).current_device()
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
return torch.empty(
|
| 110 |
+
size=size,
|
| 111 |
+
dtype=props.dtype,
|
| 112 |
+
layout=props.layout,
|
| 113 |
+
requires_grad=props.requires_grad,
|
| 114 |
+
pin_memory=props.pin_memory,
|
| 115 |
+
device=device,
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def _get_state_dict_2d_layout(
    state_dict: STATE_DICT_TYPE,
) -> Tuple[STATE_DICT_2D_LAYOUT, Optional[dist.ProcessGroup]]:
    """
    Load the right TP slice of the optimizer state.

    This is not easy since the per-tensor slicing can't be inferred from checkpoint metadata.
    We take advantage of the model state_dict producing a sliced ST to figure out what we need to load.
    This is pretty fragile and it might be easier for FSDP to compute this info for us.
    Returns a dictionary where keys are the same of the state_dict and the value is a tuple of
    (offset, size) for the current rank TP slice.
    N.B. The state_dict *MUST* come from FSDP.sharded_state_dict.
    """
    specs: STATE_DICT_2D_LAYOUT = {}
    dp_pg: Optional[dist.ProcessGroup] = None
    for key, value in state_dict.items():
        # Default: no offset known, full size (non-nested tensors).
        specs[key] = (None, value.size())
        if _is_nested_tensor(value):
            assert (
                len(value.local_shards()) == 1
            ), "Cannot handle ST with multiple shards"
            assert isinstance(
                value, ShardedTensor
            ), "Can only handle nested ShardedTensor"
            # The outer shard's metadata carries this rank's TP slice.
            shard = value.local_shards()[0]
            specs[key] = (
                shard.metadata.shard_offsets,
                shard.metadata.shard_sizes,
            )
            # The inner tensor's process group is the DP group; the last
            # nested tensor seen wins (all are expected to share one group).
            dp_pg = shard.tensor._process_group  # type: ignore[attr-defined]

    return (
        specs,
        dp_pg,
    )
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
class _ReaderWithOffset(DefaultLoadPlanner):
    """Load planner that shifts read requests by a per-FQN TP offset.

    Used when loading optimizer state under 2D (FSDP + TP) parallelism: the
    checkpoint stores the full tensor while this rank's state_dict holds only
    its TP slice. Read items are created at the *global* (shifted) offsets,
    and ``translation`` maps those displaced indices back to local ones.
    """

    # displaced (globally-offset) index -> index of the local tensor slice
    translation: Dict[MetadataIndex, MetadataIndex]
    state_dict: STATE_DICT_TYPE
    metadata: Metadata

    def __init__(self, fqn_to_offset: Dict[str, Sequence[int]]) -> None:
        """``fqn_to_offset``: offset of this rank's TP slice, per tensor FQN."""
        super().__init__()
        self.fqn_to_offset = fqn_to_offset
        self.metadata = Metadata({})
        self.state_dict = {}
        self.translation = {}

    def create_local_plan(self) -> LoadPlan:
        requests = []
        self.translation = {}
        for fqn, obj in self.state_dict.items():
            md = self.metadata.state_dict_metadata[fqn]
            # Non-sharded values and tensors without a TP offset take the
            # default read-item path.
            if not isinstance(obj, ShardedTensor):
                requests += _create_read_items(fqn, md, obj)
                continue

            if fqn not in self.fqn_to_offset:
                requests += _create_read_items(fqn, md, obj)
                continue

            offset = self.fqn_to_offset[fqn]

            assert len(obj.local_shards()) == 1
            original_shard = obj.local_shards()[0]
            # Describe the local shard at its *global* position by adding
            # the TP offset to the shard's own offsets.
            local_chunks = [
                ChunkStorageMetadata(
                    offsets=torch.Size(
                        _element_wise_add(original_shard.metadata.shard_offsets, offset)
                    ),
                    sizes=torch.Size(original_shard.metadata.shard_sizes),
                )
            ]

            reqs = create_read_items_for_chunk_list(
                fqn, cast(TensorStorageMetadata, md), local_chunks
            )
            # TODO: The ReadItems will have a displaced MetadataIndex, fix it.
            # TODO: we should change _create_sharded_read_items to have more ergonomic API
            for ri in reqs:
                assert ri.dest_index.offset is not None
                # Record how to undo the shift when the tensor is looked up.
                original_offset = _element_wise_sub(ri.dest_index.offset, offset)
                original_index = dataclasses.replace(
                    ri.dest_index, offset=torch.Size(original_offset)
                )
                self.translation[ri.dest_index] = original_index

            requests += reqs
        return LoadPlan(requests)

    def lookup_tensor(self, index: MetadataIndex) -> torch.Tensor:
        # Undo the displacement introduced in create_local_plan, if any.
        return super().lookup_tensor(self.translation.get(index, index))
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def load_sharded_optimizer_state_dict(
    model_state_dict: STATE_DICT_TYPE,
    optimizer_key: str,
    storage_reader: StorageReader,
    planner: Optional[LoadPlanner] = None,
) -> STATE_DICT_TYPE:
    """
    Load a state_dict in conjunction with FSDP sharded optimizer state.

    This is the current recommended way to checkpoint FSDP.
    >>> # xdoctest: +SKIP
    >>> import torch.distributed.checkpoint as dist_cp
    >>> # Save
    >>> model: torch.nn.Model
    >>> optim_params = model.parameters()
    >>> optim = torch.optim.SGD(optim_params, lr=0.01)
    >>> # Save
    >>> with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
    >>>     state_dict = {
    >>>         "optimizer": FSDP.optim_state_dict(model, optim),
    >>>         "model": model.state_dict()
    >>>     }
    >>>     dist_cp.save_state_dict(
    >>>         state_dict=optim_state,
    >>>         storage_writer=dist_cp.FileSystemWriter("checkpoint"),
    >>>         planner=dist_cp.DefaultSavePlanner(),
    >>>     )
    >>>
    >>> # Load
    >>> with FSDP.state_dict_type(model_tp, StateDictType.SHARDED_STATE_DICT):
    >>>     model_state_dict = model_tp.state_dict()
    >>>     checkpoint = {
    >>>         "model": model_state_dict
    >>>     }
    >>>     dist_cp.load_state_dict(
    >>>         state_dict=checkpoint,
    >>>         storage_reader=dist_cp.FileSystemReader(checkpoint_file),
    >>>         planner=dist_cp.DefaultLoadPlanner(),
    >>>     )
    >>>     model.load_state_dict(checkpoint["model_state"])
    >>>
    >>>     optim_state = dist_cp.load_sharded_optimizer_state_dict(
    >>>         model_state_dict,
    >>>         optimizer_key="optimizer",
    >>>         storage_reader=dist_cp.FileSystemReader("checkpoint"),
    >>>     )
    >>>
    >>>     flattened_osd = FSDP.optim_state_dict_to_load(
    >>>         model, optim, optim_state["optimizer"]
    >>>     )
    >>>
    >>>     optim.load_state_dict(flattened_osd)
    """
    metadata = storage_reader.read_metadata()

    # Infer each tensor's TP slice (offset, size) from the model state_dict,
    # along with the data-parallel process group (None when not 2D).
    layout_specs, dp_pg = _get_state_dict_2d_layout(model_state_dict)
    dp_pg_device_type = dist.distributed_c10d._get_pg_default_device(dp_pg).type
    device_module = _get_device_module(dp_pg_device_type)

    if dp_pg is None:
        # No DP group: shard dim 0 across the whole world, assigning ranks
        # to local devices round-robin.
        placements = []
        for i in range(dist.get_world_size()):
            device_info = _normalize_device_info(
                dp_pg_device_type, i % device_module.device_count()
            )
            placements.append(f"rank:{i}/{device_info}")
        sharding_spec = ChunkShardingSpec(dim=0, placements=placements)  # type: ignore[arg-type]
    else:
        sharding_spec = _create_colwise_spec(dp_pg)

    # Create a state_dict for optimizer state
    state_dict: STATE_DICT_TYPE = {}

    fqn_to_offset: Dict[str, Sequence[int]] = {}
    for key, value in metadata.state_dict_metadata.items():
        key_path = metadata.planner_data[key]
        # Only keys under the optimizer subtree are loaded here.
        if key_path[0] != optimizer_key:
            continue

        if isinstance(value, BytesStorageMetadata):
            state_dict[key] = "<bytes_io>"
            continue

        # value: TensorStorageMetadata
        if value.size.numel() == 1:
            # Single-element tensors (e.g. "step") are replicated, not sharded.
            state_dict[key] = _alloc_tensor(
                value.properties, value.size, dp_pg_device_type
            )
        elif dp_pg is None:
            state_dict[key] = _create_chunk_sharded_tensor(
                _alloc_tensor(value.properties, value.size, dp_pg_device_type),
                rank=dist.get_rank(),
                world_size=dist.get_world_size(),
                num_devices_per_node=device_module.device_count(),
                pg=_get_default_group(),
            )
        else:
            # 2D case: allocate a ShardedTensor shaped like this rank's TP
            # slice (layout_specs), not the full checkpointed tensor.
            spec_key = key_path[2]
            alloc_size = layout_specs.get(spec_key, (None, value.size))[1]

            properties = ShardTensorProperties(
                dtype=value.properties.dtype,
                layout=value.properties.layout,
                requires_grad=value.properties.requires_grad,
                memory_format=value.properties.memory_format,
                pin_memory=value.properties.pin_memory,
            )

            st_md = sharding_spec.build_metadata(torch.Size(alloc_size), properties)
            local_shards = []
            current_rank = dist.get_rank(dp_pg)
            # Allocate only the shards placed on this rank.
            for shard_md in st_md.shards_metadata:
                if cast(_remote_device, shard_md.placement).rank() != current_rank:
                    continue
                local_shards.append(
                    Shard(
                        tensor=_alloc_tensor(
                            value.properties, shard_md.shard_sizes, dp_pg_device_type
                        ),
                        metadata=shard_md,
                    )
                )

            st = ShardedTensor._init_from_local_shards_and_global_metadata(
                local_shards, st_md, process_group=dp_pg
            )

            # Record the TP offset so _ReaderWithOffset can shift the reads.
            if spec_key in layout_specs and layout_specs[spec_key][0] is not None:
                fqn_to_offset[key] = cast(Sequence[int], layout_specs[spec_key][0])

            state_dict[key] = st

    # Whether we unflatten before or after doesn't matter
    load_state_dict(
        state_dict=state_dict,
        storage_reader=storage_reader,
        # FIXME the type of planner is wrong in load_state_dict
        planner=_ReaderWithOffset(fqn_to_offset) if dp_pg is not None else planner,
    )

    state_dict = unflatten_state_dict(state_dict, metadata.planner_data)

    return state_dict
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/planner_helpers.py
ADDED
|
@@ -0,0 +1,386 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import io
|
| 3 |
+
from typing import Any, Callable, cast, Dict, List
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
from torch._utils import _get_device_module
|
| 8 |
+
from torch.distributed._shard.metadata import ShardMetadata
|
| 9 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 10 |
+
from torch.distributed.tensor import DTensor
|
| 11 |
+
from torch.distributed.tensor._utils import compute_local_shape_and_global_offset
|
| 12 |
+
|
| 13 |
+
from .metadata import (
|
| 14 |
+
BytesStorageMetadata,
|
| 15 |
+
ChunkStorageMetadata,
|
| 16 |
+
MetadataIndex,
|
| 17 |
+
STATE_DICT_TYPE,
|
| 18 |
+
STORAGE_TYPES,
|
| 19 |
+
TensorProperties,
|
| 20 |
+
TensorStorageMetadata,
|
| 21 |
+
)
|
| 22 |
+
from .planner import (
|
| 23 |
+
LoadItemType,
|
| 24 |
+
ReadItem,
|
| 25 |
+
SavePlan,
|
| 26 |
+
TensorWriteData,
|
| 27 |
+
WriteItem,
|
| 28 |
+
WriteItemType,
|
| 29 |
+
)
|
| 30 |
+
from .resharding import (
|
| 31 |
+
_check_shard_metadata_pair_overlap,
|
| 32 |
+
_shards_get_overlap_region_wrt_saved_tensor,
|
| 33 |
+
)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
__all__: List[str] = ["create_read_items_for_chunk_list"]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _create_chunk_from_tensor(tensor: torch.Tensor) -> ChunkStorageMetadata:
|
| 40 |
+
return ChunkStorageMetadata(
|
| 41 |
+
offsets=torch.Size([0] * len(tensor.size())), sizes=tensor.size()
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def _chunk_for_shard(shard_md: ShardMetadata) -> ChunkStorageMetadata:
|
| 46 |
+
return ChunkStorageMetadata(
|
| 47 |
+
offsets=torch.Size(shard_md.shard_offsets),
|
| 48 |
+
sizes=torch.Size(shard_md.shard_sizes),
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _sharded_tensor_metadata(
    sharded_tensor: ShardedTensor, shard_md: ShardMetadata
) -> TensorWriteData:
    """Build write-time tensor metadata for one shard of a ShardedTensor.

    The chunk describes the shard's position; the properties and the global
    size are taken from the ShardedTensor's own metadata.
    """
    st_md = sharded_tensor.metadata()
    src_props = st_md.tensor_properties

    # Copy the checkpoint-relevant tensor properties field by field.
    props = TensorProperties(
        dtype=src_props.dtype,
        layout=src_props.layout,
        requires_grad=src_props.requires_grad,
        memory_format=src_props.memory_format,
        pin_memory=src_props.pin_memory,
    )

    return TensorWriteData(
        chunk=_chunk_for_shard(shard_md),
        properties=props,
        size=st_md.size,
    )
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _create_write_items_for_dtensor(fqn: str, tensor: DTensor) -> WriteItem:
    """Create the single SHARD WriteItem for this rank's slice of a DTensor.

    The local shape and the global offset of the slice are derived from the
    DTensor's mesh and placements.
    """
    local_shape, global_offset = compute_local_shape_and_global_offset(
        tensor.shape, tensor.device_mesh, tensor.placements
    )
    local_shape = torch.Size(local_shape)
    global_offset = torch.Size(global_offset)

    tensor_data = TensorWriteData(
        chunk=ChunkStorageMetadata(offsets=global_offset, sizes=local_shape),
        properties=TensorProperties.create_from_tensor(tensor.to_local()),
        size=tensor.size(),
    )
    return WriteItem(
        index=MetadataIndex(fqn, global_offset),
        type=WriteItemType.SHARD,
        tensor_data=tensor_data,
    )
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _create_write_item_for_shard(
    fqn: str, sharded_tensor: ShardedTensor, shard_md: ShardMetadata
) -> WriteItem:
    """Create a SHARD WriteItem for one local shard of a ShardedTensor."""
    shard_offsets = torch.Size(shard_md.shard_offsets)
    return WriteItem(
        index=MetadataIndex(fqn, shard_offsets),
        type=WriteItemType.SHARD,
        tensor_data=_sharded_tensor_metadata(sharded_tensor, shard_md),
    )
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _create_write_item_for_tensor(fqn: str, tensor: torch.Tensor) -> WriteItem:
|
| 104 |
+
offsets = torch.Size([0] * len(tensor.size()))
|
| 105 |
+
return WriteItem(
|
| 106 |
+
index=MetadataIndex(fqn, offsets),
|
| 107 |
+
type=WriteItemType.TENSOR,
|
| 108 |
+
tensor_data=TensorWriteData(
|
| 109 |
+
chunk=ChunkStorageMetadata(offsets=offsets, sizes=tensor.size()),
|
| 110 |
+
properties=TensorProperties.create_from_tensor(tensor),
|
| 111 |
+
size=tensor.size(),
|
| 112 |
+
),
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _create_write_item_for_bytesio(fqn: str, bytes: Any):
|
| 117 |
+
return WriteItem(
|
| 118 |
+
index=MetadataIndex(fqn),
|
| 119 |
+
type=WriteItemType.BYTE_IO,
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _create_read_item_for_byteio(
|
| 124 |
+
dest_index, dest_offset, storage_index, storage_offset, length
|
| 125 |
+
):
|
| 126 |
+
return ReadItem(
|
| 127 |
+
type=LoadItemType.BYTE_IO,
|
| 128 |
+
dest_index=dest_index,
|
| 129 |
+
dest_offsets=torch.Size((dest_offset,)),
|
| 130 |
+
storage_index=storage_index,
|
| 131 |
+
storage_offsets=torch.Size((storage_offset,)),
|
| 132 |
+
lengths=torch.Size((length,)),
|
| 133 |
+
)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def _create_read_item_for_tensor(
|
| 137 |
+
dest_index, dest_offsets, storage_index, storage_offsets, lengths
|
| 138 |
+
):
|
| 139 |
+
return ReadItem(
|
| 140 |
+
type=LoadItemType.TENSOR,
|
| 141 |
+
dest_index=dest_index,
|
| 142 |
+
dest_offsets=torch.Size(dest_offsets),
|
| 143 |
+
storage_index=storage_index,
|
| 144 |
+
storage_offsets=torch.Size(storage_offsets),
|
| 145 |
+
lengths=torch.Size(lengths),
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def create_read_items_for_chunk_list(
|
| 150 |
+
fqn: str,
|
| 151 |
+
checkpoint_md: TensorStorageMetadata,
|
| 152 |
+
local_chunks: List[ChunkStorageMetadata],
|
| 153 |
+
) -> List[ReadItem]:
|
| 154 |
+
"""
|
| 155 |
+
Create a list of ``ReadItem`` based on the checkpoint and local chunks.
|
| 156 |
+
|
| 157 |
+
This applies the resharding algorithm and computes the reads needed
|
| 158 |
+
to satisfy ``local_chunks`` with a checkpoint described by ``checkpoint_md``.
|
| 159 |
+
|
| 160 |
+
Args:
|
| 161 |
+
fqn (str) : The state_dict FQN to pass to ``ReadItem``.
|
| 162 |
+
checkpoint_md (TensorStorageMetadata): metadata for a given tensor
|
| 163 |
+
from a checkpoint.
|
| 164 |
+
local_chunks (List[ChunkStorageMetadata]): Local chunks that needs to be
|
| 165 |
+
loaded.
|
| 166 |
+
|
| 167 |
+
Returns:
|
| 168 |
+
A list of ``ReadItem`` that will satisfy all input chunks.
|
| 169 |
+
"""
|
| 170 |
+
read_items = []
|
| 171 |
+
# this is a naive quadratic algo that can be optimized later
|
| 172 |
+
for idx, shard in enumerate(local_chunks):
|
| 173 |
+
for storage_idx, storage_md in enumerate(checkpoint_md.chunks):
|
| 174 |
+
if not _check_shard_metadata_pair_overlap(shard, storage_md):
|
| 175 |
+
continue
|
| 176 |
+
|
| 177 |
+
storage_offsets = []
|
| 178 |
+
dest_offsets = []
|
| 179 |
+
lengths = []
|
| 180 |
+
for (
|
| 181 |
+
dim,
|
| 182 |
+
offset_for_saved_tensor,
|
| 183 |
+
offset_for_current_tensor,
|
| 184 |
+
length,
|
| 185 |
+
) in _shards_get_overlap_region_wrt_saved_tensor(
|
| 186 |
+
saved_shard=storage_md, current_shard=shard
|
| 187 |
+
):
|
| 188 |
+
storage_offsets.append(offset_for_saved_tensor)
|
| 189 |
+
dest_offsets.append(offset_for_current_tensor)
|
| 190 |
+
lengths.append(length)
|
| 191 |
+
|
| 192 |
+
read_items.append(
|
| 193 |
+
_create_read_item_for_tensor(
|
| 194 |
+
dest_index=MetadataIndex(fqn, shard.offsets, idx),
|
| 195 |
+
dest_offsets=dest_offsets,
|
| 196 |
+
storage_index=MetadataIndex(fqn, storage_md.offsets, storage_idx),
|
| 197 |
+
storage_offsets=storage_offsets,
|
| 198 |
+
lengths=lengths,
|
| 199 |
+
)
|
| 200 |
+
)
|
| 201 |
+
return read_items
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def _create_default_metadata_only_plan(state_dict: STATE_DICT_TYPE) -> SavePlan:
|
| 205 |
+
requests = []
|
| 206 |
+
for fqn, obj in state_dict.items():
|
| 207 |
+
if isinstance(obj, DTensor):
|
| 208 |
+
requests.append(_create_write_items_for_dtensor(fqn, obj))
|
| 209 |
+
elif isinstance(obj, ShardedTensor):
|
| 210 |
+
for shard_md in obj.metadata().shards_metadata:
|
| 211 |
+
requests.append(_create_write_item_for_shard(fqn, obj, shard_md))
|
| 212 |
+
elif isinstance(obj, torch.Tensor):
|
| 213 |
+
requests.append(_create_write_item_for_tensor(fqn, obj))
|
| 214 |
+
else:
|
| 215 |
+
requests.append(_create_write_item_for_bytesio(fqn, obj))
|
| 216 |
+
return SavePlan(requests)
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def _create_write_items(fqn: str, object: Any) -> List[WriteItem]:
|
| 220 |
+
if hasattr(object, "__create_write_items__"):
|
| 221 |
+
# DTensor implements _Checkpointable
|
| 222 |
+
return object.__create_write_items__(fqn, object)
|
| 223 |
+
elif isinstance(object, ShardedTensor):
|
| 224 |
+
return [
|
| 225 |
+
_create_write_item_for_shard(fqn, object, shard.metadata)
|
| 226 |
+
for shard in object.local_shards()
|
| 227 |
+
]
|
| 228 |
+
elif isinstance(object, torch.Tensor):
|
| 229 |
+
return [_create_write_item_for_tensor(fqn, object)]
|
| 230 |
+
else:
|
| 231 |
+
return [_create_write_item_for_bytesio(fqn, object)]
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def _create_chunk_from_dtensor(tensor: DTensor) -> ChunkStorageMetadata:
    """Describe this rank's local slice of a DTensor as a chunk."""
    local_shape, global_offset = compute_local_shape_and_global_offset(
        tensor.shape, tensor.device_mesh, tensor.placements
    )
    return ChunkStorageMetadata(
        offsets=torch.Size(global_offset),
        sizes=torch.Size(local_shape),
    )
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def _create_chunk_list(tensor: torch.Tensor) -> List[ChunkStorageMetadata]:
|
| 246 |
+
if hasattr(tensor, "__create_chunk_list__"):
|
| 247 |
+
# DTensor implements _Checkpointable
|
| 248 |
+
local_chunks = tensor.__create_chunk_list__() # type: ignore[attr-defined]
|
| 249 |
+
elif isinstance(tensor, ShardedTensor):
|
| 250 |
+
local_chunks = [
|
| 251 |
+
_chunk_for_shard(shard.metadata) for shard in tensor.local_shards()
|
| 252 |
+
]
|
| 253 |
+
elif isinstance(tensor, torch.Tensor):
|
| 254 |
+
local_chunks = [_create_chunk_from_tensor(tensor)]
|
| 255 |
+
else:
|
| 256 |
+
raise ValueError(
|
| 257 |
+
"Unsupported Type, expecting one of [Tensor, DTensor, ShardedTensor] "
|
| 258 |
+
f",but got {type(tensor)}"
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
return local_chunks
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def _create_read_items(fqn: str, md: STORAGE_TYPES, obj: Any) -> List[ReadItem]:
|
| 265 |
+
if not isinstance(md, BytesStorageMetadata):
|
| 266 |
+
try:
|
| 267 |
+
local_chunks = _create_chunk_list(obj)
|
| 268 |
+
except ValueError as ex:
|
| 269 |
+
raise ValueError(
|
| 270 |
+
f"Invalid checkpoint metadata for {fqn}, "
|
| 271 |
+
+ f"expected BytesStorageMetadata but found {type(md)}",
|
| 272 |
+
) from ex
|
| 273 |
+
|
| 274 |
+
return create_read_items_for_chunk_list(fqn, md, local_chunks)
|
| 275 |
+
else:
|
| 276 |
+
return [
|
| 277 |
+
_create_read_item_for_byteio(
|
| 278 |
+
dest_index=MetadataIndex(fqn),
|
| 279 |
+
dest_offset=0,
|
| 280 |
+
storage_index=MetadataIndex(fqn),
|
| 281 |
+
storage_offset=0,
|
| 282 |
+
length=0,
|
| 283 |
+
)
|
| 284 |
+
]
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def _init_state_dict(state_dict: Dict[str, Any]) -> Any:
    """
    Initializes meta tensor if the meta tensor is DTensor or torch.Tensor.

    Walks ``state_dict`` via ``_iterate_state_dict`` and mutates it in place:
    meta-device DTensors and plain tensors are replaced with uninitialized
    storage on the process group's default device; a meta-device
    ShardedTensor is unsupported and raises. Values not on the meta device
    are returned unchanged.
    """

    def dtensor_func(value: DTensor):
        # Only materialize DTensors that still live on the meta device.
        device = getattr(value, "device", None)
        if device == torch.device("meta"):
            # NOTE(review): assumes a default process group is initialized —
            # _get_pg_default_device() will fail otherwise; confirm callers.
            device_type = dist.distributed_c10d._get_pg_default_device().type
            device = cast(
                torch.device, _get_device_module(device_type).current_device()
            )
            new_local_tensor = torch.empty_like(value.to_local(), device=device)
            # We need to pass shape and stride explicitly, since DTensor might be
            # sharded unevenly.
            dtensor = DTensor.from_local(
                new_local_tensor,
                device_mesh=value.device_mesh,
                placements=value.placements,
                shape=value.size(),
                stride=value.stride(),
            )
            return dtensor
        else:
            return value

    def sharded_tensor_func(value: Any):
        # Meta-device ShardedTensors cannot be materialized here.
        device = getattr(value, "device", None)
        if device == torch.device("meta"):
            raise RuntimeError(
                f"Found unsupported type {type(value)} for meta device loading."
            )
        else:
            return value

    def tensor_func(value: torch.Tensor):
        # Plain meta tensors are replaced with uninitialized storage on the
        # process group's default device.
        device = getattr(value, "device", None)
        if device == torch.device("meta"):
            device_type = dist.distributed_c10d._get_pg_default_device().type
            device = cast(
                torch.device, _get_device_module(device_type).current_device()
            )
            tensor = torch.empty_like(value, device=device)
            return tensor
        else:
            return value

    _iterate_state_dict(
        state_dict,
        dtensor_func,
        sharded_tensor_func,
        tensor_func,
    )
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def _iterate_state_dict(
|
| 343 |
+
iter_object: Any,
|
| 344 |
+
dtensor_func: Callable,
|
| 345 |
+
sharded_tensor_func: Callable,
|
| 346 |
+
tensor_func: Callable,
|
| 347 |
+
):
|
| 348 |
+
"""
|
| 349 |
+
Iterate through the state dict, applying the given functions to each tensor type
|
| 350 |
+
and update the state dict in place.
|
| 351 |
+
|
| 352 |
+
Args:
|
| 353 |
+
iter_object (Any): the target state_dict.
|
| 354 |
+
sharded_tensor_func (Callable): the function to apply to ShardedTensor
|
| 355 |
+
dtensor_func (Callable): the function to apply to DTensor
|
| 356 |
+
tensor_func (Callable): the function to apply to Tensor
|
| 357 |
+
|
| 358 |
+
# TODO: let state_dict_util._iterate_state_dict() to support in place option
|
| 359 |
+
so we don't need to have two versions of _iterate_state_dict.
|
| 360 |
+
"""
|
| 361 |
+
|
| 362 |
+
if isinstance(iter_object, DTensor):
|
| 363 |
+
return dtensor_func(iter_object)
|
| 364 |
+
elif isinstance(iter_object, ShardedTensor):
|
| 365 |
+
return sharded_tensor_func(iter_object)
|
| 366 |
+
elif isinstance(iter_object, torch.Tensor):
|
| 367 |
+
return tensor_func(iter_object)
|
| 368 |
+
elif (
|
| 369 |
+
isinstance(iter_object, (int, float, str, bytes, io.BytesIO))
|
| 370 |
+
or iter_object is None
|
| 371 |
+
):
|
| 372 |
+
return iter_object
|
| 373 |
+
elif isinstance(iter_object, dict):
|
| 374 |
+
for key, value in iter_object.items():
|
| 375 |
+
iter_object[key] = _iterate_state_dict(
|
| 376 |
+
value, dtensor_func, sharded_tensor_func, tensor_func
|
| 377 |
+
)
|
| 378 |
+
return iter_object
|
| 379 |
+
elif isinstance(iter_object, (list, tuple)):
|
| 380 |
+
ret = [
|
| 381 |
+
_iterate_state_dict(v, dtensor_func, sharded_tensor_func, tensor_func)
|
| 382 |
+
for v in iter_object
|
| 383 |
+
]
|
| 384 |
+
if isinstance(iter_object, tuple):
|
| 385 |
+
ret = tuple(ret) # type: ignore[assignment]
|
| 386 |
+
return ret
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/resharding.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import List, Tuple
|
| 3 |
+
|
| 4 |
+
from torch.distributed.checkpoint.metadata import ChunkStorageMetadata
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
__all__: List[str] = []
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def _check_shard_metadata_pair_overlap(
|
| 11 |
+
shard1: ChunkStorageMetadata, shard2: ChunkStorageMetadata
|
| 12 |
+
):
|
| 13 |
+
"""Check if two shards overlap."""
|
| 14 |
+
# For each dim of each shard, check if one shard resides on the other
|
| 15 |
+
# end of second shard with respect to that dim. As an example for a 2D
|
| 16 |
+
# shard, we would check if one shard is above or on the left of the
|
| 17 |
+
# other shard.
|
| 18 |
+
ndims = len(shard1.offsets)
|
| 19 |
+
for i in range(ndims):
|
| 20 |
+
if shard1.offsets[i] >= shard2.offsets[i] + shard2.sizes[i]:
|
| 21 |
+
return False
|
| 22 |
+
if shard2.offsets[i] >= shard1.offsets[i] + shard1.sizes[i]:
|
| 23 |
+
return False
|
| 24 |
+
|
| 25 |
+
return True
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _shards_get_overlap_region_wrt_saved_tensor(
|
| 29 |
+
saved_shard: ChunkStorageMetadata, current_shard: ChunkStorageMetadata
|
| 30 |
+
) -> List[Tuple[int, int, int, int]]:
|
| 31 |
+
"""
|
| 32 |
+
Return the overlapping region between saved_shard and current_shard.
|
| 33 |
+
|
| 34 |
+
There returned list has the same number of elements as the tensor's dimension.
|
| 35 |
+
For each element, we produce a tuple with the following contents:
|
| 36 |
+
(dimension, `saved_shard` offset, `current_shard` offset, length)
|
| 37 |
+
|
| 38 |
+
Offsets are relative to each shard.
|
| 39 |
+
"""
|
| 40 |
+
narrows = []
|
| 41 |
+
for dim, (
|
| 42 |
+
saved_shard_offset,
|
| 43 |
+
current_shard_offset,
|
| 44 |
+
saved_shard_size,
|
| 45 |
+
current_shard_size,
|
| 46 |
+
) in enumerate(
|
| 47 |
+
zip(
|
| 48 |
+
saved_shard.offsets,
|
| 49 |
+
current_shard.offsets,
|
| 50 |
+
saved_shard.sizes,
|
| 51 |
+
current_shard.sizes,
|
| 52 |
+
)
|
| 53 |
+
):
|
| 54 |
+
min_range_end = min(
|
| 55 |
+
saved_shard_offset + saved_shard_size,
|
| 56 |
+
current_shard_offset + current_shard_size,
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
length = min_range_end - max(current_shard_offset, saved_shard_offset)
|
| 60 |
+
|
| 61 |
+
if saved_shard_offset > current_shard_offset:
|
| 62 |
+
offset_for_saved_tensor = 0
|
| 63 |
+
offset_for_current_tensor = saved_shard_offset - current_shard_offset
|
| 64 |
+
else:
|
| 65 |
+
offset_for_saved_tensor = current_shard_offset - saved_shard_offset
|
| 66 |
+
offset_for_current_tensor = 0
|
| 67 |
+
|
| 68 |
+
narrows.append(
|
| 69 |
+
(dim, offset_for_saved_tensor, offset_for_current_tensor, length)
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
return narrows
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/staging.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, runtime_checkable
|
| 2 |
+
from typing_extensions import Protocol
|
| 3 |
+
|
| 4 |
+
from torch.distributed._state_dict_utils import (
|
| 5 |
+
_copy_state_dict,
|
| 6 |
+
_create_cpu_state_dict,
|
| 7 |
+
_offload_state_dict_to_cpu,
|
| 8 |
+
)
|
| 9 |
+
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
__all__ = ["AsyncStager", "BlockingAsyncStager"]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@runtime_checkable
class AsyncStager(Protocol):
    """
    This protocol is meant to provide customization and extensibility for dcp.async_save, allowing users
    to customize how data is staged previous to executing the usual dcp.save path in parallel.
    The expected order of operations (concretely defined in `torch.distributed.state_dict_saver.async_save`)
    is the following:

    1. AsyncStager.stage_data(state_dict):
        This call gives the AsyncStager the opportunity to 'stage'
        the state_dict. The expectation and purpose of staging in this context is to create a "training-safe"
        representation of the state dict, meaning that any updates to module data after staging is complete
        should not be reflected in the state dict returned from this method. For example, in the default
        case a copy of the entire state dict is created on CPU RAM and returned here, allowing users
        to continue training without risking changes to data which is being serialized.

    2. dcp.save is called on the state_dict returned from stage in parallel. This call is responsible
        for serializing the state_dict and writing it to storage.

    3. If AsyncStager.should_synchronize_after_execute is True, this method will be called immediately after
        the serialization thread starts and before returning from dcp.async_save. If this is set to False,
        the assumption is the user has defined a custom synchronization point for the purpose of further
        optimizing save latency in the training loop (for example, by overlapping staging with the
        forward/backward pass), and it is the responsibility of the user to call `AsyncStager.synchronize_staging`
        at the appropriate time.

    """

    # default to True since the common case is to stage synchronously
    _synchronize_after_execute: bool = True

    @property
    def should_synchronize_after_execute(self) -> bool:
        """
        Whether to synchronize after executing the stage.
        """

        return self._synchronize_after_execute

    def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
        """
        Returns a "staged" copy of `state_dict`. The expectation of the staged copy is that it is
        inoculated from any updates incurred after the stage call is complete.
        """
        raise NotImplementedError(
            f"{self.__class__.__name__} must implement stage method"
        )

    def synchronize_staging(self) -> None:
        """
        In the case `stage` is async in some way, this method should be called to ensure staging
        is complete and it is safe to begin modifying the original `state_dict`
        """
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class BlockingAsyncStager(AsyncStager):
    """
    An implementation of AsyncStager which stages the state_dict on CPU RAM and blocks until the copy is complete.
    This implementation also provides an option to optimize stage latency using pinned memory.

    N.B. synchronize_staging is a no-op in this case.


    """

    # Staging blocks until the copy completes, so no post-execute
    # synchronization is needed (unlike the AsyncStager default of True).
    _synchronize_after_execute: bool = False

    def __init__(
        self,
        cache_staged_state_dict: bool = False,
        type_check: bool = False,
    ):
        """
        Initializes the BlockingAsyncStager.

        Args:
            cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency
                at the cost of increased memory usage. Additionally, if this parameter is set to True, it's the expectation
                that the stager is maintained and re-used for multiple dcp.async_save calls. Default to False.
            type_check: Whether to perform a type check during cpu_offload. Defaults to False.

        """
        self.cache_staged_state_dict = cache_staged_state_dict
        self.type_check = type_check
        # Pinned-memory CPU copy, allocated lazily and reused across saves
        # when caching is enabled.
        self.state_dict_cache: Optional[STATE_DICT_TYPE] = None

    def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
        """
        Returns a copy of `state_dict` on the CPU.
        """

        if not self.cache_staged_state_dict:
            return _offload_state_dict_to_cpu(state_dict, type_check=self.type_check)

        # Allocate the pinned cache on first use, then copy into it in place.
        if self.state_dict_cache is None:
            self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True)
        return _copy_state_dict(state_dict, self.state_dict_cache)

    def synchronize_staging(self) -> None:
        """
        No-op function, since staging is blocking.
        """
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_loader.py
ADDED
|
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
import os
|
| 4 |
+
import warnings
|
| 5 |
+
from typing import Any, cast, Dict, Optional, Set, Union
|
| 6 |
+
from typing_extensions import deprecated
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.distributed as dist
|
| 10 |
+
from torch.distributed.checkpoint.default_planner import _EmptyStateDictLoadPlanner
|
| 11 |
+
from torch.distributed.checkpoint.logger import _dcp_method_logger
|
| 12 |
+
from torch.distributed.checkpoint.stateful import Stateful
|
| 13 |
+
|
| 14 |
+
from ._storage_utils import _storage_setup
|
| 15 |
+
from .default_planner import DefaultLoadPlanner
|
| 16 |
+
from .planner import LoadPlan, LoadPlanner
|
| 17 |
+
from .storage import StorageReader
|
| 18 |
+
from .utils import _all_gather_keys, _api_bc_check, _DistWrapper, _profile
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
__all__ = ["load_state_dict", "load"]
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@deprecated(
    "`load_state_dict` is deprecated and will be removed in future versions. "
    "Please use `load` instead.",
    category=FutureWarning,
)
def load_state_dict(
    state_dict: Dict[str, Any],
    storage_reader: StorageReader,
    process_group: Optional[dist.ProcessGroup] = None,
    coordinator_rank: int = 0,
    no_dist: bool = False,
    planner: Optional[LoadPlanner] = None,
) -> None:
    """This method is deprecated. Please switch to 'load'."""
    # Clear any per-checkpoint reader state before delegating to the
    # internal loader (the `load` path handles this via _storage_setup).
    storage_reader.reset()
    with _profile():
        # TODO: test returning `load` here instead.
        return _load_state_dict(
            state_dict,
            storage_reader,
            process_group,
            coordinator_rank,
            no_dist,
            planner,
        )
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@_dcp_method_logger(log_exceptions=True)
@_api_bc_check
def load(
    state_dict: Dict[str, Any],
    *,
    checkpoint_id: Union[str, os.PathLike, None] = None,
    storage_reader: Optional[StorageReader] = None,
    planner: Optional[LoadPlanner] = None,
    process_group: Optional[dist.ProcessGroup] = None,
) -> None:
    """
    Load a distributed ``state_dict`` in SPMD style.

    Each rank will try to read the least amount of data necessary
    to fulfill the requested `state_dict`. When loading :class:`ShardedTensor`
    or :class:`DTensor` instances, each rank only reads data for their local shards.

    For each ``Stateful`` object (having both a ``state_dict`` and a ``load_state_dict``),
    load will first call ``state_dict`` before attempting deserialization, followed by
    ``load_state_dict`` once the deserialization is complete.

    .. warning::
        All tensors in ``state_dict`` must be allocated on their
        destination device *prior to* calling this function.

        All non-tensor data is loaded using `torch.load()` and modified in place
        on state_dict.

    .. warning::
        Users must call `load_state_dict` on the root module to ensure load
        post-processing and non-tensor data properly propagates.

    .. note:
        If no process group is initialized, this function will assume the intent
        is to load a checkpoint into the local process. This can be useful in the
        case of local inference, and when using regular Tensors (as opposed to DTensor
        or ShardedTensor)

    .. note:
        Rank 0 is assumed to be the coordinator rank.

    Args:
        state_dict (Dict[str, Any]): The state_dict to load the checkpoint into.
        checkpoint_id (Union[str, os.PathLike, None]):
            The ID of this checkpoint instance. The meaning of the checkpoint_id
            depends on the storage. It can be a path to a folder or to a file.
            It can also be a key if the storage is a key-value store.
            (Default: ``None``)
        storage_reader (Optional[StorageReader]):
            Instance of StorageReader used to perform reads. If this is not
            specified, DCP will automatically infer the reader based on the
            checkpoint_id. If checkpoint_id is also None, an exception will
            be raised. (Default: ``None``)
        planner (Optional[LoadPlanner]):
            Instance of LoadPlanner. If this is not specified, the default
            planner will be used. (Default: ``None``)
        process_group (Optional[ProcessGroup]):
            ProcessGroup to be used for cross-rank synchronization.
            (Default: ``None``)

    Returns:
        None.

    Examples
        >>> # xdoctest: +SKIP
        >>> my_model = MyModule()
        >>> optimizer = Adagrad(my_model.parameters())
        >>> model_state_dict = my_model.state_dict()
        >>> fs_storage_reader = torch.distributed.checkpoint.FileSystemReader("/checkpoint/1")

        >>> torch.distributed.checkpoint.load_state_dict(
        >>>     state_dict=model_state_dict,
        >>>     storage_reader=fs_storage_reader,
        >>> )

        >>> # module.load_state_dict() function might have customized steps
        >>> # to flush the state_dict, must call it to
        >>> # ensure correct behavior.
        >>> my_model.load_state_dict(model_state_dict)

    .. note::
        load_state_dict uses collectives to coordinate reads across ranks.
        For NCCL-based process groups, internal tensor representations of
        objects must be moved to the GPU device before communication takes place.
        In this case, the device used is given by ``torch.cuda.current_device()``
        and it is the user's responsibility to ensure that this is set so that each
        rank has an individual GPU, via ``torch.cuda.set_device()``.
    """

    no_dist = not (dist.is_available() and dist.is_initialized())
    if no_dist:
        warnings.warn(
            "torch.distributed is unavailable or uninitialized, assuming the intent is to load in a single process."
        )

    with _profile():
        # Resolve the reader from checkpoint_id if one was not provided.
        storage_reader = cast(
            StorageReader, _storage_setup(storage_reader, checkpoint_id, reader=True)
        )

        if no_dist:
            keys = list(state_dict.keys())
        else:
            keys = _all_gather_keys(state_dict, process_group)
            if keys != sorted(state_dict.keys()):
                # Fixed: the original message duplicated "may cause errors".
                warnings.warn(
                    "Detected mismatched keys in state dict after all gather!"
                    " This behavior is unsupported and may cause errors."
                )

        # Snapshot Stateful values via state_dict() so deserialization runs
        # against plain containers; results are handed back below through
        # load_state_dict().
        stateful_sd = {}
        for key in keys:
            if key not in state_dict:
                continue
            elem = state_dict[key]
            stateful_sd[key] = (
                elem.state_dict() if isinstance(elem, Stateful) else elem
            )

        _load_state_dict(
            state_dict=stateful_sd,
            storage_reader=storage_reader,
            process_group=process_group,
            no_dist=no_dist,
            planner=planner,
        )
        for key in keys:
            if key not in state_dict:
                continue
            elem = state_dict[key]
            if isinstance(elem, Stateful):
                # Stateful objects consume the loaded data themselves.
                elem.load_state_dict(stateful_sd[key])
            state_dict[key] = stateful_sd[key]
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def _load_state_dict(
    state_dict: Dict[str, Any],
    storage_reader: StorageReader,
    process_group: Optional[dist.ProcessGroup] = None,
    coordinator_rank: int = 0,
    no_dist: bool = False,
    planner: Optional[LoadPlanner] = None,
) -> None:
    """Core SPMD load: plan locally, merge plans on the coordinator, then read.

    Every rank builds a local ``LoadPlan``; the coordinator merges them into a
    global plan that is scattered back, after which each rank reads its share
    of the checkpoint data. Cross-rank synchronization goes through
    ``_DistWrapper`` (a no-op wrapper when ``no_dist`` is True).

    Args:
        state_dict: Target dictionary populated in place by the planner.
        storage_reader: Storage backend used for metadata and data reads.
        process_group: Optional group for cross-rank synchronization.
        coordinator_rank: Rank acting as the planning coordinator.
        no_dist: When True, run entirely in the local process.
        planner: Load planner; defaults to ``DefaultLoadPlanner``.
    """
    torch._C._log_api_usage_once("torch.distributed.checkpoint.load_state_dict")

    dist_wrapper = _DistWrapper(process_group, not no_dist, coordinator_rank)
    planner = DefaultLoadPlanner() if planner is None else planner

    # Attach the checkpoint id (when the reader exposes one) to each logged step.
    log_kwargs = {}
    ckpt_id = getattr(storage_reader, "checkpoint_id", None)
    if ckpt_id is not None:
        log_kwargs["checkpoint_id"] = ckpt_id

    @_dcp_method_logger(**log_kwargs)
    def local_step():
        # Runs on every rank: read metadata, then produce this rank's plan.
        assert planner is not None
        metadata = storage_reader.read_metadata()
        planner.set_up_planner(state_dict, metadata, dist_wrapper.is_coordinator)
        storage_reader.set_up_storage_reader(metadata, dist_wrapper.is_coordinator)
        return storage_reader.prepare_local_plan(planner.create_local_plan())

    @_dcp_method_logger(**log_kwargs)
    def global_step(all_local_plans):
        # Runs only on the coordinator: merge and post-process all local plans.
        assert planner is not None
        merged_plans = planner.create_global_plan(all_local_plans)
        return storage_reader.prepare_global_plan(merged_plans)

    central_plan: LoadPlan = dist_wrapper.reduce_scatter(
        "plan", local_step, global_step
    )

    @_dcp_method_logger(**log_kwargs)
    def read_data():
        # Runs on every rank: execute the finalized plan and wait for all reads.
        assert planner is not None
        final_plan = planner.finish_plan(central_plan)
        storage_reader.read_data(final_plan, planner).wait()
        return None

    # all_gather doubles as a barrier so every rank finishes reading together.
    _ = dist_wrapper.all_gather("read", read_data)
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def _load_state_dict_from_keys(
    keys: Optional[Union[Set[str], str]] = None,
    *,
    checkpoint_id: Union[str, os.PathLike, None] = None,
    storage_reader: Optional[StorageReader] = None,
    process_group: Optional[dist.ProcessGroup] = None,
) -> Dict[str, Any]:
    """
    Load only the specified keys from the checkpoint, if no keys are specified, the entire
    checkpoint will be loaded. Note, this method completely loads the checkpoint into the
    current process and is not distributed.

    .. warning::

        All non-tensor data is loaded using `torch.load()`

    .. note::
        As opposed to the usual pattern, this function does not take a state dict as input
        and does not load inplace. Instead, a new state dict is directly initialized and read
        from file.

    .. note::
        If no process group is initialized, this function will assume the intent
        is to load a checkpoint into the local process. This can be useful in the
        case of local inference, and when using regular Tensors (as opposed to DTensor
        or ShardedTensor)

    .. note::
        Rank 0 is assumed to be the coordinator rank.

    Args:
        keys (Optional[Union[Set[str], str]]):
            Loads any key specified in this set. If no keys are specified, the entire checkpoint
            is loaded.
        checkpoint_id (Union[str, os.PathLike, None]):
            The ID of this checkpoint instance. The meaning of the checkpoint_id
            depends on the storage. It can be a path to a folder or to a file.
            It can also be a key if the storage is a key-value store.
            (Default: ``None``)
        storage_reader (Optional[StorageReader]):
            Instance of StorageReader used to perform reads. If this is not
            specified, DCP will automatically infer the reader based on the
            checkpoint_id. If checkpoint_id is also None, an exception will
            be raised. (Default: ``None``)
        process_group (Optional[ProcessGroup]):
            ProcessGroup to be used for cross-rank synchronization.
            (Default: ``None``)

    Returns:
        State dict from specified keys
    """
    torch._C._log_api_usage_once(
        "torch.distributed.checkpoint._load_state_dict_from_keys"
    )

    no_dist = not (dist.is_available() and dist.is_initialized())
    if no_dist:
        warnings.warn(
            "torch.distributed is unavailable or uninitialized, assuming the intent is to load in a single process."
        )

    # Resolve the reader from checkpoint_id when one was not supplied.
    storage_reader = cast(
        StorageReader, _storage_setup(storage_reader, checkpoint_id, reader=True)
    )

    # Normalize a single key into a one-element set.
    if isinstance(keys, str):
        keys = {keys}

    # An empty dict plus _EmptyStateDictLoadPlanner materializes the requested
    # keys directly from the checkpoint metadata instead of loading in place.
    sd: Dict[str, Any] = {}
    _load_state_dict(
        state_dict=sd,
        storage_reader=storage_reader,
        process_group=process_group,
        no_dist=no_dist,
        planner=_EmptyStateDictLoadPlanner(keys=keys or set()),
    )

    return sd
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/state_dict_saver.py
ADDED
|
@@ -0,0 +1,333 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-decorators
|
| 2 |
+
# mypy: allow-untyped-defs
|
| 3 |
+
import inspect
|
| 4 |
+
import os
|
| 5 |
+
import warnings
|
| 6 |
+
from concurrent.futures import Future, ThreadPoolExecutor
|
| 7 |
+
from typing import cast, Optional, Union
|
| 8 |
+
from typing_extensions import deprecated
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.distributed as dist
|
| 12 |
+
from torch.distributed._state_dict_utils import _offload_state_dict_to_cpu
|
| 13 |
+
from torch.distributed.checkpoint._storage_utils import _storage_setup
|
| 14 |
+
from torch.distributed.checkpoint.default_planner import DefaultSavePlanner
|
| 15 |
+
from torch.distributed.checkpoint.logger import _dcp_method_logger
|
| 16 |
+
from torch.distributed.checkpoint.metadata import Metadata, STATE_DICT_TYPE
|
| 17 |
+
from torch.distributed.checkpoint.planner import SavePlan, SavePlanner
|
| 18 |
+
from torch.distributed.checkpoint.staging import AsyncStager
|
| 19 |
+
from torch.distributed.checkpoint.stateful import Stateful
|
| 20 |
+
from torch.distributed.checkpoint.storage import StorageWriter
|
| 21 |
+
from torch.distributed.distributed_c10d import _get_default_group
|
| 22 |
+
|
| 23 |
+
from .utils import _api_bc_check, _DistWrapper, _profile
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
__all__ = ["save_state_dict", "save", "async_save"]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@deprecated(
    "`save_state_dict` is deprecated and will be removed in future versions."
    "Please use `save` instead.",
    category=FutureWarning,
)
def save_state_dict(
    state_dict: STATE_DICT_TYPE,
    storage_writer: StorageWriter,
    process_group: Optional[dist.ProcessGroup] = None,
    coordinator_rank: int = 0,
    no_dist: bool = False,
    planner: Optional[SavePlanner] = None,
) -> Metadata:
    """Deprecated entry point kept for backward compatibility; use ``save``.

    Resets the writer, then delegates to ``_save_state_dict`` under profiling.
    """
    storage_writer.reset()

    # TODO: test returning `save` here instead.
    with _profile():
        return _save_state_dict(
            state_dict=state_dict,
            storage_writer=storage_writer,
            process_group=process_group,
            coordinator_rank=coordinator_rank,
            no_dist=no_dist,
            planner=planner,
        )
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@_dcp_method_logger(log_exceptions=True)  # type: ignore[arg-type]
@_api_bc_check
def save(
    state_dict: STATE_DICT_TYPE,
    *,
    checkpoint_id: Union[str, os.PathLike, None] = None,
    storage_writer: Optional[StorageWriter] = None,
    planner: Optional[SavePlanner] = None,
    process_group: Optional[dist.ProcessGroup] = None,
) -> Metadata:
    """
    Save a distributed model in SPMD style.

    This function is different from ``torch.save()`` as it handles
    ``ShardedTensor`` , and ``DTensor`` by having each rank only save their local shards.

    For each ``Stateful`` object (having both a ``state_dict`` and a ``load_state_dict``),
    save will call ``state_dict`` before serialization.

    .. warning::
        There are no guarantees of backwards compatibility across PyTorch versions
        for saved state_dicts.

    .. warning::
        If using the `process_group` argument, make sure that only its ranks
        call `save_state_dict` and that all data in state_dict belong to it.

    .. note::
        When saving checkpoint for FSDP's `ShardingStrategy.HYBRID_SHARD`, only one of
        the shard_group should be calling `save_state_dict` and the corresponding process
        group needs to be passed in.

    .. note::
        If no process group is available, this function assumes the intention is to save the
        state_dict in the local process.

    .. note::
        Rank 0 is assumed to be the coordinator rank.


    Args:
        state_dict (Dict[str, Any]): The state_dict to save.
        checkpoint_id (Union[str, os.PathLike, None]):
            The ID of this checkpoint instance. The meaning of the checkpoint_id
            depends on the storage. It can be a path to a folder or to a file.
            It can also be a key if the storage is a key-value store.
            (Default: ``None``)
        storage_writer (Optional[StorageWriter]):
            Instance of StorageWriter used to perform writes. If this is not
            specified, DCP will automatically infer the writer based on the
            checkpoint_id. If checkpoint_id is also None, an exception will
            be raised. (Default: ``None``)
        planner (Optional[SavePlanner]):
            Instance of SavePlanner. If this is not specified, the default
            planner will be used. (Default: ``None``)
        process_group (Optional[ProcessGroup]):
            ProcessGroup to be used for cross-rank synchronization.
            (Default: ``None``)

    Returns:
        Metadata: Metadata object for the saved checkpoint.

    Example:
        >>> # xdoctest: +SKIP
        >>> my_model = MyModule()

        >>> state_dict = {"model": my_model}

        >>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1")
        >>> torch.distributed.checkpoint.save(
        >>>     state_dict=state_dict,
        >>>     storage_writer=fs_storage_writer,
        >>> )

    .. note::
        save_state_dict uses collectives to coordinate writes across ranks.
        For NCCL-based process groups, internal tensor representations of
        objects must be moved to the GPU device before communication takes place.
        In this case, the device used is given by ``torch.cuda.current_device()``
        and it is the user's responsibility to ensure that this is set so that
        each rank has an individual GPU, via ``torch.cuda.set_device()``.
    """
    torch._C._log_api_usage_once("torch.distributed.checkpoint.save")

    # Fall back to single-process mode when no process group is initialized.
    no_dist = not (dist.is_available() and dist.is_initialized())
    if no_dist:
        warnings.warn(
            "torch.distributed is unavailable or uninitialized, assuming the intent is to save in a single process."
        )

    with _profile():
        # Resolve the writer from checkpoint_id when one was not supplied.
        storage_writer = cast(
            StorageWriter, _storage_setup(storage_writer, checkpoint_id, reader=False)
        )

        return _save_state_dict(
            # Materialize Stateful objects into plain dicts before planning.
            state_dict=_stateful_to_state_dict(state_dict),
            storage_writer=storage_writer,
            process_group=process_group,
            no_dist=no_dist,
            planner=planner,
        )
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
@_dcp_method_logger(log_exceptions=True)
def async_save(
    state_dict: STATE_DICT_TYPE,
    *,
    checkpoint_id: Union[str, os.PathLike, None] = None,
    storage_writer: Optional[StorageWriter] = None,
    planner: Optional[SavePlanner] = None,
    process_group: Optional[dist.ProcessGroup] = None,
) -> Future:
    """Asynchronous version of ``save``. This code first de-stages the state_dict on to the
    staging storage (defaults to CPU memory), and then calls the `save` in a separate thread.

    .. warning::
        This feature is experimental and subject to change.

    Args:
        state_dict (Dict[str, Any]): The state_dict to save.
        checkpoint_id (Union[str, os.PathLike, None]):
            The ID of this checkpoint instance. The meaning of the checkpoint_id
            depends on the storage. It can be a path to a folder or to a file.
            It can also be a key if the storage is a key-value store.
            (Default: ``None``)
        storage_writer (Optional[StorageWriter]):
            Instance of StorageWriter used to perform 'stage' and 'save'. If
            this is not specified, DCP will automatically infer the writer based on the
            checkpoint_id. If checkpoint_id is also None, an exception will
            be raised. (Default: ``None``)
        planner (Optional[SavePlanner]):
            Instance of SavePlanner. If this is not specified, the default
            planner will be used. (Default: ``None``)
        process_group (Optional[ProcessGroup]):
            ProcessGroup to be used for cross-rank synchronization.
            (Default: ``None``)

    Returns:
        Future: A future holding the resultant Metadata object from `save`.

    Example:
        >>> # xdoctest: +SKIP
        >>> my_model = MyModule()

        >>> state_dict = {"model": my_model}

        >>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1")
        >>> checkpoint_future = torch.distributed.checkpoint.async_save(
        >>>     state_dict=state_dict,
        >>>     storage_writer=fs_storage_writer,
        >>> )
        >>>
        >>> # ... do some work ...
        >>>
        >>> checkpoint_future.result()

    """
    torch._C._log_api_usage_once("torch.distributed.checkpoint.async_save")

    if dist.is_available() and dist.is_initialized():
        pg = process_group or _get_default_group()
        # The background save thread issues collectives; those need a CPU-capable
        # backend (e.g. gloo) because staged tensors live in CPU memory.
        assert (
            torch.device("cpu") in pg._device_types  # type: ignore[attr-defined]
        ), "A CPU backend must be enabled for async save; try initializing process group with 'cpu:gloo,cuda:nccl'"

    # Resolve the writer from checkpoint_id when one was not supplied.
    storage_writer = cast(
        StorageWriter, _storage_setup(storage_writer, checkpoint_id, reader=False)
    )

    # Materialize Stateful objects, then stage the result off the training
    # device so the caller can resume mutating the originals immediately.
    state_dict = _stateful_to_state_dict(state_dict)
    if isinstance(storage_writer, AsyncStager):
        staged_state_dict = storage_writer.stage(state_dict)
    else:  # provides bwc for storage_writers not implementing AsyncStager
        staged_state_dict = _offload_state_dict_to_cpu(state_dict, type_check=False)

    # One single-use worker thread performs the actual (synchronous) save; the
    # done-callback tears the executor down without blocking this thread.
    executor = ThreadPoolExecutor(max_workers=1)
    f: Future = executor.submit(
        save,
        staged_state_dict,
        checkpoint_id=checkpoint_id,
        storage_writer=storage_writer,
        planner=planner,
        process_group=process_group,
    )
    f.add_done_callback(lambda f: executor.shutdown(wait=False))

    if (
        isinstance(storage_writer, AsyncStager)
        and storage_writer.should_synchronize_after_execute
    ):
        storage_writer.synchronize_staging()

    return f
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def _stateful_to_state_dict(state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
|
| 254 |
+
"""Creates a shallow copy of `state_dict` where `state_dict` is called for each Stateful object."""
|
| 255 |
+
stateful_state_dict = {}
|
| 256 |
+
for key, elem in state_dict.items():
|
| 257 |
+
stateful_state_dict[key] = (
|
| 258 |
+
elem.state_dict() if isinstance(elem, Stateful) else elem
|
| 259 |
+
)
|
| 260 |
+
return stateful_state_dict
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def _save_state_dict(
    state_dict: STATE_DICT_TYPE,
    storage_writer: StorageWriter,
    process_group: Optional[dist.ProcessGroup] = None,
    coordinator_rank: int = 0,
    no_dist: bool = False,
    planner: Optional[SavePlanner] = None,
) -> Metadata:
    """Core SPMD save: plan locally, merge plans globally, write, then finalize.

    Each rank produces a local ``SavePlan``; the coordinator merges them into a
    global plan (also producing the global ``Metadata``), the merged plan is
    scattered back, every rank writes its data, and the coordinator persists
    the metadata via ``storage_writer.finish``. Returns that global Metadata.
    """
    torch._C._log_api_usage_once("torch.distributed.checkpoint.save_state_dict")

    # Wrapper is a no-op coordinator when no_dist is True.
    distW = _DistWrapper(process_group, not no_dist, coordinator_rank)
    if planner is None:
        planner = DefaultSavePlanner()
    assert planner is not None

    # Filled in by global_step (runs on the coordinator only).
    global_metadata = None

    # Attach the checkpoint id (when the writer exposes one) to each logged step.
    ckpt_kwargs = {}
    if (ckpt_id := getattr(storage_writer, "checkpoint_id", None)) is not None:
        ckpt_kwargs["checkpoint_id"] = ckpt_id

    @_dcp_method_logger(**ckpt_kwargs)
    def local_step():
        # Runs on every rank: set up the planner/writer and build a local plan.
        assert planner is not None
        storage_meta = storage_writer.storage_meta()
        # Back-compat shim: older SavePlanner implementations predate the
        # storage_meta parameter on set_up_planner.
        if "storage_meta" not in inspect.signature(planner.set_up_planner).parameters:
            warnings.warn(
                "The function definition for SavePlanner.set_up_planner has been updated"
                " to include the storage_meta argument. Please update your implementation"
                " to include this parameter."
            )
            planner.set_up_planner(state_dict, distW.is_coordinator)  # type: ignore[call-arg, arg-type]
        else:
            planner.set_up_planner(
                state_dict=state_dict,
                storage_meta=storage_meta,
                is_coordinator=distW.is_coordinator,
            )
        storage_writer.set_up_storage_writer(distW.is_coordinator)

        local_plan = planner.create_local_plan()
        local_plan = storage_writer.prepare_local_plan(local_plan)
        return local_plan

    @_dcp_method_logger(**ckpt_kwargs)
    def global_step(all_local_plans):
        # Runs only on the coordinator: merge plans and capture global metadata.
        nonlocal global_metadata

        assert planner is not None
        all_local_plans, global_metadata = planner.create_global_plan(all_local_plans)
        all_local_plans = storage_writer.prepare_global_plan(all_local_plans)
        return all_local_plans

    central_plan: SavePlan = distW.reduce_scatter("plan", local_step, global_step)

    @_dcp_method_logger(**ckpt_kwargs)
    def write_data():
        # Runs on every rank: execute this rank's finalized plan and collect
        # the WriteResults once all writes complete.
        assert planner is not None
        final_local_plan = planner.finish_plan(central_plan)
        all_writes = storage_writer.write_data(final_local_plan, planner)

        all_writes.wait()
        return all_writes.value()

    @_dcp_method_logger(**ckpt_kwargs)
    def finish_checkpoint(all_results):
        # Runs only on the coordinator: persist metadata, marking success.
        assert global_metadata is not None
        storage_writer.finish(metadata=global_metadata, results=all_results)
        return global_metadata

    # all_reduce gathers every rank's WriteResults to the coordinator and
    # broadcasts finish_checkpoint's return value (the Metadata) to all ranks.
    return distW.all_reduce("write", write_data, finish_checkpoint)
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/stateful.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, runtime_checkable, TypeVar
|
| 2 |
+
from typing_extensions import Protocol
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
__all__ = ["Stateful", "StatefulT"]
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@runtime_checkable
class Stateful(Protocol):
    """
    Stateful protocol for objects that can be checkpointed and restored.

    Because this is a ``runtime_checkable`` ``Protocol``, any object defining
    both ``state_dict`` and ``load_state_dict`` satisfies
    ``isinstance(obj, Stateful)`` — no inheritance required.
    """

    def state_dict(self) -> Dict[str, Any]:
        """
        Objects should return their state_dict representation as a dictionary.
        The output of this function will be checkpointed, and later restored in
        `load_state_dict()`.

        .. warning::
            Because of the inplace nature of restoring a checkpoint, this function
            is also called during `torch.distributed.checkpoint.load`.


        Returns:
            Dict: The objects state dict
        """

        ...

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """
        Restore the object's state from the provided state_dict.

        Args:
            state_dict: The state dict to restore from
        """

        ...


# Type variable bound to Stateful, for APIs generic over checkpointable objects.
StatefulT = TypeVar("StatefulT", bound=Stateful)
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/storage.py
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import abc
|
| 2 |
+
import os
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Any, List, Optional, Union
|
| 5 |
+
|
| 6 |
+
from torch.distributed.checkpoint.metadata import Metadata, MetadataIndex, StorageMeta
|
| 7 |
+
from torch.distributed.checkpoint.planner import (
|
| 8 |
+
LoadPlan,
|
| 9 |
+
LoadPlanner,
|
| 10 |
+
SavePlan,
|
| 11 |
+
SavePlanner,
|
| 12 |
+
)
|
| 13 |
+
from torch.futures import Future
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
__all__ = ["WriteResult", "StorageWriter", "StorageReader"]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@dataclass(frozen=True)
class WriteResult:
    """Immutable record describing one completed storage write."""

    # Index identifying which checkpoint item (and shard) was written.
    index: MetadataIndex

    # Number of bytes written for this item.
    size_in_bytes: int
    # Storage-specific payload needed to locate the written data later
    # (implementation-defined; e.g. presumably a file name/offset for
    # filesystem writers — determined by the StorageWriter in use).
    storage_data: Any
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class StorageWriter(abc.ABC):
|
| 28 |
+
"""
|
| 29 |
+
Interface used by ``save_state_dict`` to write to storage.
|
| 30 |
+
|
| 31 |
+
One StorageWriter instance acts as both the coordinator and the follower
|
| 32 |
+
in a distributed checkpoint. As part of initialization, each instance
|
| 33 |
+
is told its role.
|
| 34 |
+
|
| 35 |
+
A subclass should expect the following sequence of calls.
|
| 36 |
+
|
| 37 |
+
0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id.
|
| 38 |
+
1) (all ranks) set_up_storage_writer()
|
| 39 |
+
2) (all ranks) prepare_local_plan()
|
| 40 |
+
3) (coordinator) prepare_global_plan()
|
| 41 |
+
4) (all ranks) write_data()
|
| 42 |
+
5) (coordinator) finish()
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
@abc.abstractmethod
|
| 46 |
+
def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
|
| 47 |
+
"""
|
| 48 |
+
Calls to indicates a brand new checkpoint write is going to happen.
|
| 49 |
+
A checkpoint_id may be present if users set the checkpoint_id for
|
| 50 |
+
this checkpoint write. The meaning of the checkpiont_id is
|
| 51 |
+
storage-dependent. It can be a path to a folder/file or a key for
|
| 52 |
+
a key-value storage.
|
| 53 |
+
|
| 54 |
+
Args:
|
| 55 |
+
checkpoint_id (Union[str, os.PathLike, None]):
|
| 56 |
+
The ID of this checkpoint instance. The meaning of the checkpoint_id
|
| 57 |
+
depends on the storage. It can be a path to a folder or to a file.
|
| 58 |
+
It can also be a key if the storage is a key-value store.
|
| 59 |
+
(Default: ``None``)
|
| 60 |
+
"""
|
| 61 |
+
...
|
| 62 |
+
|
| 63 |
+
@abc.abstractmethod
|
| 64 |
+
def set_up_storage_writer(self, is_coordinator: bool) -> None:
|
| 65 |
+
"""
|
| 66 |
+
Initialize this instance.
|
| 67 |
+
|
| 68 |
+
Args:
|
| 69 |
+
is_coordinator (bool): Whether this instance is responsible for coordinating
|
| 70 |
+
the checkpoint.
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
@abc.abstractmethod
|
| 74 |
+
def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
|
| 75 |
+
"""
|
| 76 |
+
Perform storage-specific local planning.
|
| 77 |
+
|
| 78 |
+
While this method can produce a completely different plan, the recommended
|
| 79 |
+
way is to store storage specific data in SavePlan::storage_data.
|
| 80 |
+
|
| 81 |
+
Args:
|
| 82 |
+
plan (SavePlan): The local plan from the ``SavePlanner`` in use.
|
| 83 |
+
|
| 84 |
+
Returns:
|
| 85 |
+
A transformed ``SavePlan`` after storage local planning
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
@abc.abstractmethod
|
| 89 |
+
def prepare_global_plan(self, plans: List[SavePlan]) -> List[SavePlan]:
|
| 90 |
+
"""
|
| 91 |
+
Perform centralized planning of storage.
|
| 92 |
+
|
| 93 |
+
This method is only called on the coordinator instance.
|
| 94 |
+
|
| 95 |
+
While this method can produce a completely different plan, the preferred
|
| 96 |
+
way is to store storage specific data in SavePlan::storage_data.
|
| 97 |
+
|
| 98 |
+
Args:
|
| 99 |
+
plans: A list of ``SavePlan`` instances, one for each rank.
|
| 100 |
+
|
| 101 |
+
Returns:
|
| 102 |
+
A list of transformed ``SavePlan`` after storage global planning
|
| 103 |
+
"""
|
| 104 |
+
|
| 105 |
+
@abc.abstractmethod
|
| 106 |
+
def write_data(
|
| 107 |
+
self, plan: SavePlan, planner: SavePlanner
|
| 108 |
+
) -> Future[List[WriteResult]]:
|
| 109 |
+
"""
|
| 110 |
+
Write all items from ``plan`` using ``planner`` to resolve the data.
|
| 111 |
+
|
| 112 |
+
A subclass should call ``SavePlanner::resolve_data`` on each item
|
| 113 |
+
from the plan to get access to the underlying object to write.
|
| 114 |
+
|
| 115 |
+
Subclasses should lazily call `resolve_data` as it can allocate memory.
|
| 116 |
+
In case of tensors, make following assumptions:
|
| 117 |
+
|
| 118 |
+
- They might be on any device, including not matching the one on ``WriteItem::tensor_data``
|
| 119 |
+
- They might be views or not contiguous. Only the projection needs to be saved.
|
| 120 |
+
|
| 121 |
+
Args:
|
| 122 |
+
plan (SavePlan): The save plan to execute.
|
| 123 |
+
planner (SavePlanner): Planner object to be used to resolve items to data.
|
| 124 |
+
|
| 125 |
+
Returns:
|
| 126 |
+
A future that completes to a list of WriteResult
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
@abc.abstractmethod
|
| 130 |
+
def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None:
|
| 131 |
+
"""
|
| 132 |
+
Write the metadata and marks the current checkpoint as successful.
|
| 133 |
+
|
| 134 |
+
The actual format/schema used for serializing `metadata` is an
|
| 135 |
+
implementation detail. The only requirement is that it's recoverable
|
| 136 |
+
in to the same object graph.
|
| 137 |
+
|
| 138 |
+
Args:
|
| 139 |
+
metadata (Metadata): metadata for the new checkpoint
|
| 140 |
+
results: A list of WriteResults from all ranks.
|
| 141 |
+
|
| 142 |
+
Returns:
|
| 143 |
+
None
|
| 144 |
+
"""
|
| 145 |
+
|
| 146 |
+
@classmethod
|
| 147 |
+
@abc.abstractmethod
|
| 148 |
+
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
|
| 149 |
+
"""
|
| 150 |
+
Check if the given checkpoint_id is supported by the stroage. This allow
|
| 151 |
+
us to enable automatic storage selection.
|
| 152 |
+
"""
|
| 153 |
+
...
|
| 154 |
+
|
| 155 |
+
    def storage_meta(self) -> Optional[StorageMeta]:
        """
        Return the storage-specific metadata. This is used to store additional information
        in a checkpoint that can be useful for providing request-level observability. StorageMeta
        is passed to the ``SavePlanner`` during save calls. Returns None by default.

        TODO: provide an example
        """
        # Optional hook: subclasses that track request-level metadata override this.
        return None
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class StorageReader(abc.ABC):
    """
    Interface used by ``load_state_dict`` to read from storage.

    One StorageReader instance acts as both the coordinator and the follower
    in a distributed checkpoint. As part of initialization, each instance
    is told its role.

    A subclass should expect the following sequence of calls by ``load_state_dict``:

    0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id.
    1) (all ranks) read_metadata()
    2) (all ranks) set_up_storage_reader()
    3) (all ranks) prepare_local_plan()
    4) (coordinator) prepare_global_plan()
    5) (all ranks) read_data()
    """

    @abc.abstractmethod
    def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
        """
        Called to indicate that a brand new checkpoint read is going to happen.
        A checkpoint_id may be present if users set the checkpoint_id for
        this checkpoint read. The meaning of the checkpoint_id is
        storage-dependent. It can be a path to a folder/file or a key for
        a key-value storage.

        Args:
            checkpoint_id (Union[str, os.PathLike, None]):
                The ID of this checkpoint instance. The meaning of the checkpoint_id
                depends on the storage. It can be a path to a folder or to a file.
                It can also be a key if the storage is more like a key-value store.
                (Default: ``None``)
        """
        ...

    @abc.abstractmethod
    def read_metadata(self) -> Metadata:
        """
        Read the checkpoint metadata.

        Returns:
            The metadata object associated with the checkpoint being loaded.

        """

    @abc.abstractmethod
    def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:
        """
        Initialize this instance.

        Args:
            metadata (Metadata): The metadata schema to use.
            is_coordinator (bool): Whether this instance is responsible for coordinating
              the checkpoint.
        """

    @abc.abstractmethod
    def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan:
        """
        Perform storage-specific local planning.

        While this method can produce a completely different plan, the recommended
        way is to store storage specific data in LoadPlan::storage_data.

        Args:
            plan (LoadPlan): The local plan from the ``LoadPlan`` in use.

        Returns:
            A transformed ``LoadPlan`` after storage local planning
        """

    @abc.abstractmethod
    def prepare_global_plan(self, plans: List[LoadPlan]) -> List[LoadPlan]:
        """
        Perform centralized planning of storage loading.

        This method is only called on the coordinator instance.

        While this method can produce a completely different plan, the preferred
        way is to store storage specific data in LoadPlan::storage_data.

        Args:
            plans: A list of ``LoadPlan`` instances, one for each rank.

        Returns:
            A list of transformed ``LoadPlan`` after storage global planning
        """

    @abc.abstractmethod
    def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]:
        """
        Read all items from ``plan`` using ``planner`` to resolve the data.

        A subclass should call ``LoadPlanner::load_bytes`` to deserialize a BytesIO
        object into the right place.

        A subclass should call ``LoadPlanner::resolve_tensor`` to get access to the
        tensors that it should load data into.

        It's the StorageLayer responsibility to properly schedule any cross device copies
        required.

        Args:
            plan (LoadPlan): The local plan to execute on
            planner (LoadPlanner): The planner object to use to resolve items.

        Returns:
            A future that completes once all reads are finished.
        """

    @classmethod
    @abc.abstractmethod
    def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
        """
        Check if the given checkpoint_id is supported by the storage. This allows
        us to enable automatic storage selection.
        """
        ...
|
vllm/lib/python3.10/site-packages/torch/distributed/checkpoint/utils.py
ADDED
|
@@ -0,0 +1,431 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import cProfile
|
| 3 |
+
import inspect
|
| 4 |
+
import io
|
| 5 |
+
import itertools
|
| 6 |
+
import os
|
| 7 |
+
import warnings
|
| 8 |
+
from contextlib import contextmanager
|
| 9 |
+
from functools import wraps
|
| 10 |
+
from pstats import Stats
|
| 11 |
+
from typing import Any, Callable, cast, Dict, List, Optional, Sequence, TypeVar, Union
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
import torch.distributed as dist
|
| 15 |
+
from torch.distributed._shard.sharded_tensor import ShardedTensor
|
| 16 |
+
from torch.distributed._shard.sharded_tensor.shard import Shard
|
| 17 |
+
|
| 18 |
+
from .api import (
|
| 19 |
+
_is_wrapped_exception,
|
| 20 |
+
_wrap_exception,
|
| 21 |
+
CheckpointException,
|
| 22 |
+
WRAPPED_EXCEPTION,
|
| 23 |
+
)
|
| 24 |
+
from .metadata import MetadataIndex, STATE_DICT_TYPE
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
__all__ = ["find_tensor_shard", "find_state_dict_object"]
|
| 28 |
+
|
| 29 |
+
T = TypeVar("T")
|
| 30 |
+
R = TypeVar("R")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _get_failure_dict(
    results: List[Union[T, WRAPPED_EXCEPTION]]
) -> Dict[int, WRAPPED_EXCEPTION]:
    """Collect the failed entries of ``results``, keyed by their rank index."""
    failures: Dict[int, WRAPPED_EXCEPTION] = {}
    for rank, outcome in enumerate(results):
        if _is_wrapped_exception(outcome):
            failures[rank] = outcome
    return failures
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _all_gather_keys(
    local_dict: Dict[Any, Any], group: Optional[dist.ProcessGroup] = None
) -> List[Any]:
    """Gathers all keys, and returns them sorted."""
    world_size = dist.get_world_size(group)
    gathered_keys: List[List[Any]] = [None] * world_size  # type: ignore[list-item]

    dist.all_gather_object(gathered_keys, list(local_dict.keys()), group=group)
    # De-duplicate across ranks before sorting.
    return sorted(set(itertools.chain.from_iterable(gathered_keys)))
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class _DistWrapper:
    """
    This is a wrapper around PG that provides a series of features around object collectives.

    It works without distributed initialized, where most collectives turn into nops.

    All variants that take functions are exception robust, meaning that if one or more
    ranks raise errors, all ranks will observe those.
    """

    def __init__(
        self,
        group: Optional[dist.ProcessGroup],
        use_dist: bool,
        coordinator_rank: int,
    ):
        # group: process group used for all collectives (None -> default group).
        # use_dist: when False every collective degrades to a local no-op.
        # coordinator_rank: rank that runs the centralized planning steps.
        self.group = group
        self.use_dist = use_dist
        self.coordinator_rank = coordinator_rank
        if self.use_dist:
            self.rank = dist.get_rank(group)
            self.is_coordinator = self.rank == coordinator_rank
        else:
            # Single-process mode: this process is trivially the coordinator.
            self.rank = 0
            self.is_coordinator = True

    def get_rank(self) -> int:
        # Rank of this process within the wrapped group (0 when not distributed).
        return self.rank

    def get_world_size(self) -> int:
        # Number of participants (1 when not distributed).
        if self.use_dist:
            return dist.get_world_size(self.group)
        return 1

    def broadcast_object(self, object: Optional[T]) -> T:
        """Implement functionality similar to c10d::broadcast_object_list but without distributed enabled."""
        object_list = [object]
        if self.use_dist:
            dist.broadcast_object_list(
                object_list=object_list,
                group=self.group,
                src=self.coordinator_rank,
            )
        # After the collective every rank observes the coordinator's value.
        return cast(T, object_list[0])

    def gather_object(self, object: T) -> Optional[List[T]]:
        """Implement functionality similar to c10d::gather_object but without distributed enabled."""
        if self.use_dist:
            # Only the destination rank allocates a receive buffer.
            gather_objs = (
                cast(List[T], [None] * dist.get_world_size(self.group))
                if self.is_coordinator
                else None
            )

            dist.gather_object(
                obj=object,
                object_gather_list=gather_objs if self.is_coordinator else None,
                dst=self.coordinator_rank,
                group=self.group,
            )
            result = gather_objs
        else:
            result = [object]
        # Non-coordinator ranks receive None in the distributed case.
        return result

    def all_gather_object(self, object: T) -> List[T]:
        """Implement functionality similar to c10d::all_gather_object but without distributed enabled."""
        if self.use_dist:
            gather_objs = cast(List[T], [None] * dist.get_world_size(self.group))

            dist.all_gather_object(
                object_list=gather_objs, obj=object, group=self.group
            )
        else:
            gather_objs = [object]
        return gather_objs

    def scatter_object(self, object_list: Optional[List[T]]) -> T:
        """Implement functionality similar to c10d::scatter_object but without distributed enabled."""
        if self.use_dist:
            gather_result = cast(List[T], [None])
            dist.scatter_object_list(
                scatter_object_output_list=gather_result,
                scatter_object_input_list=object_list if self.is_coordinator else None,
                src=self.coordinator_rank,
                group=self.group,
            )

            local_reply = gather_result[0]
        else:
            # Single-process mode: the "scatter" is simply the first entry.
            assert object_list is not None
            local_reply = object_list[0]
        return local_reply

    def reduce_scatter(
        self,
        step: str,
        map_fun: Callable[[], T],
        reduce_fun: Callable[[List[T]], List[R]],
    ) -> R:
        """
        Compute a value on each rank, then do centralized reduce on a single rank, followed by a scatter.

        This method operates in the following way:
            Run ``map_fun`` on all ranks
            Gather results on rank 0
            Call ``reduce_fun`` on all those values
            Scatter to each rank part of the result.
        """
        local_data: Union[WRAPPED_EXCEPTION, T]
        try:
            local_data = map_fun()
        except BaseException as e:
            # Don't raise yet: wrap so the failure is shipped to the coordinator
            # and surfaced on every rank consistently.
            local_data = _wrap_exception(e)

        all_data = self.gather_object(local_data)
        all_results: Optional[List[Union[R, CheckpointException]]] = None
        if self.is_coordinator:
            assert all_data is not None
            node_failures = _get_failure_dict(all_data)

            if len(node_failures) == 0:
                try:
                    # N.B. why can't mypy cast List[R] to List[Union[R, WRAPPED_EXCEPTION]]?
                    all_results = cast(
                        List[Union[R, CheckpointException]],
                        reduce_fun(cast(List[T], all_data)),
                    )
                except BaseException as e:
                    node_failures[self.rank] = _wrap_exception(e)

            if len(node_failures) > 0:
                # Replace the payload with the same exception for every rank so
                # the scatter below delivers the failure everywhere.
                all_results = [
                    CheckpointException(step, node_failures)
                ] * self.get_world_size()

        result = self.scatter_object(all_results)
        if isinstance(result, CheckpointException):
            raise result
        return result

    def all_reduce(
        self,
        step: str,
        map_fun: Callable[[], T],
        reduce_fun: Callable[[List[T]], R],
    ) -> R:
        """
        Compute a value on each rank, then do centralized reduce on a single rank, followed by a broadcast.

        This method operates in the following way:
            Run ``map_fun`` on all ranks
            Gather results on rank 0
            Call ``reduce_fun`` on all those values
            Broadcast the reduced value to all ranks.
        """
        local_data: Union[T, WRAPPED_EXCEPTION]
        try:
            local_data = map_fun()
        except BaseException as e:
            # Wrap instead of raising so every rank observes the failure.
            local_data = _wrap_exception(e)

        all_data = self.gather_object(local_data)
        result: Optional[Union[R, CheckpointException]] = None
        if self.is_coordinator:
            assert all_data is not None
            node_failures = _get_failure_dict(all_data)
            if len(node_failures) == 0:
                try:
                    result = reduce_fun(cast(List[T], all_data))
                except BaseException as e:
                    node_failures[self.rank] = _wrap_exception(e)

            if len(node_failures) > 0:
                result = CheckpointException(step, node_failures)

        final_result = self.broadcast_object(result)
        if isinstance(final_result, CheckpointException):
            raise final_result
        return cast(R, final_result)

    def all_gather(
        self,
        step: str,
        map_fun: Callable[[], T],
    ) -> List[T]:
        """
        Compute a value on each rank, then all_gather them.

        This method operates in the following way:
            Run ``map_fun`` on all ranks
            all_gather the values to all ranks
        """
        result: Union[T, WRAPPED_EXCEPTION]
        try:
            result = map_fun()
        except BaseException as e:
            result = _wrap_exception(e)

        all_results = self.all_gather_object(result)

        # Every rank sees the full result list, so every rank can detect and
        # raise failures from any other rank.
        node_failures = _get_failure_dict(all_results)
        if len(node_failures) > 0:
            raise CheckpointException(step, node_failures)
        return cast(List[T], all_results)

    def broadcast(
        self,
        step: str,
        map_fun: Callable[[], T],
    ) -> T:
        """
        Compute a value on rank 0 and broadcast it.

        This method operates in the following way:
            Run ``map_fun`` on rank 0
            broadcast the value
        """
        result: Optional[Union[T, CheckpointException]] = None
        if self.is_coordinator:
            try:
                result = map_fun()
            except BaseException as e:
                result = CheckpointException(step, {self.rank: _wrap_exception(e)})
        final_result = self.broadcast_object(result)
        if isinstance(final_result, CheckpointException):
            raise final_result
        return cast(T, final_result)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def _find_shard(tensor: ShardedTensor, index: MetadataIndex) -> Shard:
|
| 284 |
+
if index.offset is None:
|
| 285 |
+
raise ValueError(
|
| 286 |
+
f"Cannot lookup {index.fqn} since its a ShardedTensor and no offset was provided"
|
| 287 |
+
)
|
| 288 |
+
|
| 289 |
+
shards = tensor.local_shards()
|
| 290 |
+
# index fast path
|
| 291 |
+
if index.index is not None:
|
| 292 |
+
if (
|
| 293 |
+
len(shards) > index.index
|
| 294 |
+
and torch.Size(shards[index.index].metadata.shard_offsets) == index.offset
|
| 295 |
+
):
|
| 296 |
+
return shards[index.index]
|
| 297 |
+
|
| 298 |
+
for shard in shards:
|
| 299 |
+
if torch.Size(shard.metadata.shard_offsets) == index.offset:
|
| 300 |
+
return shard
|
| 301 |
+
raise ValueError(f"Could not find shard at '{index.offset}' for FQN: '{index.fqn}'")
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def find_tensor_shard(tensor: torch.Tensor, index: MetadataIndex) -> torch.Tensor:
    """Resolve ``index`` against ``tensor`` and return the local tensor it refers to."""
    if hasattr(tensor, "__get_tensor_shard__"):
        # DTensor implements _Checkpointable
        return tensor.__get_tensor_shard__(index)  # type: ignore[attr-defined]
    if isinstance(tensor, ShardedTensor):
        return _find_shard(tensor, index).tensor
    if index.offset is None:
        return tensor
    # special case looking up a tensor by origin
    origin = torch.Size([0] * len(tensor.size()))
    if index.offset == origin:
        return tensor
    raise ValueError(
        f"FQN: '{index.fqn}' is not a ShardedTensor, can't find by offset: '{index.offset}'"
    )
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def find_state_dict_object(state_dict: STATE_DICT_TYPE, index: MetadataIndex) -> Any:
    """Look up the object ``index`` refers to inside ``state_dict``.

    Tensors are further resolved to the shard ``index.offset`` points at;
    non-tensor objects are returned as-is and may not carry an offset.
    """
    try:
        obj = state_dict[index.fqn]
    except KeyError:
        raise ValueError(f"Could not find FQN: '{index.fqn}'") from None

    if isinstance(obj, torch.Tensor):
        return find_tensor_shard(obj, index)
    if index.offset is not None:
        raise ValueError(
            f"FQN: '{index.fqn}' is not a ShardedTensor, can't find by offset: '{index.offset}'"
        )
    return obj
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def _element_wise_add(a: Sequence[int], b: Sequence[int]) -> List[int]:
|
| 335 |
+
return [i_a + i_b for i_a, i_b in zip(a, b)]
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def _element_wise_sub(a: Sequence[int], b: Sequence[int]) -> List[int]:
|
| 339 |
+
return [i_a - i_b for i_a, i_b in zip(a, b)]
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class _ReaderView(io.IOBase):
|
| 343 |
+
def __init__(self, base_stream: io.IOBase, offset: int, len: int):
|
| 344 |
+
super().__init__()
|
| 345 |
+
self.offset = offset
|
| 346 |
+
self.len = len
|
| 347 |
+
self.base_stream = base_stream
|
| 348 |
+
self.seek(0)
|
| 349 |
+
|
| 350 |
+
def seek(self, __offset: int, __whence: int = os.SEEK_SET) -> int:
|
| 351 |
+
if __whence == os.SEEK_SET:
|
| 352 |
+
__offset = self.offset + __offset
|
| 353 |
+
elif __whence == os.SEEK_END:
|
| 354 |
+
__whence = os.SEEK_SET
|
| 355 |
+
__offset = (self.offset + self.len) - __offset
|
| 356 |
+
return self.base_stream.seek(__offset, __whence)
|
| 357 |
+
|
| 358 |
+
def tell(self) -> int:
|
| 359 |
+
return self.base_stream.tell() - self.offset
|
| 360 |
+
|
| 361 |
+
def readable(self) -> bool:
|
| 362 |
+
return self.base_stream.readable()
|
| 363 |
+
|
| 364 |
+
def seekable(self) -> bool:
|
| 365 |
+
return self.base_stream.seekable()
|
| 366 |
+
|
| 367 |
+
def readinto(self, b):
|
| 368 |
+
return self.base_stream.readinto(b) # type: ignore[attr-defined]
|
| 369 |
+
|
| 370 |
+
def read(self, size=-1):
|
| 371 |
+
return self.base_stream.read(size)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def _create_file_view(file: io.IOBase, offset: int, length: int) -> io.IOBase:
    """Wrap ``file`` in a read-only view spanning ``[offset, offset + length)``."""
    # FIXME (kumpera) torch.load fails if we wrap with io.BufferedReader
    view = _ReaderView(file, offset, length)
    return view
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def _normalize_device_info(device_type: str, device_id: int) -> str:
|
| 380 |
+
"""Device info normalization."""
|
| 381 |
+
if device_type == "cpu":
|
| 382 |
+
return "cpu"
|
| 383 |
+
return f"{device_type}:{device_id}"
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
# TODO: integrate with distributed logging flag
|
| 387 |
+
ENABLE_PROFILE = False
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
@contextmanager
|
| 391 |
+
def _profile():
|
| 392 |
+
# Only log the profiling when it is enable and is on rank0 or dist is not
|
| 393 |
+
# avaiable.
|
| 394 |
+
if ENABLE_PROFILE and (not dist.is_available() or dist.get_rank() == 0):
|
| 395 |
+
profiler = cProfile.Profile()
|
| 396 |
+
profiler.enable()
|
| 397 |
+
try:
|
| 398 |
+
yield
|
| 399 |
+
finally:
|
| 400 |
+
profiler.disable()
|
| 401 |
+
stats = Stats(profiler)
|
| 402 |
+
stats.sort_stats("time").print_stats(10)
|
| 403 |
+
else:
|
| 404 |
+
yield
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def _api_bc_check(func):
|
| 408 |
+
@wraps(func)
|
| 409 |
+
def inner_func(*args, **kwargs) -> Any:
|
| 410 |
+
if len(args) == 2:
|
| 411 |
+
warnings.warn(
|
| 412 |
+
f"The argument order of {func.__name__} has been changed. "
|
| 413 |
+
"Please check the document to avoid future breakages."
|
| 414 |
+
)
|
| 415 |
+
sig = inspect.signature(func)
|
| 416 |
+
kwonlyargs = [
|
| 417 |
+
p.name for p in sig.parameters.values() if p.kind == p.KEYWORD_ONLY
|
| 418 |
+
]
|
| 419 |
+
if "storage_writer" in kwonlyargs:
|
| 420 |
+
assert "storage_writer" not in kwargs, (args, kwargs)
|
| 421 |
+
kwargs["storage_writer"] = args[1]
|
| 422 |
+
elif "storage_reader" in kwonlyargs:
|
| 423 |
+
assert "storage_reader" not in kwargs, (args, kwargs)
|
| 424 |
+
kwargs["storage_reader"] = args[1]
|
| 425 |
+
else:
|
| 426 |
+
raise RuntimeError(f"Unexpected kwonlyargs = {kwonlyargs}")
|
| 427 |
+
return func(args[0], **kwargs)
|
| 428 |
+
else:
|
| 429 |
+
return func(*args, **kwargs)
|
| 430 |
+
|
| 431 |
+
return inner_func
|
vllm/lib/python3.10/site-packages/torch/distributed/elastic/__init__.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env/python3
|
| 2 |
+
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This source code is licensed under the BSD-style license found in the
|
| 7 |
+
# LICENSE file in the root directory of this source tree.
|
| 8 |
+
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
Torchelastic agent and user worker failover contract:
|
| 12 |
+
|
| 13 |
+
**TL;DR;**:
|
| 14 |
+
|
| 15 |
+
* TE(torchelastic) expects user workers to finish with the 5 minutes drift
|
| 16 |
+
* It is better to design DDP app to fail for all workers, rather than a single one.
|
| 17 |
+
* TE does not synchronize number of restarts between agents
|
| 18 |
+
* TE re-rendezvous does not trigger restart decrease
|
| 19 |
+
* When a single agent finishes its job(successfully or not), it will close rendezvous.
|
| 20 |
+
If other agents still have workers in progress, they will be terminated.
|
| 21 |
+
* Based on above, scale down does not work if at least single agent finishes the job.
|
| 22 |
+
* When Scale up is detected by agents, it will not decrease ``max_restarts``
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
In general TE(torchelastic) can launch arbitrary user code, but there is some
|
| 26 |
+
clarifications need to be done around what failover mechanism torchelastic
|
| 27 |
+
provides and what failover mechanism it expects from user workers.
|
| 28 |
+
|
| 29 |
+
Torchelastic currently supports DDP style applications. That means that
|
| 30 |
+
TE expects *ALL* workers finish approximately at the same time. In practice,
|
| 31 |
+
it is nearly impossible to guarantee that all workers in an arbitrary
|
| 32 |
+
DDP application finish at the same time, so TE provides a finalization barrier
|
| 33 |
+
that waits for TIMEOUT(5 minutes) for worker finalization.
|
| 34 |
+
|
| 35 |
+
**Worker Failure**
|
| 36 |
+
|
| 37 |
+
When worker fails, TE will check the number of restarts
|
| 38 |
+
available, if there is more than 0 restarts, TE will start a new rendezvous
|
| 39 |
+
round and restart the worker process. The new rendezvous round will cause other
|
| 40 |
+
TE agents to terminate their workers.
|
| 41 |
+
|
| 42 |
+
.. note:: The TE agent does not synchronize restarts between themselves.
|
| 43 |
+
When a single agent performs restart, it will trigger a local ``max_restarts``
|
| 44 |
+
decrease, other agent will not decrease their ``max_restarts``.
|
| 45 |
+
the user to run the distributed application locally on a dev host.
|
| 46 |
+
|
| 47 |
+
A single worker failure can cause the whole cluster to fail:
|
| 48 |
+
If a single worker is constantly failing, it will cause the TE agent
|
| 49 |
+
``max_restarts`` to go to zero. This will cause an agent to finish its
|
| 50 |
+
work and close rendezvous. If there are any other workers on different
|
| 51 |
+
agents, they will be terminated.
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
**Re-Rendezvous**
|
| 55 |
+
|
| 56 |
+
Re-rendezvous occurs when TE agents detect a new node
|
| 57 |
+
trying to join a cluster. TE will not decrease ``max_restarts``. TE agents
|
| 58 |
+
will terminate its workers and start a new rendezvous round.
|
| 59 |
+
|
| 60 |
+
Note about DynamicRendezvous(etcd-v2, c10d-experimental): If the rendezvous
|
| 61 |
+
has already max_nodes, the new node won't be added to the wait list right
|
| 62 |
+
away since there is no need to tear down a rendezvous that is already fully
|
| 63 |
+
utilized. The new node will wait until its timeout (600 secs by default)
|
| 64 |
+
and periodically check the number of participants. If the number becomes
|
| 65 |
+
less than max_nodes, it will be added to the wait list; otherwise, it will time out after 600 secs.
|
| 66 |
+
|
| 67 |
+
*Scale up event*. When scale up event happens, torchelastic rendezvous
|
| 68 |
+
will detect that there are new nodes trying to join. Torchelastic agent
|
| 69 |
+
will stop all workers and perform re-rendezvous. Note: when scale up event
|
| 70 |
+
happens, *``max_restarts``* will *not* decrease.
|
| 71 |
+
|
| 72 |
+
*Scale down event*. When scale down event happens, rendezvous will not
|
| 73 |
+
notify the torchelastic agent about it. If TE agent launched with ``max_restarts=0`` ,
|
| 74 |
+
it relies on the underlying scheduler to handle job restart. If the ``max_restarts>0`` ,
|
| 75 |
+
TE agent will terminate workers and start a new rdzv round, which is a *Scale up event*.
|
| 76 |
+
|
| 77 |
+
"""
|
vllm/lib/python3.10/site-packages/torch/distributed/elastic/control_plane.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from contextlib import contextmanager, ExitStack
|
| 3 |
+
from typing import Generator
|
| 4 |
+
|
| 5 |
+
from torch.distributed.elastic.multiprocessing.errors import record
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
__all__ = [
|
| 9 |
+
"worker_main",
|
| 10 |
+
]
|
| 11 |
+
|
| 12 |
+
TORCH_WORKER_SERVER_SOCKET = "TORCH_WORKER_SERVER_SOCKET"
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@contextmanager
def _worker_server(socket_path: str) -> Generator[None, None, None]:
    """Run a ``_WorkerServer`` bound to ``socket_path`` for the duration of the block."""
    # Imported lazily: torch._C internals are only needed when the server is used.
    from torch._C._distributed_c10d import _WorkerServer

    worker_server = _WorkerServer(socket_path)
    try:
        yield
    finally:
        # Always tear the server down, even if the body raised.
        worker_server.shutdown()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@contextmanager
@record
def worker_main() -> Generator[None, None, None]:
    """
    This is a context manager that wraps your main entry function. This combines
    the existing ``errors.record`` logic as well as a new ``_WorkerServer`` that
    exposes handlers via a unix socket specified by
    ``TORCH_WORKER_SERVER_SOCKET``.

    Example

    ::

     @worker_main()
     def main():
        pass

    if __name__=="__main__":
        main()

    """
    with ExitStack() as stack:
        # The worker server is opt-in: only started when the env var is set.
        socket_path = os.environ.get(TORCH_WORKER_SERVER_SOCKET)
        if socket_path is not None:
            stack.enter_context(_worker_server(socket_path))

        yield
|
vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__init__.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 4 |
+
# All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# This source code is licensed under the BSD-style license found in the
|
| 7 |
+
# LICENSE file in the root directory of this source tree.
|
| 8 |
+
|
| 9 |
+
"""
|
| 10 |
+
Library that launches and manages ``n`` copies of worker subprocesses either specified by a function or a binary.
|
| 11 |
+
|
| 12 |
+
For functions, it uses ``torch.multiprocessing`` (and therefore python
|
| 13 |
+
``multiprocessing``) to spawn/fork worker processes. For binaries it uses python
|
| 14 |
+
``subprocessing.Popen`` to create worker processes.
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
Usage 1: Launching two trainers as a function
|
| 18 |
+
|
| 19 |
+
::
|
| 20 |
+
|
| 21 |
+
from torch.distributed.elastic.multiprocessing import Std, start_processes
|
| 22 |
+
|
| 23 |
+
def trainer(a, b, c):
|
| 24 |
+
pass # train
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# runs two trainers
|
| 28 |
+
# LOCAL_RANK=0 trainer(1,2,3)
|
| 29 |
+
# LOCAL_RANK=1 trainer(4,5,6)
|
| 30 |
+
ctx = start_processes(
|
| 31 |
+
name="trainer",
|
| 32 |
+
entrypoint=trainer,
|
| 33 |
+
args={0: (1,2,3), 1: (4,5,6)},
|
| 34 |
+
envs={0: {"LOCAL_RANK": 0}, 1: {"LOCAL_RANK": 1}},
|
| 35 |
+
log_dir="/tmp/foobar",
|
| 36 |
+
redirects=Std.ALL, # write all worker stdout/stderr to a log file
|
| 37 |
+
tee={0: Std.ERR}, # tee only local rank 0's stderr to console
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
# waits for all copies of trainer to finish
|
| 41 |
+
ctx.wait()
|
| 42 |
+
|
| 43 |
+
Usage 2: Launching 2 echo workers as a binary
|
| 44 |
+
|
| 45 |
+
::
|
| 46 |
+
|
| 47 |
+
# same as invoking
|
| 48 |
+
# echo hello
|
| 49 |
+
# echo world > stdout.log
|
| 50 |
+
ctx = start_processes(
|
| 51 |
+
name="echo"
|
| 52 |
+
entrypoint="echo",
|
| 53 |
+
log_dir="/tmp/foobar",
|
| 54 |
+
args={0: "hello", 1: "world"},
|
| 55 |
+
redirects={1: Std.OUT},
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
Just like ``torch.multiprocessing``, the return value of the function
|
| 59 |
+
:func:`start_processes` is a process context (:class:`api.PContext`). If a function
|
| 60 |
+
was launched, a :class:`api.MultiprocessContext` is returned and if a binary
|
| 61 |
+
was launched a :class:`api.SubprocessContext` is returned. Both are specific
|
| 62 |
+
implementations of the parent :class:`api.PContext` class.
|
| 63 |
+
"""
|
| 64 |
+
|
| 65 |
+
from typing import Callable, Dict, Optional, Tuple, Union
|
| 66 |
+
|
| 67 |
+
from torch.distributed.elastic.multiprocessing.api import ( # noqa: F401
|
| 68 |
+
_validate_full_rank,
|
| 69 |
+
DefaultLogsSpecs,
|
| 70 |
+
LogsDest,
|
| 71 |
+
LogsSpecs,
|
| 72 |
+
MultiprocessContext,
|
| 73 |
+
PContext,
|
| 74 |
+
ProcessFailure,
|
| 75 |
+
RunProcsResult,
|
| 76 |
+
SignalException,
|
| 77 |
+
Std,
|
| 78 |
+
SubprocessContext,
|
| 79 |
+
to_map,
|
| 80 |
+
)
|
| 81 |
+
from torch.distributed.elastic.utils.logging import get_logger
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
__all__ = [
|
| 85 |
+
"start_processes",
|
| 86 |
+
"MultiprocessContext",
|
| 87 |
+
"PContext",
|
| 88 |
+
"ProcessFailure",
|
| 89 |
+
"RunProcsResult",
|
| 90 |
+
"SignalException",
|
| 91 |
+
"Std",
|
| 92 |
+
"LogsDest",
|
| 93 |
+
"LogsSpecs",
|
| 94 |
+
"DefaultLogsSpecs",
|
| 95 |
+
"SubprocessContext",
|
| 96 |
+
"to_map",
|
| 97 |
+
]
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def start_processes(
|
| 101 |
+
name: str,
|
| 102 |
+
entrypoint: Union[Callable, str],
|
| 103 |
+
args: Dict[int, Tuple],
|
| 104 |
+
envs: Dict[int, Dict[str, str]],
|
| 105 |
+
logs_specs: LogsSpecs,
|
| 106 |
+
log_line_prefixes: Optional[Dict[int, str]] = None,
|
| 107 |
+
start_method: str = "spawn",
|
| 108 |
+
) -> PContext:
|
| 109 |
+
"""
|
| 110 |
+
Start ``n`` copies of ``entrypoint`` processes with the provided options.
|
| 111 |
+
|
| 112 |
+
``entrypoint`` is either a ``Callable`` (function) or a ``str`` (binary).
|
| 113 |
+
The number of copies is determined by the number of entries for ``args`` and
|
| 114 |
+
``envs`` arguments, which need to have the same key set.
|
| 115 |
+
|
| 116 |
+
``args`` and ``env`` parameters are the arguments and environment variables
|
| 117 |
+
to pass down to the entrypoint mapped by the replica index (local rank).
|
| 118 |
+
All local ranks must be accounted for.
|
| 119 |
+
That is, the keyset should be ``{0,1,...,(nprocs-1)}``.
|
| 120 |
+
|
| 121 |
+
.. note:: When the ``entrypoint`` is a binary (``str``), ``args`` can only be strings.
|
| 122 |
+
If any other type is given, then it is casted to a string representation
|
| 123 |
+
(e.g. ``str(arg1)``). Furthermore, a binary failure will only write
|
| 124 |
+
an ``error.json`` error file if the main function is annotated with
|
| 125 |
+
``torch.distributed.elastic.multiprocessing.errors.record``. For function launches,
|
| 126 |
+
this is done by default and there is no need to manually annotate
|
| 127 |
+
with the ``@record`` annotation.
|
| 128 |
+
|
| 129 |
+
``redirects`` and ``tee`` are bitmasks specifying which std stream(s) to redirect
|
| 130 |
+
to a log file in the ``log_dir``. Valid mask values are defined in ``Std``.
|
| 131 |
+
To redirect/tee only certain local ranks, pass ``redirects`` as a map with the key as
|
| 132 |
+
the local rank to specify the redirect behavior for.
|
| 133 |
+
Any missing local ranks will default to ``Std.NONE``.
|
| 134 |
+
|
| 135 |
+
``tee`` acts like the unix "tee" command in that it redirects + prints to console.
|
| 136 |
+
To avoid worker stdout/stderr from printing to console, use the ``redirects`` parameter.
|
| 137 |
+
|
| 138 |
+
For each process, the ``log_dir`` will contain:
|
| 139 |
+
|
| 140 |
+
#. ``{local_rank}/error.json``: if the process failed, a file with the error info
|
| 141 |
+
#. ``{local_rank}/stdout.json``: if ``redirect & STDOUT == STDOUT``
|
| 142 |
+
#. ``{local_rank}/stderr.json``: if ``redirect & STDERR == STDERR``
|
| 143 |
+
|
| 144 |
+
.. note:: It is expected that the ``log_dir`` exists, is empty, and is a directory.
|
| 145 |
+
|
| 146 |
+
Example:
|
| 147 |
+
::
|
| 148 |
+
|
| 149 |
+
log_dir = "/tmp/test"
|
| 150 |
+
|
| 151 |
+
# ok; two copies of foo: foo("bar0"), foo("bar1")
|
| 152 |
+
start_processes(
|
| 153 |
+
name="trainer",
|
| 154 |
+
entrypoint=foo,
|
| 155 |
+
args:{0:("bar0",), 1:("bar1",),
|
| 156 |
+
envs:{0:{}, 1:{}},
|
| 157 |
+
log_dir=log_dir
|
| 158 |
+
)
|
| 159 |
+
|
| 160 |
+
# invalid; envs missing for local rank 1
|
| 161 |
+
start_processes(
|
| 162 |
+
name="trainer",
|
| 163 |
+
entrypoint=foo,
|
| 164 |
+
args:{0:("bar0",), 1:("bar1",),
|
| 165 |
+
envs:{0:{}},
|
| 166 |
+
log_dir=log_dir
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
# ok; two copies of /usr/bin/touch: touch file1, touch file2
|
| 170 |
+
start_processes(
|
| 171 |
+
name="trainer",
|
| 172 |
+
entrypoint="/usr/bin/touch",
|
| 173 |
+
args:{0:("file1",), 1:("file2",),
|
| 174 |
+
envs:{0:{}, 1:{}},
|
| 175 |
+
log_dir=log_dir
|
| 176 |
+
)
|
| 177 |
+
|
| 178 |
+
# caution; arguments casted to string, runs:
|
| 179 |
+
# echo "1" "2" "3" and echo "[1, 2, 3]"
|
| 180 |
+
start_processes(
|
| 181 |
+
name="trainer",
|
| 182 |
+
entrypoint="/usr/bin/echo",
|
| 183 |
+
args:{0:(1,2,3), 1:([1,2,3],),
|
| 184 |
+
envs:{0:{}, 1:{}},
|
| 185 |
+
log_dir=log_dir
|
| 186 |
+
)
|
| 187 |
+
|
| 188 |
+
Args:
|
| 189 |
+
name: a human readable short name that describes what the processes are
|
| 190 |
+
(used as header when tee'ing stdout/stderr outputs)
|
| 191 |
+
entrypoint: either a ``Callable`` (function) or ``cmd`` (binary)
|
| 192 |
+
args: arguments to each replica
|
| 193 |
+
envs: env vars to each replica
|
| 194 |
+
log_dir: directory used to write log files
|
| 195 |
+
start_method: multiprocessing start method (spawn, fork, forkserver)
|
| 196 |
+
ignored for binaries
|
| 197 |
+
redirects: which std streams to redirect to a log file
|
| 198 |
+
tee: which std streams to redirect + print to console
|
| 199 |
+
local_ranks_filter: which ranks' logs to print to console
|
| 200 |
+
|
| 201 |
+
"""
|
| 202 |
+
|
| 203 |
+
nprocs = len(args)
|
| 204 |
+
_validate_full_rank(args, nprocs, "args")
|
| 205 |
+
_validate_full_rank(envs, nprocs, "envs")
|
| 206 |
+
|
| 207 |
+
context: PContext
|
| 208 |
+
if isinstance(entrypoint, str):
|
| 209 |
+
context = SubprocessContext(
|
| 210 |
+
name=name,
|
| 211 |
+
entrypoint=entrypoint,
|
| 212 |
+
args=args,
|
| 213 |
+
envs=envs,
|
| 214 |
+
logs_specs=logs_specs,
|
| 215 |
+
log_line_prefixes=log_line_prefixes,
|
| 216 |
+
)
|
| 217 |
+
else:
|
| 218 |
+
context = MultiprocessContext(
|
| 219 |
+
name=name,
|
| 220 |
+
entrypoint=entrypoint,
|
| 221 |
+
args=args,
|
| 222 |
+
envs=envs,
|
| 223 |
+
log_line_prefixes=log_line_prefixes,
|
| 224 |
+
start_method=start_method,
|
| 225 |
+
logs_specs=logs_specs,
|
| 226 |
+
)
|
| 227 |
+
|
| 228 |
+
try:
|
| 229 |
+
context.start()
|
| 230 |
+
return context
|
| 231 |
+
except Exception:
|
| 232 |
+
context.close()
|
| 233 |
+
raise
|