Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives_impl.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/c10d_logger.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/device_mesh.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/launch.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py +12 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_debug_utils.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_optim_utils.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_runtime_utils.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py +12 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py +116 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py +159 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/checkpoint.py +364 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py +108 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/dependency.py +54 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/microbatch.py +234 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/phony.py +50 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py +490 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipeline.py +255 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py +11 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/portal.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/skippable.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/tracker.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/layout.py +92 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/namespace.py +50 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/portal.py +231 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/skippable.py +431 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/tracker.py +180 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/stream.py +120 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/utils.py +38 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/worker.py +132 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py +249 -0
- vlmpy310/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc +0 -0
vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (3.44 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/_functional_collectives_impl.cpython-310.pyc
ADDED
|
Binary file (9.71 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/c10d_logger.cpython-310.pyc
ADDED
|
Binary file (2.84 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/device_mesh.cpython-310.pyc
ADDED
|
Binary file (17.9 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/__pycache__/launch.cpython-310.pyc
ADDED
|
Binary file (7.11 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc
ADDED
|
Binary file (4.12 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Keep old package for BC purposes, this file should be removed once
|
| 2 |
+
# everything moves to the `torch.distributed._shard` package.
|
| 3 |
+
import sys
|
| 4 |
+
import torch
|
| 5 |
+
import warnings
|
| 6 |
+
|
| 7 |
+
from torch.distributed._shard.sharded_tensor import * # noqa: F403
|
| 8 |
+
warnings.warn(
|
| 9 |
+
"torch.distributed._sharded_tensor will be deprecated, use torch.distributed._shard.sharded_tensor instead",
|
| 10 |
+
DeprecationWarning
|
| 11 |
+
)
|
| 12 |
+
sys.modules['torch.distributed._sharded_tensor'] = torch.distributed._shard.sharded_tensor
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/_sharded_tensor/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (547 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (760 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_debug_utils.cpython-310.pyc
ADDED
|
Binary file (5.76 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_optim_utils.cpython-310.pyc
ADDED
|
Binary file (55.6 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_runtime_utils.cpython-310.pyc
ADDED
|
Binary file (39.5 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_traversal_utils.cpython-310.pyc
ADDED
|
Binary file (3.03 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py
ADDED
|
File without changes
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""A Pipe implementation in PyTorch."""
|
| 8 |
+
from .checkpoint import is_checkpointing, is_recomputing
|
| 9 |
+
from .pipe import Pipe, WithDevice
|
| 10 |
+
from .microbatch import NoChunk
|
| 11 |
+
|
| 12 |
+
__all__ = ["Pipe", "is_checkpointing", "is_recomputing"]
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (443 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/batchnorm.cpython-310.pyc
ADDED
|
Binary file (4.23 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/microbatch.cpython-310.pyc
ADDED
|
Binary file (7.75 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipe.cpython-310.pyc
ADDED
|
Binary file (16.9 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/pipeline.cpython-310.pyc
ADDED
|
Binary file (6.44 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/stream.cpython-310.pyc
ADDED
|
Binary file (3.34 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__pycache__/worker.cpython-310.pyc
ADDED
|
Binary file (4.13 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (5.52 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/profile.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Per-layer profilers."""
|
| 8 |
+
import copy
|
| 9 |
+
import time
|
| 10 |
+
from typing import Any, Generator, List, Union, Sequence
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from torch import Tensor
|
| 14 |
+
import torch.nn as nn
|
| 15 |
+
|
| 16 |
+
from ..microbatch import Batch
|
| 17 |
+
|
| 18 |
+
__all__: List[str] = []
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
Device = Union[torch.device, int, str]
|
| 22 |
+
|
| 23 |
+
Tensors = Sequence[Tensor]
|
| 24 |
+
TensorOrTensors = Union[Tensor, Tensors]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def layerwise_sandbox(module: nn.Sequential, device: torch.device,) -> Generator[nn.Module, None, None]:
|
| 28 |
+
"""Copies layers for ease to profile. It doesn't modify the given
|
| 29 |
+
module.
|
| 30 |
+
"""
|
| 31 |
+
for layer in module:
|
| 32 |
+
layer_copy = copy.deepcopy(layer)
|
| 33 |
+
layer_copy.to(device)
|
| 34 |
+
layer_copy.train()
|
| 35 |
+
yield layer_copy
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def detach(batch: Batch) -> None:
|
| 39 |
+
"""Detaches from autograd graph."""
|
| 40 |
+
for i, x in enumerate(batch):
|
| 41 |
+
batch[i] = x.detach().requires_grad_(x.requires_grad)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def profile_times(module: nn.Sequential, sample: Union[List[Any], Tensor], timeout: float, device: torch.device,) -> List[int]:
|
| 45 |
+
"""Profiles elapsed times per layer."""
|
| 46 |
+
if any(p.grad is not None for p in module.parameters()):
|
| 47 |
+
raise ValueError("some parameter already has gradient")
|
| 48 |
+
|
| 49 |
+
_batch = Batch(sample)
|
| 50 |
+
for i, x in enumerate(_batch):
|
| 51 |
+
_batch[i] = x.detach().to(device).requires_grad_(x.requires_grad)
|
| 52 |
+
|
| 53 |
+
time_bufs: List[List[float]] = [[] for _ in module]
|
| 54 |
+
begun_at = time.time()
|
| 55 |
+
|
| 56 |
+
while time.time() - begun_at < timeout:
|
| 57 |
+
batch = _batch
|
| 58 |
+
|
| 59 |
+
for i, layer in enumerate(layerwise_sandbox(module, device)):
|
| 60 |
+
detach(batch)
|
| 61 |
+
|
| 62 |
+
if device.type == "cuda":
|
| 63 |
+
torch.cuda.synchronize(device)
|
| 64 |
+
tick = time.time()
|
| 65 |
+
|
| 66 |
+
# Forward
|
| 67 |
+
batch = batch.call(layer)
|
| 68 |
+
|
| 69 |
+
# Backward
|
| 70 |
+
backward_tensors = tuple(y for y in batch if y.requires_grad)
|
| 71 |
+
if backward_tensors:
|
| 72 |
+
torch.autograd.backward(backward_tensors, backward_tensors)
|
| 73 |
+
|
| 74 |
+
if device.type == "cuda":
|
| 75 |
+
torch.cuda.synchronize(device)
|
| 76 |
+
tock = time.time()
|
| 77 |
+
|
| 78 |
+
time_bufs[i].append(tock - tick)
|
| 79 |
+
|
| 80 |
+
us = 1_000_000
|
| 81 |
+
return [sum(int(t * us) for t in buf) for buf in time_bufs]
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def profile_sizes(
|
| 85 |
+
module: nn.Sequential, input: Union[List[Any], Tensor], chunks: int, param_scale: float, device: torch.device,
|
| 86 |
+
) -> List[int]:
|
| 87 |
+
"""Profiles CUDA memory usage per layer."""
|
| 88 |
+
if device.type != "cuda":
|
| 89 |
+
raise ValueError("size profiler supports only CUDA device")
|
| 90 |
+
|
| 91 |
+
batch = Batch(input)
|
| 92 |
+
sizes: List[int] = []
|
| 93 |
+
|
| 94 |
+
latent_scale = batch[0].size(0) / chunks
|
| 95 |
+
for i, x in enumerate(batch):
|
| 96 |
+
batch[i] = x[:1].detach().to(device).requires_grad_(x.requires_grad)
|
| 97 |
+
|
| 98 |
+
for layer in layerwise_sandbox(module, device):
|
| 99 |
+
detach(batch)
|
| 100 |
+
|
| 101 |
+
# Detect memory usage at forward.
|
| 102 |
+
torch._C._cuda_clearCublasWorkspaces()
|
| 103 |
+
memory_before = torch.cuda.memory_allocated(device)
|
| 104 |
+
batch = batch.call(layer)
|
| 105 |
+
torch._C._cuda_clearCublasWorkspaces()
|
| 106 |
+
memory_after = torch.cuda.memory_allocated(device)
|
| 107 |
+
latent_size = memory_after - memory_before
|
| 108 |
+
|
| 109 |
+
# Analyze size of parameters.
|
| 110 |
+
param_size = sum(p._typed_storage()._nbytes() for p in layer.parameters())
|
| 111 |
+
|
| 112 |
+
# Combine size of parameters and activations with normalize scales.
|
| 113 |
+
size = latent_size * latent_scale + param_size * param_scale
|
| 114 |
+
sizes.append(int(size))
|
| 115 |
+
|
| 116 |
+
return sizes
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/batchnorm.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Tracks the running statistics per mini-batch instead of micro-batch."""
|
| 8 |
+
from typing import TypeVar, cast
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from torch import Tensor, nn
|
| 12 |
+
from torch.nn.functional import batch_norm
|
| 13 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 14 |
+
|
| 15 |
+
from .checkpoint import is_recomputing
|
| 16 |
+
|
| 17 |
+
__all__ = ["DeferredBatchNorm"]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
TModule = TypeVar("TModule", bound=nn.Module)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class DeferredBatchNorm(_BatchNorm):
|
| 24 |
+
"""A BatchNorm layer tracks multiple micro-batches to update running statistics per mini-batch."""
|
| 25 |
+
|
| 26 |
+
sum: Tensor
|
| 27 |
+
sum_squares: Tensor
|
| 28 |
+
running_mean: Tensor
|
| 29 |
+
running_var: Tensor
|
| 30 |
+
num_batches_tracked: Tensor
|
| 31 |
+
|
| 32 |
+
def __init__(
|
| 33 |
+
self,
|
| 34 |
+
num_features: int,
|
| 35 |
+
eps: float = 1e-5,
|
| 36 |
+
momentum: float = 0.1,
|
| 37 |
+
affine: bool = True,
|
| 38 |
+
chunks: int = 1,
|
| 39 |
+
) -> None:
|
| 40 |
+
super().__init__(num_features, eps, momentum, affine, track_running_stats=True)
|
| 41 |
+
|
| 42 |
+
self.register_buffer("sum", torch.zeros_like(self.running_mean))
|
| 43 |
+
self.register_buffer("sum_squares", torch.zeros_like(self.running_var))
|
| 44 |
+
|
| 45 |
+
self.counter = 0
|
| 46 |
+
self.tracked = 0
|
| 47 |
+
self.chunks = chunks
|
| 48 |
+
|
| 49 |
+
def _check_input_dim(self, input: Tensor) -> None:
|
| 50 |
+
# It's the typical _check_input_dim() implementation in PyTorch.
|
| 51 |
+
if input.dim() <= 2:
|
| 52 |
+
raise ValueError("expected at least 3D input (got %dD input)" % input.dim())
|
| 53 |
+
|
| 54 |
+
def _track(self, input: Tensor) -> bool:
|
| 55 |
+
"""Tracks statistics of a micro-batch."""
|
| 56 |
+
# Dimensions except channel. For example, (0, 2, 3) is for BatchNorm2d.
|
| 57 |
+
dim = [0]
|
| 58 |
+
dim.extend(range(2, input.dim()))
|
| 59 |
+
|
| 60 |
+
with torch.no_grad():
|
| 61 |
+
self.sum += input.sum(dim)
|
| 62 |
+
self.sum_squares += (input ** 2).sum(dim)
|
| 63 |
+
|
| 64 |
+
size = input.size().numel() // input.size(1)
|
| 65 |
+
self.counter += size
|
| 66 |
+
self.tracked += 1
|
| 67 |
+
|
| 68 |
+
return self.tracked == self.chunks
|
| 69 |
+
|
| 70 |
+
def _commit(self) -> None:
|
| 71 |
+
"""Update the running statistics of a mini-batch."""
|
| 72 |
+
exponential_average_factor = 0.0
|
| 73 |
+
self.num_batches_tracked += 1
|
| 74 |
+
if self.momentum is None: # use cumulative moving average
|
| 75 |
+
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
|
| 76 |
+
else: # use exponential moving average
|
| 77 |
+
exponential_average_factor = self.momentum
|
| 78 |
+
|
| 79 |
+
mean = self.sum / self.counter
|
| 80 |
+
var = self.sum_squares / self.counter - mean ** 2
|
| 81 |
+
|
| 82 |
+
# Calculate the exponential moving average here.
|
| 83 |
+
m = exponential_average_factor
|
| 84 |
+
|
| 85 |
+
self.running_mean *= 1 - m
|
| 86 |
+
self.running_mean += mean * m
|
| 87 |
+
|
| 88 |
+
self.running_var *= 1 - m
|
| 89 |
+
self.running_var += var * m
|
| 90 |
+
|
| 91 |
+
self.sum.zero_()
|
| 92 |
+
self.sum_squares.zero_()
|
| 93 |
+
self.counter = 0
|
| 94 |
+
self.tracked = 0
|
| 95 |
+
|
| 96 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 97 |
+
if not self.training:
|
| 98 |
+
# Don't train parameters on the evaluation mode.
|
| 99 |
+
return batch_norm(
|
| 100 |
+
input,
|
| 101 |
+
running_mean=self.running_mean,
|
| 102 |
+
running_var=self.running_var,
|
| 103 |
+
weight=self.weight,
|
| 104 |
+
bias=self.bias,
|
| 105 |
+
training=False,
|
| 106 |
+
momentum=0.0,
|
| 107 |
+
eps=self.eps,
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
if not is_recomputing():
|
| 111 |
+
# Track a micro-batch on the training mode
|
| 112 |
+
# but not under a recomputation.
|
| 113 |
+
tracked_enough = self._track(input)
|
| 114 |
+
|
| 115 |
+
# Update the running statistics for a mini-batch
|
| 116 |
+
# if it has tracked enough micro-batches.
|
| 117 |
+
if tracked_enough:
|
| 118 |
+
self._commit()
|
| 119 |
+
|
| 120 |
+
# Normalize a micro-batch and train the parameters.
|
| 121 |
+
return batch_norm(
|
| 122 |
+
input,
|
| 123 |
+
running_mean=None,
|
| 124 |
+
running_var=None,
|
| 125 |
+
weight=self.weight,
|
| 126 |
+
bias=self.bias,
|
| 127 |
+
training=True,
|
| 128 |
+
momentum=0.0,
|
| 129 |
+
eps=self.eps,
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
@classmethod
|
| 133 |
+
def convert_deferred_batch_norm(cls, module: TModule, chunks: int = 1) -> TModule:
|
| 134 |
+
"""Converts a :class:`nn.BatchNorm` or underlying :class:`nn.BatchNorm`s into :class:`DeferredBatchNorm`::
|
| 135 |
+
|
| 136 |
+
from torchvision.models.resnet import resnet101
|
| 137 |
+
from torchpipe.batchnorm import DeferredBatchNorm
|
| 138 |
+
model = resnet101()
|
| 139 |
+
model = DeferredBatchNorm.convert_deferred_batch_norm(model)
|
| 140 |
+
|
| 141 |
+
"""
|
| 142 |
+
if isinstance(module, DeferredBatchNorm) and module.chunks is chunks:
|
| 143 |
+
return cast(TModule, module)
|
| 144 |
+
|
| 145 |
+
module_output: nn.Module = module
|
| 146 |
+
|
| 147 |
+
if isinstance(module, _BatchNorm) and module.track_running_stats:
|
| 148 |
+
module_output = DeferredBatchNorm(module.num_features, module.eps, module.momentum, module.affine, chunks)
|
| 149 |
+
if module.affine:
|
| 150 |
+
module_output.register_parameter("weight", module.weight)
|
| 151 |
+
module_output.register_parameter("bias", module.bias)
|
| 152 |
+
module_output.register_buffer("running_mean", module.running_mean)
|
| 153 |
+
module_output.register_buffer("running_var", module.running_var)
|
| 154 |
+
module_output.register_buffer("num_batches_tracked", module.num_batches_tracked)
|
| 155 |
+
|
| 156 |
+
for name, child in module.named_children():
|
| 157 |
+
module_output.add_module(name, cls.convert_deferred_batch_norm(child, chunks))
|
| 158 |
+
|
| 159 |
+
return cast(TModule, module_output)
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/checkpoint.py
ADDED
|
@@ -0,0 +1,364 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Checkpointing with preceding recomputation.
|
| 8 |
+
|
| 9 |
+
PyTorch already provides the official checkpointing utilities in
|
| 10 |
+
:mod:`torch.utils.checkpoint`. The official checkpointing combines
|
| 11 |
+
recomputation and recursive backpropagation into one autograd function named
|
| 12 |
+
``CheckpointFunction``. Hence, the recomputation can be started only when the
|
| 13 |
+
gradients arrive to the function. In Pipe, the recomputation needs to precede
|
| 14 |
+
the gradient arrival to minimize the GPU idle time.
|
| 15 |
+
|
| 16 |
+
We solve this problem by introducing separate autograd functions named
|
| 17 |
+
:class:`Recompute` and :class:`Checkpoint`. Each function represents
|
| 18 |
+
recomputation and recursive backpropagation, respectively. We can manipulate
|
| 19 |
+
the control flow in aspect of both the autograd engine and CUDA with a pair of
|
| 20 |
+
the functions.
|
| 21 |
+
|
| 22 |
+
Specifically, we place CUDA stream synchronization between :class:`Recompute`
|
| 23 |
+
and :class:`Checkpoint` to delay only :class:`Checkpoint` until the gradient is
|
| 24 |
+
copied entirely.
|
| 25 |
+
|
| 26 |
+
"""
|
| 27 |
+
from collections import deque
|
| 28 |
+
from contextlib import contextmanager
|
| 29 |
+
import threading
|
| 30 |
+
from typing import (
|
| 31 |
+
Any,
|
| 32 |
+
Deque,
|
| 33 |
+
Generator,
|
| 34 |
+
List,
|
| 35 |
+
Optional,
|
| 36 |
+
Protocol,
|
| 37 |
+
Union,
|
| 38 |
+
Sequence,
|
| 39 |
+
Tuple
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
import torch
|
| 43 |
+
from torch import Tensor
|
| 44 |
+
import torch.autograd
|
| 45 |
+
|
| 46 |
+
from .dependency import fork, join
|
| 47 |
+
from .microbatch import Batch
|
| 48 |
+
from .phony import get_phony
|
| 49 |
+
|
| 50 |
+
__all__ = ["Function", "checkpoint", "Checkpointing", "ThreadLocal", "enable_checkpointing",
|
| 51 |
+
"enable_recomputing", "is_checkpointing", "is_recomputing", "Context", "save_rng_states",
|
| 52 |
+
"restore_rng_states", "Checkpoint", "Recompute"]
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
Tensors = Sequence[Tensor]
|
| 56 |
+
TensorOrTensors = Union[Tensor, Tensors]
|
| 57 |
+
|
| 58 |
+
# Types for shared memory between Checkpoint and Recompute.
|
| 59 |
+
Recomputed = Tuple[TensorOrTensors, Tensors] # (output, input_leaf)
|
| 60 |
+
RNGStates = Tuple[Tensor, Optional[Tensor]] # (cpu_rng_state, gpu_rng_state)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# Protocol with __call__ instead of Callable can be used as an attribute type.
|
| 64 |
+
# See: https://github.com/python/mypy/issues/708#issuecomment-561735949
|
| 65 |
+
class Function(Protocol):
|
| 66 |
+
def __call__(self, input: TensorOrTensors) -> TensorOrTensors:
|
| 67 |
+
...
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def checkpoint(function: Function, input):
|
| 71 |
+
"""Make a checkpoint with a simple interface like
|
| 72 |
+
:func:`torch.utils.checkpoint.checkpoint`. It's only used to test or debug
|
| 73 |
+
:class:`Checkpoint` and :class:`Recompute` without boilerplate.
|
| 74 |
+
"""
|
| 75 |
+
batch = Batch(input)
|
| 76 |
+
|
| 77 |
+
chk = Checkpointing(function, batch)
|
| 78 |
+
batch = chk.checkpoint()
|
| 79 |
+
chk.recompute(batch)
|
| 80 |
+
|
| 81 |
+
return batch.values
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class Checkpointing:
|
| 85 |
+
"""Generates a pair of :class:`Checkpoint` and :class:`Recompute`."""
|
| 86 |
+
|
| 87 |
+
def __init__(self, function: Function, batch: Batch) -> None:
|
| 88 |
+
self.function = function
|
| 89 |
+
self.batch = batch
|
| 90 |
+
|
| 91 |
+
# Shared memory between Checkpoint and Recompute. 1-length deque is
|
| 92 |
+
# used for mutability and length limitation.
|
| 93 |
+
self.recomputed: Deque[Recomputed] = deque(maxlen=1)
|
| 94 |
+
self.rng_states: Deque[RNGStates] = deque(maxlen=1)
|
| 95 |
+
|
| 96 |
+
def checkpoint(self) -> Batch:
|
| 97 |
+
"""Return a batch applied by :class:`Checkpoint`."""
|
| 98 |
+
input_atomic = self.batch.atomic
|
| 99 |
+
inputs = tuple(self.batch)
|
| 100 |
+
|
| 101 |
+
# Use a phony which requires grad to ensure that Checkpoint can be
|
| 102 |
+
# tracked by the autograd engine even when none of the input tensors
|
| 103 |
+
# require grad.
|
| 104 |
+
phony = get_phony(self.batch.get_device(), requires_grad=True)
|
| 105 |
+
|
| 106 |
+
output = Checkpoint.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *inputs)
|
| 107 |
+
|
| 108 |
+
# Gradients are only supported for float Tensors.
|
| 109 |
+
if isinstance(output, tuple):
|
| 110 |
+
output = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in output])
|
| 111 |
+
|
| 112 |
+
return Batch(output)
|
| 113 |
+
|
| 114 |
+
def recompute(self, batch: Batch) -> None:
|
| 115 |
+
"""Apply :class:`Recompute` to the batch in place."""
|
| 116 |
+
input_atomic = self.batch.atomic
|
| 117 |
+
inputs = tuple(self.batch)
|
| 118 |
+
|
| 119 |
+
# Use a tensor in the batch to tie together fork-join
|
| 120 |
+
tensor_idx = batch.find_tensor_idx()
|
| 121 |
+
# batch[tensor_idx] is always requiring grad, because it has been passed
|
| 122 |
+
# checkpoint with a phony requiring grad.
|
| 123 |
+
batch[tensor_idx], phony = fork(batch[tensor_idx])
|
| 124 |
+
phony = Recompute.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *inputs)
|
| 125 |
+
batch[tensor_idx] = join(batch[tensor_idx], phony)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class ThreadLocal(threading.local):
|
| 129 |
+
def __init__(self) -> None:
|
| 130 |
+
self.is_checkpointing = False
|
| 131 |
+
self.is_recomputing = False
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
thread_local = ThreadLocal()
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
@contextmanager
|
| 138 |
+
def enable_checkpointing() -> Generator[None, None, None]:
|
| 139 |
+
"""Make :func:`is_checkpointing` return :data:`True` within a context."""
|
| 140 |
+
orig = thread_local.is_checkpointing
|
| 141 |
+
thread_local.is_checkpointing = True
|
| 142 |
+
try:
|
| 143 |
+
yield
|
| 144 |
+
finally:
|
| 145 |
+
thread_local.is_checkpointing = orig
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
@contextmanager
|
| 149 |
+
def enable_recomputing() -> Generator[None, None, None]:
|
| 150 |
+
"""Makes :func:`is_recomputing` return :data:`True` within a context."""
|
| 151 |
+
orig = thread_local.is_recomputing
|
| 152 |
+
thread_local.is_recomputing = True
|
| 153 |
+
try:
|
| 154 |
+
yield
|
| 155 |
+
finally:
|
| 156 |
+
thread_local.is_recomputing = orig
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def is_checkpointing() -> bool:
|
| 160 |
+
"""Whether the current forward propagation is under checkpointing.
|
| 161 |
+
|
| 162 |
+
Returns:
|
| 163 |
+
bool: :data:`True` if it's under checkpointing.
|
| 164 |
+
|
| 165 |
+
"""
|
| 166 |
+
return thread_local.is_checkpointing
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def is_recomputing() -> bool:
|
| 170 |
+
"""Whether the current forward propagation is under checkpoint recomputation.
|
| 171 |
+
|
| 172 |
+
Use this to prevent duplicated side-effects at forward
|
| 173 |
+
propagation::
|
| 174 |
+
|
| 175 |
+
class Counter(nn.Module):
|
| 176 |
+
def __init__(self):
|
| 177 |
+
super().__init__()
|
| 178 |
+
self.counter = 0
|
| 179 |
+
|
| 180 |
+
def forward(self, input):
|
| 181 |
+
if not is_recomputing():
|
| 182 |
+
self.counter += 1
|
| 183 |
+
return input
|
| 184 |
+
|
| 185 |
+
Returns:
|
| 186 |
+
bool: :data:`True` if it's under checkpoint recomputation.
|
| 187 |
+
|
| 188 |
+
.. seealso:: :ref:`Detecting Recomputation`
|
| 189 |
+
|
| 190 |
+
"""
|
| 191 |
+
return thread_local.is_recomputing
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class Context:
|
| 195 |
+
"""The common interface between the :class:`Checkpoint` and :class:`Recompute` context."""
|
| 196 |
+
|
| 197 |
+
recomputed: Deque[Recomputed]
|
| 198 |
+
rng_states: Deque[RNGStates]
|
| 199 |
+
function: Function
|
| 200 |
+
input_atomic: bool
|
| 201 |
+
inputs: Sequence[Any]
|
| 202 |
+
|
| 203 |
+
saved_tensors: Tuple[Tensor, ...]
|
| 204 |
+
|
| 205 |
+
def save_for_backward(self, *tensors: Tensor) -> None: # pragma: no cover
|
| 206 |
+
pass
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def save_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> None:
|
| 210 |
+
""":
|
| 211 |
+
Capture the current random number generator states.
|
| 212 |
+
|
| 213 |
+
meth:`Checkpoint.forward` captures the current PyTorch's random number
|
| 214 |
+
generator states at CPU and GPU to reuse in :meth:`Recompute.backward`.
|
| 215 |
+
|
| 216 |
+
.. seealso:: :ref:`Referential Transparency`
|
| 217 |
+
|
| 218 |
+
"""
|
| 219 |
+
cpu_rng_state = torch.get_rng_state()
|
| 220 |
+
|
| 221 |
+
gpu_rng_state: Optional[Tensor]
|
| 222 |
+
if device.type == "cuda":
|
| 223 |
+
gpu_rng_state = torch.cuda.get_rng_state(device)
|
| 224 |
+
else:
|
| 225 |
+
gpu_rng_state = None
|
| 226 |
+
|
| 227 |
+
rng_states.append((cpu_rng_state, gpu_rng_state))
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
@contextmanager
|
| 231 |
+
def restore_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> Generator[None, None, None]:
|
| 232 |
+
""":
|
| 233 |
+
Restore the random number generator state.
|
| 234 |
+
|
| 235 |
+
meth:`Recompute.backward` restores the random number generator states
|
| 236 |
+
captured by :func:`save_rng_states` within its context.
|
| 237 |
+
|
| 238 |
+
.. seealso:: :ref:`Referential Transparency`
|
| 239 |
+
|
| 240 |
+
"""
|
| 241 |
+
cpu_rng_state, gpu_rng_state = rng_states.pop()
|
| 242 |
+
|
| 243 |
+
gpu_devices: List[torch.device] = []
|
| 244 |
+
if device.type == "cuda":
|
| 245 |
+
gpu_devices.append(device)
|
| 246 |
+
|
| 247 |
+
with torch.random.fork_rng(gpu_devices):
|
| 248 |
+
torch.set_rng_state(cpu_rng_state)
|
| 249 |
+
if gpu_rng_state is not None:
|
| 250 |
+
torch.cuda.set_rng_state(gpu_rng_state, device)
|
| 251 |
+
yield
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
class Checkpoint(torch.autograd.Function):
|
| 255 |
+
@staticmethod
|
| 256 |
+
# type: ignore[override]
|
| 257 |
+
def forward(
|
| 258 |
+
ctx: Context,
|
| 259 |
+
phony: Tensor,
|
| 260 |
+
recomputed: Deque[Recomputed],
|
| 261 |
+
rng_states: Deque[RNGStates],
|
| 262 |
+
function: Function,
|
| 263 |
+
input_atomic: bool,
|
| 264 |
+
*inputs,
|
| 265 |
+
):
|
| 266 |
+
ctx.recomputed = recomputed
|
| 267 |
+
ctx.rng_states = rng_states
|
| 268 |
+
|
| 269 |
+
save_rng_states(phony.device, ctx.rng_states)
|
| 270 |
+
|
| 271 |
+
ctx.function = function
|
| 272 |
+
ctx.input_atomic = input_atomic
|
| 273 |
+
if input_atomic:
|
| 274 |
+
tensors = [inputs[0]]
|
| 275 |
+
else:
|
| 276 |
+
tensors = []
|
| 277 |
+
for input in inputs:
|
| 278 |
+
if torch.is_tensor(input):
|
| 279 |
+
tensors.append(input)
|
| 280 |
+
|
| 281 |
+
ctx.save_for_backward(*tensors)
|
| 282 |
+
|
| 283 |
+
with torch.no_grad(), enable_checkpointing():
|
| 284 |
+
if input_atomic:
|
| 285 |
+
assert len(inputs) == 1
|
| 286 |
+
output = function(inputs[0])
|
| 287 |
+
else:
|
| 288 |
+
output = function(*inputs)
|
| 289 |
+
return output
|
| 290 |
+
|
| 291 |
+
@staticmethod
|
| 292 |
+
def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]: # pragma: no cover
|
| 293 |
+
output, input_leaf = ctx.recomputed.pop()
|
| 294 |
+
|
| 295 |
+
if isinstance(output, tuple):
|
| 296 |
+
outputs = output
|
| 297 |
+
else:
|
| 298 |
+
outputs = (output,)
|
| 299 |
+
if any(torch.is_tensor(y) and y.requires_grad for y in outputs):
|
| 300 |
+
tensors = tuple([x for x in outputs if torch.is_tensor(x) and x.requires_grad])
|
| 301 |
+
torch.autograd.backward(tensors, grad_output)
|
| 302 |
+
|
| 303 |
+
grad_input: List[Optional[Tensor]] = [None, None, None, None, None]
|
| 304 |
+
grad_input.extend(x.grad if torch.is_tensor(x) else None for x in input_leaf)
|
| 305 |
+
return tuple(grad_input)
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
class Recompute(torch.autograd.Function):
|
| 309 |
+
@staticmethod
|
| 310 |
+
# type: ignore[override]
|
| 311 |
+
def forward(
|
| 312 |
+
ctx: Context,
|
| 313 |
+
phony: Tensor,
|
| 314 |
+
recomputed: Deque[Recomputed],
|
| 315 |
+
rng_states: Deque[RNGStates],
|
| 316 |
+
function: Function,
|
| 317 |
+
input_atomic: bool,
|
| 318 |
+
*inputs,
|
| 319 |
+
) -> Tensor:
|
| 320 |
+
ctx.recomputed = recomputed
|
| 321 |
+
ctx.rng_states = rng_states
|
| 322 |
+
|
| 323 |
+
ctx.function = function
|
| 324 |
+
ctx.input_atomic = input_atomic
|
| 325 |
+
ctx.inputs = inputs
|
| 326 |
+
if input_atomic:
|
| 327 |
+
tensors = [inputs[0]]
|
| 328 |
+
else:
|
| 329 |
+
tensors = []
|
| 330 |
+
for input in inputs:
|
| 331 |
+
if torch.is_tensor(input):
|
| 332 |
+
tensors.append(input)
|
| 333 |
+
ctx.save_for_backward(*tensors)
|
| 334 |
+
|
| 335 |
+
return phony
|
| 336 |
+
|
| 337 |
+
@staticmethod
|
| 338 |
+
def backward(ctx: Context, *grad_output: Tensor) -> Tuple[None, ...]: # pragma: no cover
|
| 339 |
+
inputs = ctx.inputs
|
| 340 |
+
inputs_leaf = tuple(x.detach().requires_grad_(x.requires_grad) if torch.is_tensor(x) else x for x in inputs)
|
| 341 |
+
|
| 342 |
+
# Get the device for the inputs from a tensor
|
| 343 |
+
device = None
|
| 344 |
+
for input in inputs:
|
| 345 |
+
if torch.is_tensor(input):
|
| 346 |
+
device = input.device
|
| 347 |
+
break
|
| 348 |
+
|
| 349 |
+
if device is None:
|
| 350 |
+
raise RuntimeError(f'No tensors found in {inputs}')
|
| 351 |
+
|
| 352 |
+
with restore_rng_states(device, ctx.rng_states):
|
| 353 |
+
with torch.enable_grad(), enable_recomputing():
|
| 354 |
+
if ctx.input_atomic:
|
| 355 |
+
assert len(inputs_leaf) == 1
|
| 356 |
+
output = ctx.function(inputs_leaf[0])
|
| 357 |
+
else:
|
| 358 |
+
output = ctx.function(*inputs_leaf)
|
| 359 |
+
|
| 360 |
+
ctx.recomputed.append((output, inputs_leaf))
|
| 361 |
+
|
| 362 |
+
grad_input: List[None] = [None, None, None, None, None]
|
| 363 |
+
grad_input.extend(None for _ in ctx.inputs)
|
| 364 |
+
return tuple(grad_input)
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/copy.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Autograd functions for stream-aware CUDA copy.
|
| 8 |
+
|
| 9 |
+
It is used to overlap copy and computation on the same GPU.
|
| 10 |
+
"""
|
| 11 |
+
from collections import deque
|
| 12 |
+
from typing import Deque, List, Optional, Tuple, Sequence
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
from torch import Tensor
|
| 16 |
+
|
| 17 |
+
from .stream import AbstractStream, current_stream, get_device, record_stream, use_stream, wait_stream
|
| 18 |
+
|
| 19 |
+
__all__: List[str] = ["Context", "Copy", "Wait"]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
Tensors = Sequence[Tensor]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Common interface between :class:`Copy` and :class:`Wait`.
|
| 26 |
+
class Context:
|
| 27 |
+
prev_stream: AbstractStream
|
| 28 |
+
next_stream: AbstractStream
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class Copy(torch.autograd.Function):
|
| 32 |
+
"""Copies tensors on specific streams."""
|
| 33 |
+
|
| 34 |
+
@staticmethod
|
| 35 |
+
# type: ignore[override]
|
| 36 |
+
def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input,) -> Tensors:
|
| 37 |
+
ctx.prev_stream = prev_stream
|
| 38 |
+
ctx.next_stream = next_stream
|
| 39 |
+
|
| 40 |
+
output = []
|
| 41 |
+
output_stream = current_stream(get_device(next_stream))
|
| 42 |
+
|
| 43 |
+
with use_stream(prev_stream), use_stream(next_stream):
|
| 44 |
+
for x in input:
|
| 45 |
+
if torch.is_tensor(x):
|
| 46 |
+
y = x.to(get_device(next_stream), non_blocking=True)
|
| 47 |
+
output.append(y)
|
| 48 |
+
|
| 49 |
+
# 'prev_stream' is not where 'x' has been allocated.
|
| 50 |
+
record_stream(x, prev_stream)
|
| 51 |
+
# 'y' has been allocated on 'next_stream'.
|
| 52 |
+
# It might be used on the current stream captured as 'output_stream'.
|
| 53 |
+
record_stream(y, output_stream)
|
| 54 |
+
else:
|
| 55 |
+
output.append(x)
|
| 56 |
+
|
| 57 |
+
return tuple(output)
|
| 58 |
+
|
| 59 |
+
@staticmethod
|
| 60 |
+
def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]:
|
| 61 |
+
prev_stream = ctx.prev_stream
|
| 62 |
+
next_stream = ctx.next_stream
|
| 63 |
+
|
| 64 |
+
grad_input: Deque[Tensor] = deque(maxlen=len(grad_output))
|
| 65 |
+
input_stream = current_stream(get_device(prev_stream))
|
| 66 |
+
|
| 67 |
+
with use_stream(prev_stream), use_stream(next_stream):
|
| 68 |
+
for x in reversed(grad_output):
|
| 69 |
+
y = x.to(get_device(prev_stream), non_blocking=True)
|
| 70 |
+
grad_input.appendleft(y)
|
| 71 |
+
|
| 72 |
+
# 'next_stream' is not where 'x' has been allocated.
|
| 73 |
+
record_stream(x, next_stream)
|
| 74 |
+
# 'y' has been allocated on 'prev_stream'.
|
| 75 |
+
# It might be used on the current stream captured as 'input_stream'.
|
| 76 |
+
record_stream(y, input_stream)
|
| 77 |
+
|
| 78 |
+
grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
|
| 79 |
+
return grad_streams + tuple(grad_input)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class Wait(torch.autograd.Function):
|
| 83 |
+
"""Synchronizes a stream to another stream.
|
| 84 |
+
|
| 85 |
+
Place it just before you want to start an operation on the next stream,
|
| 86 |
+
provided that all operations on the previous stream are done.
|
| 87 |
+
|
| 88 |
+
"""
|
| 89 |
+
|
| 90 |
+
@staticmethod
|
| 91 |
+
# type: ignore[override]
|
| 92 |
+
def forward(ctx: Context, prev_stream: AbstractStream, next_stream: AbstractStream, *input) -> Tensors:
|
| 93 |
+
ctx.prev_stream = prev_stream
|
| 94 |
+
ctx.next_stream = next_stream
|
| 95 |
+
|
| 96 |
+
wait_stream(next_stream, prev_stream)
|
| 97 |
+
|
| 98 |
+
return tuple(x.detach() if torch.is_tensor(x) else x for x in input)
|
| 99 |
+
|
| 100 |
+
@staticmethod
|
| 101 |
+
def backward(ctx: Context, *grad_input: Tensor,) -> Tuple[Optional[Tensor], ...]:
|
| 102 |
+
prev_stream = ctx.prev_stream
|
| 103 |
+
next_stream = ctx.next_stream
|
| 104 |
+
|
| 105 |
+
wait_stream(prev_stream, next_stream)
|
| 106 |
+
|
| 107 |
+
grad_streams: Tuple[Optional[Tensor], ...] = (None, None)
|
| 108 |
+
return grad_streams + grad_input
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/dependency.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Arbitrary dependency between two autograd lanes."""
|
| 8 |
+
from typing import List, Tuple
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from torch import Tensor
|
| 12 |
+
|
| 13 |
+
from .phony import get_phony
|
| 14 |
+
|
| 15 |
+
__all__: List[str] = ["fork", "Fork", "join", "Join"]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def fork(input: Tensor) -> Tuple[Tensor, Tensor]:
|
| 19 |
+
"""Branches out from an autograd lane of the given tensor."""
|
| 20 |
+
if torch.is_grad_enabled() and input.requires_grad:
|
| 21 |
+
input, phony = Fork.apply(input)
|
| 22 |
+
else:
|
| 23 |
+
phony = get_phony(input.device, requires_grad=False)
|
| 24 |
+
|
| 25 |
+
return input, phony
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class Fork(torch.autograd.Function):
|
| 29 |
+
@staticmethod
|
| 30 |
+
def forward(ctx: "Fork", input: Tensor) -> Tuple[Tensor, Tensor]: # type: ignore[override]
|
| 31 |
+
phony = get_phony(input.device, requires_grad=False)
|
| 32 |
+
return input.detach(), phony.detach()
|
| 33 |
+
|
| 34 |
+
@staticmethod
|
| 35 |
+
def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor: # type: ignore[override]
|
| 36 |
+
return grad_input
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def join(input: Tensor, phony: Tensor) -> Tensor:
|
| 40 |
+
"""Merge two autograd lanes."""
|
| 41 |
+
if torch.is_grad_enabled() and (input.requires_grad or phony.requires_grad):
|
| 42 |
+
input = Join.apply(input, phony)
|
| 43 |
+
|
| 44 |
+
return input
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class Join(torch.autograd.Function):
|
| 48 |
+
@staticmethod
|
| 49 |
+
def forward(ctx: "Join", input: Tensor, phony: Tensor) -> Tensor: # type: ignore[override]
|
| 50 |
+
return input.detach()
|
| 51 |
+
|
| 52 |
+
@staticmethod
|
| 53 |
+
def backward(ctx: "Join", grad_input: Tensor) -> Tuple[Tensor, None]: # type: ignore[override]
|
| 54 |
+
return grad_input, None
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/microbatch.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Manipulation of micro-batches."""
|
| 8 |
+
import typing
|
| 9 |
+
from typing import Any, Callable, List, Union, cast, Sequence
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
from torch import Tensor
|
| 13 |
+
import torch.cuda.comm
|
| 14 |
+
|
| 15 |
+
__all__: List[str] = ["NoChunk", "Batch", "check", "scatter", "gather"]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
Tensors = Sequence[Tensor]
|
| 19 |
+
TensorOrTensors = Union[Tensor, Tensors]
|
| 20 |
+
Function = Callable[[TensorOrTensors], Union[List[Any], Tensor]]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class NoChunk:
|
| 24 |
+
"""
|
| 25 |
+
Wrapper for a Tensor in :meth:`Pipe.forward` indicating that the tensor
|
| 26 |
+
should not be chunked on the batch dimension and instead be replicated
|
| 27 |
+
as-is across all micro-batches. This is useful for tensors which might
|
| 28 |
+
not have any 'batch' semantics for the model.
|
| 29 |
+
"""
|
| 30 |
+
def __init__(self, inp: Tensor):
|
| 31 |
+
if not torch.is_tensor(inp):
|
| 32 |
+
raise TypeError(f'NoChunk only supported for tensors, found: {inp}')
|
| 33 |
+
self._tensor = inp
|
| 34 |
+
|
| 35 |
+
@property
|
| 36 |
+
def tensor(self):
|
| 37 |
+
return self._tensor
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class Batch:
|
| 41 |
+
"""
|
| 42 |
+
An abstraction representing a microbatch in the pipeline.
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
def __init__(self, values: Union[List[Any], Tensor]) -> None:
|
| 46 |
+
self._values = values
|
| 47 |
+
self.atomic = torch.is_tensor(values)
|
| 48 |
+
|
| 49 |
+
# Verify at least on tensor
|
| 50 |
+
if not self.atomic:
|
| 51 |
+
if not any(torch.is_tensor(value) for value in self._values):
|
| 52 |
+
raise TypeError(f'No tensors found in batch: {self._values}')
|
| 53 |
+
|
| 54 |
+
@property
|
| 55 |
+
def tensor(self) -> Tensor:
|
| 56 |
+
"""Retrieves the underlying tensor."""
|
| 57 |
+
if not self.atomic:
|
| 58 |
+
raise AttributeError("not atomic batch")
|
| 59 |
+
return cast(Tensor, self._values)
|
| 60 |
+
|
| 61 |
+
@property
|
| 62 |
+
def values(self):
|
| 63 |
+
"""Retrieves the underlying values for the batch"""
|
| 64 |
+
return self._values
|
| 65 |
+
|
| 66 |
+
def find_tensor_idx(self):
|
| 67 |
+
"""
|
| 68 |
+
Retrieves the index of first tensor found.
|
| 69 |
+
"""
|
| 70 |
+
if self.atomic:
|
| 71 |
+
return 0
|
| 72 |
+
for i, value in enumerate(self._values):
|
| 73 |
+
if torch.is_tensor(value):
|
| 74 |
+
return i
|
| 75 |
+
|
| 76 |
+
raise TypeError("No tensor found!")
|
| 77 |
+
|
| 78 |
+
def get_device(self):
|
| 79 |
+
"""
|
| 80 |
+
Retrieves the device for this microbatch.
|
| 81 |
+
"""
|
| 82 |
+
if self.atomic:
|
| 83 |
+
return self._values.device # type: ignore[union-attr]
|
| 84 |
+
|
| 85 |
+
for value in self._values:
|
| 86 |
+
if torch.is_tensor(value):
|
| 87 |
+
return value.device
|
| 88 |
+
|
| 89 |
+
def call(self, function: Function) -> "Batch":
|
| 90 |
+
"""Calls a function on the microbatch. It also wraps
|
| 91 |
+
the output with :class:`Batch`.
|
| 92 |
+
"""
|
| 93 |
+
if self.atomic:
|
| 94 |
+
return Batch(function(self._values))
|
| 95 |
+
else:
|
| 96 |
+
return Batch(function(*self._values))
|
| 97 |
+
|
| 98 |
+
def __repr__(self) -> str:
|
| 99 |
+
return f"Batch[atomic={self.atomic!r}]({self._values!r})"
|
| 100 |
+
|
| 101 |
+
def __iter__(self):
|
| 102 |
+
if self.atomic:
|
| 103 |
+
yield self._values
|
| 104 |
+
else:
|
| 105 |
+
yield from self._values
|
| 106 |
+
|
| 107 |
+
def __len__(self) -> int:
|
| 108 |
+
return 1 if self.atomic else len(self._values)
|
| 109 |
+
|
| 110 |
+
def __getitem__(self, index: int):
|
| 111 |
+
if not self.atomic:
|
| 112 |
+
return self._values[index]
|
| 113 |
+
|
| 114 |
+
if index != 0:
|
| 115 |
+
raise IndexError("atomic batch allows index 0 only")
|
| 116 |
+
|
| 117 |
+
return self._values
|
| 118 |
+
|
| 119 |
+
# NOTE(sublee): pyflakes can't detect "overload" instead of "typing.overload".
|
| 120 |
+
@typing.overload
|
| 121 |
+
def __setitem__(self, index: int, value: Tensor) -> None:
|
| 122 |
+
...
|
| 123 |
+
|
| 124 |
+
@typing.overload
|
| 125 |
+
def __setitem__(self, index: slice, value: Tensors) -> None:
|
| 126 |
+
...
|
| 127 |
+
|
| 128 |
+
def __setitem__(self, index: Union[int, slice], value) -> None:
|
| 129 |
+
if isinstance(index, int):
|
| 130 |
+
self._setitem_by_index(index, value)
|
| 131 |
+
else:
|
| 132 |
+
self._setitem_by_slice(index, value)
|
| 133 |
+
|
| 134 |
+
def _setitem_by_index(self, index: int, value) -> None:
|
| 135 |
+
if not self.atomic:
|
| 136 |
+
i = index
|
| 137 |
+
self._values = self._values[:i] + (value,) + self._values[i + 1 :] # type: ignore[operator]
|
| 138 |
+
return
|
| 139 |
+
|
| 140 |
+
if index != 0:
|
| 141 |
+
raise IndexError("atomic batch allows index 0 only")
|
| 142 |
+
|
| 143 |
+
self._values = value
|
| 144 |
+
|
| 145 |
+
def _setitem_by_slice(self, index: slice, value) -> None:
|
| 146 |
+
if not (index.start is index.stop is index.step is None): # noqa: E714
|
| 147 |
+
raise NotImplementedError("only slice [:] supported")
|
| 148 |
+
|
| 149 |
+
if not self.atomic:
|
| 150 |
+
self._values = value
|
| 151 |
+
return
|
| 152 |
+
|
| 153 |
+
if len(value) != 1:
|
| 154 |
+
raise IndexError("atomic batch cannot be replaced with multiple tensors")
|
| 155 |
+
|
| 156 |
+
self._values = value[0]
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def check(first_device, *inputs) -> None:
|
| 160 |
+
"""
|
| 161 |
+
Checks whether the input contains at least one tensor and each tensor is
|
| 162 |
+
on the same device as the first partition.
|
| 163 |
+
|
| 164 |
+
Raises:
|
| 165 |
+
ValueError: input does not contain at least one tensor
|
| 166 |
+
|
| 167 |
+
"""
|
| 168 |
+
|
| 169 |
+
if not any(torch.is_tensor(input) for input in inputs):
|
| 170 |
+
raise TypeError(f'inputs do not have any tensors: {inputs}')
|
| 171 |
+
if any(torch.is_tensor(input) and input.device != first_device for input in inputs):
|
| 172 |
+
raise ValueError('All inputs should be on the same device as the first partition')
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def scatter(*inputs, chunks: int) -> List[Batch]:
|
| 176 |
+
"""Splits an input mini-batch into multiple micro-batches."""
|
| 177 |
+
if len(inputs) == 1 and isinstance(inputs[0], Tensor):
|
| 178 |
+
return [Batch(x) for x in inputs[0].chunk(chunks)]
|
| 179 |
+
|
| 180 |
+
batches: List[Any] = [[] for _ in range(chunks)]
|
| 181 |
+
# Actual number of chunks produced
|
| 182 |
+
num_chunks = -1
|
| 183 |
+
for input in inputs:
|
| 184 |
+
if torch.is_tensor(input):
|
| 185 |
+
# Chunk only tensors.
|
| 186 |
+
tensors = input.chunk(chunks)
|
| 187 |
+
|
| 188 |
+
# Validate number of chunks equal across all inputs.
|
| 189 |
+
if num_chunks != -1 and num_chunks != len(tensors):
|
| 190 |
+
raise RuntimeError(f'Found different number of chunks produced for inputs: {num_chunks} and {len(tensors)}')
|
| 191 |
+
num_chunks = len(tensors)
|
| 192 |
+
|
| 193 |
+
for i, tensor in enumerate(tensors):
|
| 194 |
+
batches[i].append(tensor)
|
| 195 |
+
else:
|
| 196 |
+
# Replicate non-tensors or tensors wrapped with 'NoChunk'.
|
| 197 |
+
for i in range(chunks):
|
| 198 |
+
if isinstance(input, NoChunk):
|
| 199 |
+
# Extract the tensor out.
|
| 200 |
+
batches[i].append(input.tensor)
|
| 201 |
+
else:
|
| 202 |
+
batches[i].append(input)
|
| 203 |
+
|
| 204 |
+
# Truncate to actual number of chunks
|
| 205 |
+
batches = batches[:num_chunks]
|
| 206 |
+
|
| 207 |
+
return [Batch(x) for x in batches]
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def gather(outputs: List[Batch]):
|
| 211 |
+
"""Concatenates output micro-batches into a mini-batch."""
|
| 212 |
+
output: Any
|
| 213 |
+
|
| 214 |
+
if outputs[0].atomic:
|
| 215 |
+
tensors = tuple(b.tensor for b in outputs)
|
| 216 |
+
output = torch.cat(tensors)
|
| 217 |
+
else:
|
| 218 |
+
output_buf: List[Any] = []
|
| 219 |
+
for i in range(len(outputs[0])):
|
| 220 |
+
output_type = type(outputs[0][i])
|
| 221 |
+
current_outputs = []
|
| 222 |
+
for batch in outputs:
|
| 223 |
+
if output_type != type(batch[i]):
|
| 224 |
+
raise TypeError(f'Types for microbatch outputs do not match, found: {output_type} and {type(batch[i])}')
|
| 225 |
+
current_outputs.append(batch[i])
|
| 226 |
+
|
| 227 |
+
if torch.is_tensor(outputs[0][i]):
|
| 228 |
+
output_buf.append(torch.cat(current_outputs))
|
| 229 |
+
else:
|
| 230 |
+
output_buf.append(current_outputs)
|
| 231 |
+
|
| 232 |
+
output = tuple(output_buf)
|
| 233 |
+
|
| 234 |
+
return output
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/phony.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Provides phony for arbitrary dependency in a autograd graph."""
|
| 8 |
+
from typing import Dict, List, Tuple
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from torch import Tensor
|
| 12 |
+
|
| 13 |
+
from .stream import default_stream, use_stream
|
| 14 |
+
|
| 15 |
+
__all__: List[str] = ["get_phony"]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
_phonies: Dict[Tuple[torch.device, bool], Tensor] = {}
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def get_phony(device: torch.device, *, requires_grad: bool) -> Tensor:
|
| 22 |
+
"""Get a phony. Phony is tensor without space.
|
| 23 |
+
|
| 24 |
+
It is useful to make arbitrary dependency in a autograd graph because it doesn't require any
|
| 25 |
+
gradient accumulation.
|
| 26 |
+
|
| 27 |
+
.. note::
|
| 28 |
+
|
| 29 |
+
Phonies for each device are cached. If an autograd function gets a phony
|
| 30 |
+
internally, the phony must be detached to be returned. Otherwise, the
|
| 31 |
+
autograd engine will mutate the cached phony in-place::
|
| 32 |
+
|
| 33 |
+
class Phonify(torch.autograd.Function):
|
| 34 |
+
@staticmethod
|
| 35 |
+
def forward(ctx, input):
|
| 36 |
+
phony = get_phony(input.device, requires_grad=False)
|
| 37 |
+
return phony.detach() # detach() is necessary.
|
| 38 |
+
|
| 39 |
+
"""
|
| 40 |
+
key = (device, requires_grad)
|
| 41 |
+
|
| 42 |
+
try:
|
| 43 |
+
phony = _phonies[key]
|
| 44 |
+
except KeyError:
|
| 45 |
+
with use_stream(default_stream(device)):
|
| 46 |
+
phony = torch.empty(0, device=device, requires_grad=requires_grad)
|
| 47 |
+
|
| 48 |
+
_phonies[key] = phony
|
| 49 |
+
|
| 50 |
+
return phony
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipe.py
ADDED
|
@@ -0,0 +1,490 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""The Pipe interface."""
|
| 8 |
+
from collections import OrderedDict
|
| 9 |
+
from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Union, Sequence, Tuple, cast
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
from torch import Tensor, nn
|
| 13 |
+
from torch.distributed.rpc import RRef
|
| 14 |
+
import torch.autograd
|
| 15 |
+
import torch.cuda
|
| 16 |
+
|
| 17 |
+
from . import microbatch
|
| 18 |
+
from .batchnorm import DeferredBatchNorm
|
| 19 |
+
from .pipeline import Pipeline
|
| 20 |
+
from .skip.layout import inspect_skip_layout
|
| 21 |
+
from .skip.skippable import verify_skippables
|
| 22 |
+
from .stream import AbstractStream, new_stream
|
| 23 |
+
|
| 24 |
+
__all__ = ["Pipe", "BalanceError", "PipeSequential", "WithDevice"]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
Device = Union[torch.device, int, str]
|
| 28 |
+
Devices = Union[Iterable[Device], List[Device]]
|
| 29 |
+
|
| 30 |
+
Tensors = Sequence[Tensor]
|
| 31 |
+
TensorOrTensors = Union[Tensor, Tensors]
|
| 32 |
+
|
| 33 |
+
if TYPE_CHECKING:
|
| 34 |
+
# Typechecking: nn.Module is not a Generic
|
| 35 |
+
Module = nn.Module[TensorOrTensors] # type: ignore[type-arg]
|
| 36 |
+
NamedModules = OrderedDict[str, Module]
|
| 37 |
+
else:
|
| 38 |
+
Module = nn.Module
|
| 39 |
+
NamedModules = OrderedDict
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _recommend_auto_balance(message: str) -> str:
|
| 43 |
+
"""Expands a message with recommendation to :mod:`torchpipe.balance`."""
|
| 44 |
+
return f"""{message}
|
| 45 |
+
|
| 46 |
+
If your model is still under development, its optimal balance would change
|
| 47 |
+
frequently. In this case, we highly recommend 'torch.distributed.pipeline.sync.balance' for
|
| 48 |
+
naive automatic balancing:
|
| 49 |
+
|
| 50 |
+
from torch.distributed.pipeline.sync import Pipe
|
| 51 |
+
from torch.distributed.pipeline.sync.balance import balance_by_time
|
| 52 |
+
|
| 53 |
+
partitions = torch.cuda.device_count()
|
| 54 |
+
sample = torch.empty(...)
|
| 55 |
+
balance = balance_by_time(partitions, model, sample)
|
| 56 |
+
|
| 57 |
+
model = Pipe(model, balance, ...)
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _verify_module(module: nn.Sequential) -> None:
|
| 62 |
+
if not isinstance(module, nn.Sequential):
|
| 63 |
+
raise TypeError("module must be nn.Sequential to be partitioned")
|
| 64 |
+
|
| 65 |
+
named_children = list(module.named_children())
|
| 66 |
+
if len(named_children) != len(module):
|
| 67 |
+
raise ValueError("module with duplicate children is not supported")
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _verify_splitting(
|
| 71 |
+
module: nn.Sequential, partitions: List[nn.Sequential], devices: List[torch.device]
|
| 72 |
+
) -> None:
|
| 73 |
+
num_parameters = len(list(module.parameters()))
|
| 74 |
+
num_child_parameters = sum(len(list(child.parameters())) for child in module.children())
|
| 75 |
+
if num_parameters == num_child_parameters:
|
| 76 |
+
return
|
| 77 |
+
|
| 78 |
+
for i in range(len(partitions)):
|
| 79 |
+
for j in range(i + 1, len(partitions)):
|
| 80 |
+
parti = partitions[i]
|
| 81 |
+
partj = partitions[j]
|
| 82 |
+
if devices[i] == devices[j]:
|
| 83 |
+
continue
|
| 84 |
+
for p in parti.parameters():
|
| 85 |
+
for q in partj.parameters():
|
| 86 |
+
if p is q:
|
| 87 |
+
raise ValueError("module with duplicate parameters on distinct devices is not supported")
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class BalanceError(ValueError):
    """Raised when the given partition balance is invalid."""
    pass
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _retrieve_device(module: nn.Module) -> torch.device:
|
| 95 |
+
"""Validates all parameters in the Module have the same device and returns
|
| 96 |
+
the appropriate device.
|
| 97 |
+
|
| 98 |
+
Args:
|
| 99 |
+
An ``nn.Module`` to process.
|
| 100 |
+
|
| 101 |
+
Returns:
|
| 102 |
+
``torch.Device`` for the entire module.
|
| 103 |
+
|
| 104 |
+
Raises:
|
| 105 |
+
ValueError:
|
| 106 |
+
If devices for ``nn.Module`` parameters are not all same.
|
| 107 |
+
"""
|
| 108 |
+
|
| 109 |
+
device = None
|
| 110 |
+
for parameter in module.parameters():
|
| 111 |
+
if device is None:
|
| 112 |
+
device = parameter.device
|
| 113 |
+
elif device != parameter.device:
|
| 114 |
+
raise ValueError(
|
| 115 |
+
f'nn.Module: {module}, should have all parameters on a single device,'
|
| 116 |
+
' please use .to() to place the module on a single device')
|
| 117 |
+
|
| 118 |
+
return device if device is not None else torch.device("cpu")
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class PipeSequential(nn.Sequential):
    """Pipe variant of ``nn.Sequential`` which supports multiple inputs.

    A plain ``nn.Sequential`` threads a single value through its children;
    this variant unpacks tuple outputs as positional arguments for the next
    child, so multi-input/multi-output stages compose naturally.
    """

    def forward(self, *inputs):
        for module in self:
            # NOTE: the upstream code used `isinstance(inputs, Tuple)` with the
            # typing alias, which is deprecated at runtime; the builtin `tuple`
            # is the correct runtime check and behaves identically.
            if isinstance(inputs, tuple):
                inputs = module(*inputs)
            else:
                # Don't expand single variables (ex: lists/Tensor)
                inputs = module(inputs)
        return inputs
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
class WithDevice(nn.Module):
    """
    Wraps an ``nn.Module`` which is part of ``nn.Sequential`` passed into :class:`Pipe`
    that overrides the device for that module. In cases where :class:`Pipe`
    can't implicitly determine the device for the module and places it on CPU,
    this wrapper can be used to override the implicit behavior and explicitly
    specify which device a module should run on.

    The provided module is also moved to the given device via ``.to(device)``
    by :class:`Pipe`

    Args:
        module(:class:`torch.nn.Module`): The module to be wrapped.
        device(:class:`torch.device`): The device to run the module on.

    Example::
        >>> # xdoctest: +SKIP("distributed")
        >>> fc1 = nn.Linear(16, 8).cuda(0)
        >>> fc2 = nn.Linear(8, 4).cuda(1)
        >>> dropout = nn.Dropout()
        >>>
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
        >>> # Dropout does not have any parameters/buffers, but we want to
        >>> # run it on cuda:1 to avoid any GPU to CPU transfers.
        >>> model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1'))
        >>> # xdoctest: +SKIP("Needs RPC framework init")
        >>> model = Pipe(model, chunks=8)
    """

    def __init__(self, module: nn.Module, device: torch.device):
        super().__init__()
        self._module = module
        # Normalize strings/ints to a torch.device up front.
        self._device = torch.device(device)

    def forward(self, *args, **kwargs):
        # Pure delegation: the wrapper only carries device metadata.
        return self._module(*args, **kwargs)

    @property
    def module(self):
        """The wrapped module."""
        return self._module

    @property
    def device(self):
        """The device the wrapped module should run on."""
        return self._device
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def _assemble_partition(modules: List[nn.Module]) -> "PipeSequential":
    """Flatten a list of modules into a single :class:`PipeSequential`.

    Nested ``nn.Sequential`` containers are unpacked so that the resulting
    partition is a flat sequence of leaf stages.
    """
    modules_list: List[nn.Module] = []
    for module in modules:
        if isinstance(module, nn.Sequential):
            modules_list.extend(module.children())
        else:
            modules_list.append(module)
    return PipeSequential(*modules_list)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _split_module(modules: nn.Sequential) -> Tuple[List[nn.Sequential], List[torch.device]]:
    """Split a sequential module into per-device partitions.

    Consecutive children on the same (non-CPU) device are grouped into one
    partition; a device change — or any CPU-resident child — starts a new
    partition. :class:`WithDevice` wrappers are unwrapped and their module is
    moved to the requested device.

    Returns:
        ``(partitions, devices)`` where ``partitions[i]`` runs on ``devices[i]``.
    """
    partitions = []
    devices = []

    current_partition = []
    current_device = None
    for name, module in modules.named_children():
        if isinstance(module, WithDevice):
            # Process device override and move module to appropriate device.
            device = module.device
            module = module.module
            module.to(device)
        else:
            device = _retrieve_device(module)
        # CPU modules never merge with a previous partition: each CPU stage is
        # kept separate so data can be moved off-device around it.
        if current_device is not None and (current_device != device or device.type == 'cpu'):
            partitions.append(_assemble_partition(current_partition))
            devices.append(current_device)
            current_partition = []
        current_device = device
        current_partition.append(module)

    if current_device is not None:
        partitions.append(_assemble_partition(current_partition))
        devices.append(current_device)

    partitions = cast(List[nn.Sequential], nn.ModuleList(partitions))

    return partitions, devices
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
# Pre-built exception raised by Pipe.cuda()/cpu()/to(device): Pipe owns device
# placement, so callers may not move parameters/buffers themselves.
MOVING_DENIED = TypeError("denied to move parameters and buffers, because Pipe should manage device placement")
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
class Pipe(Module):
    """Wraps an arbitrary :class:`nn.Sequential <torch.nn.Sequential>` module
    to train on using synchronous pipeline parallelism. If the module requires
    lots of memory and doesn't fit on a single GPU, pipeline parallelism is a
    useful technique to employ for training.

    The implementation is based on the torchgpipe_ paper.

    .. _torchgpipe: https://arxiv.org/abs/2004.09910

    Pipe combines pipeline parallelism with checkpointing to reduce peak
    memory required to train while minimizing device under-utilization.

    You should place all the modules on the appropriate devices and wrap them
    into an :class:`nn.Sequential <torch.nn.Sequential>` module defining the
    desired order of execution. If a module does not contain any
    parameters/buffers, it is assumed this module should be executed on CPU
    and appropriate input tensors to the module are moved to CPU before
    execution. This behavior can be overridden by the :class:`WithDevice`
    wrapper which can be used to explicitly specify which device a module
    should run on.

    Args:
        module (:class:`nn.Sequential <torch.nn.Sequential>`):
            sequential module to be parallelized using pipelining. Each module
            in the sequence has to have all of its parameters on a single
            device. Each module in the sequence has to either be an nn.Module
            or :class:`nn.Sequential <torch.nn.Sequential>` (to combine multiple
            sequential modules on a single device)
        chunks (int):
            number of micro-batches (default: ``1``)
        checkpoint (str):
            when to enable checkpointing, one of ``'always'``,
            ``'except_last'``, or ``'never'`` (default: ``'except_last'``).
            ``'never'`` disables checkpointing completely, ``'except_last'``
            enables checkpointing for all micro-batches except the last one
            and ``'always'`` enables checkpointing for all micro-batches.
        deferred_batch_norm (bool):
            whether to use deferred ``BatchNorm`` moving statistics (default:
            :data:`False`). If set to :data:`True`, we track statistics across
            multiple micro-batches to update the running statistics per
            mini-batch.

    Raises:
        TypeError:
            the module is not a :class:`nn.Sequential <torch.nn.Sequential>`.
        ValueError:
            invalid arguments

    Example::
        Pipeline of two FC layers across GPUs 0 and 1.

        >>> # Need to initialize RPC framework first.
        >>> # xdoctest: +SKIP
        >>> os.environ['MASTER_ADDR'] = 'localhost'
        >>> os.environ['MASTER_PORT'] = '29500'
        >>> torch.distributed.rpc.init_rpc('worker', rank=0, world_size=1)
        >>>
        >>> # Build pipe.
        >>> fc1 = nn.Linear(16, 8).cuda(0)
        >>> fc2 = nn.Linear(8, 4).cuda(1)
        >>> model = nn.Sequential(fc1, fc2)
        >>> model = Pipe(model, chunks=8)
        >>> input = torch.rand(16, 16).cuda(0)
        >>> output_rref = model(input)

    .. note::
        You can wrap a :class:`Pipe` model with
        :class:`torch.nn.parallel.DistributedDataParallel` only when the
        checkpoint parameter of :class:`Pipe` is ``'never'``.

    .. note::
        :class:`Pipe` only supports intra-node pipelining currently, but
        will be expanded to support inter-node pipelining in the future.
        The forward function returns an :class:`~torch.distributed.rpc.RRef`
        to allow for inter-node pipelining in the future, where the output
        might be on a remote host. For intra-node pipelining you can use
        :meth:`~torch.distributed.rpc.RRef.local_value` to retrieve the
        output locally.

    .. warning::
        :class:`Pipe` is experimental and subject to change.
    """

    def __init__(
        self,
        module: nn.Sequential,
        chunks: int = 1,
        checkpoint: str = "except_last",
        deferred_batch_norm: bool = False,
    ) -> None:
        super().__init__()

        # Check if RPC framework is initialized.
        if not torch.distributed.rpc._is_current_rpc_agent_set():
            raise RuntimeError(
                'Please initialize RPC framework for Pipe using '
                'torch.distributed.rpc.init_rpc')

        chunks = int(chunks)
        checkpoint = str(checkpoint)

        if chunks <= 0:
            raise ValueError("number of chunks must be positive integer")
        if checkpoint not in ["always", "except_last", "never"]:
            raise ValueError("checkpoint is not one of 'always', 'except_last', or 'never'")

        _verify_module(module)

        # Verify if the underlying skippable modules satisfy integrity. The
        # integrity can be verified before forward() because it is static.
        verify_skippables(module)

        self.chunks = chunks
        self.checkpoint = checkpoint

        if deferred_batch_norm:
            module = DeferredBatchNorm.convert_deferred_batch_norm(module, chunks)

        self.partitions, self.devices = _split_module(module)
        _verify_splitting(module, self.partitions, self.devices)

        self._copy_streams: List[List[AbstractStream]] = []
        self._skip_layout = inspect_skip_layout(self.partitions)

        # Separate CUDA streams for copy.
        copy_streams = self._ensure_copy_streams()

        # The micro-batch index where the checkpointing stops.
        checkpoint_stop = {"always": self.chunks, "except_last": self.chunks - 1, "never": 0}[self.checkpoint]

        self.pipeline = Pipeline(self.partitions, self.devices, copy_streams, self._skip_layout, checkpoint_stop)

    def __len__(self) -> int:
        """Counts the length of the underlying sequential module."""
        return sum(len(p) for p in self.partitions)

    def __getitem__(self, index: int) -> nn.Module:
        """Gets a layer in the underlying sequential module."""
        partitions = self.partitions
        if index < 0:
            # Negative indices walk the partitions back-to-front.
            partitions = partitions[::-1]

        for partition in partitions:
            try:
                return partition[index]
            except IndexError:
                pass

            # Index was out of range for this partition: shift it into the
            # next partition's coordinate space and keep looking.
            shift = len(partition)

            if index < 0:
                index += shift
            else:
                index -= shift

        raise IndexError

    def __iter__(self) -> Iterator[nn.Module]:
        """Iterates over children of the underlying sequential module."""
        for partition in self.partitions:
            yield from partition

    # Pipe should manage the device of each partition.
    # Deny cuda(), cpu(), and to() with device, by TypeError.
    def cuda(self, device: Optional[Device] = None) -> "Pipe":
        raise MOVING_DENIED

    def cpu(self) -> "Pipe":
        raise MOVING_DENIED

    def to(self, *args: Any, **kwargs: Any) -> "Pipe":
        # Deny these usages:
        #
        # - to(device[, dtype, non_blocking])
        # - to(tensor[, non_blocking])
        #
        # But allow this:
        #
        # - to(dtype[, non_blocking])
        #
        if "device" in kwargs or "tensor" in kwargs:
            raise MOVING_DENIED

        if args:
            if isinstance(args[0], (torch.device, int, str)):
                raise MOVING_DENIED
            if torch.is_tensor(args[0]):
                raise MOVING_DENIED

        return super().to(*args, **kwargs)

    def _ensure_copy_streams(self) -> List[List[AbstractStream]]:
        """Ensures that :class:`Pipe` caches CUDA streams for copy.

        It's worth to cache CUDA streams although PyTorch already manages a
        pool of pre-allocated CUDA streams, because it may reduce GPU memory
        fragmentation when the number of micro-batches is small.
        """
        if not self._copy_streams:
            for device in self.devices:
                self._copy_streams.append([new_stream(device) for _ in range(self.chunks)])

        return self._copy_streams

    def forward(self, *inputs) -> RRef:
        """
        Processes a single input mini-batch through the pipe and returns an
        :class:`~torch.distributed.rpc.RRef` pointing to the output.
        :class:`Pipe` is a fairly transparent module wrapper. It doesn't
        modify the input and output signature of the underlying module. But
        there's type restriction. Input and output have to contain at least one
        tensor. This restriction is applied at partition boundaries too.

        The sequence of inputs are fed into the first stage of the pipeline as
        ``*inputs``. As a result the positional args for this function should
        match the positional args for the first stage of the pipeline. The same
        condition applies for output of one stage of the pipeline which is the
        input for the next stage.

        The input tensor is split into multiple micro-batches based on the
        ``chunks`` parameter used to initialize :class:`Pipe`. The batch size
        is assumed to be the first dimension of the tensor and if the batch
        size is less than ``chunks``, the number of micro-batches is equal to
        the batch size.

        Only tensors are split into multiple micro-batches, non-Tensor inputs
        are just replicated as-is in each micro-batch. For non-Tensor outputs
        in the last stage of the pipeline, they are aggregated as a ``List``
        and returned the user. For example, if you have 2 micro-batches
        returning the integer 5, the user would receive the consolidated
        output of `[5, 5]`

        All the input tensors need to be on the same device as the first
        partition of the pipeline.

        If a tensor is wrapped with the :class:`NoChunk` wrapper, the tensor
        is not split across micro-batches and is replicated as-is similar to
        non-tensors.

        Args:
            inputs: input mini-batch

        Returns:
            :class:`~torch.distributed.rpc.RRef` to the output of the mini-batch

        Raises:
            TypeError: input doesn't contain at least one tensor
        """
        first_partition_device = self.devices[0] if self.devices else torch.device("cpu")
        microbatch.check(first_partition_device, *inputs)

        if not self.devices:
            # Empty sequential module is not illegal.
            return RRef(*inputs)

        # Divide a mini-batch into micro-batches.
        batches = microbatch.scatter(*inputs, chunks=self.chunks)

        # Run pipeline parallelism.
        self.pipeline.run(batches)

        # Merge the micro-batches into one mini-batch.
        output = microbatch.gather(batches)
        return RRef(output)
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipeline.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""The pipeline parallelism of Pipe."""
|
| 8 |
+
from queue import Queue
|
| 9 |
+
from types import TracebackType
|
| 10 |
+
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Type, Union, cast, Sequence
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from torch import Tensor, nn
|
| 14 |
+
from torch.autograd.profiler import record_function
|
| 15 |
+
|
| 16 |
+
from .checkpoint import Checkpointing
|
| 17 |
+
from .copy import Copy, Wait
|
| 18 |
+
from .dependency import fork, join
|
| 19 |
+
from .microbatch import Batch
|
| 20 |
+
from .skip.layout import SkipLayout
|
| 21 |
+
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
|
| 22 |
+
from .stream import AbstractStream, current_stream, use_device
|
| 23 |
+
from .worker import Task, create_workers
|
| 24 |
+
|
| 25 |
+
__all__: List[str] = ["Pipeline"]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
Tensors = Sequence[Tensor]
|
| 29 |
+
TensorOrTensors = Union[Tensor, Tensors]
|
| 30 |
+
|
| 31 |
+
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
|
| 32 |
+
|
| 33 |
+
# Queue is generic only in stubs.
|
| 34 |
+
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
|
| 35 |
+
if TYPE_CHECKING:
|
| 36 |
+
InQueue = Queue[Optional["Task"]]
|
| 37 |
+
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
|
| 38 |
+
else:
|
| 39 |
+
InQueue = Queue
|
| 40 |
+
OutQueue = Queue
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _depend(fork_from: Batch, join_to: Batch) -> None:
    """Create an explicit autograd dependency from *fork_from* to *join_to*.

    A phony tensor is forked out of one batch and joined into the other so
    that backpropagation through ``join_to`` is ordered after ``fork_from``.
    """
    fork_from_idx = fork_from.find_tensor_idx()
    join_to_idx = join_to.find_tensor_idx()

    fork_from[fork_from_idx], phony = fork(fork_from[fork_from_idx])
    join_to[join_to_idx] = join(join_to[join_to_idx], phony)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _copy(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None:
    """Copy *batch* in place from *prev_stream* to *next_stream*."""
    batch[:] = Copy.apply(prev_stream, next_stream, *batch)
    # Gradients are only supported for float Tensors, so detach everything else.
    batch[:] = tuple(x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in batch)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _wait(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None:
    """Synchronize *next_stream* with *prev_stream* for *batch*, in place."""
    batch[:] = Wait.apply(prev_stream, next_stream, *batch)
    # Gradients are only supported for float Tensors, so detach everything else.
    batch[:] = tuple(x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in batch)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _clock_cycles(m: int, n: int) -> Iterable[List[Tuple[int, int]]]:
|
| 64 |
+
"""Generate schedules for each clock cycle."""
|
| 65 |
+
# m: number of micro-batches
|
| 66 |
+
# n: number of partitions
|
| 67 |
+
# i: index of micro-batch
|
| 68 |
+
# j: index of partition
|
| 69 |
+
# k: clock number
|
| 70 |
+
#
|
| 71 |
+
# k (i,j) (i,j) (i,j)
|
| 72 |
+
# - ----- ----- -----
|
| 73 |
+
# 0 (0,0)
|
| 74 |
+
# 1 (1,0) (0,1)
|
| 75 |
+
# 2 (2,0) (1,1) (0,2)
|
| 76 |
+
# 3 (2,1) (1,2)
|
| 77 |
+
# 4 (2,2)
|
| 78 |
+
for k in range(m + n - 1):
|
| 79 |
+
yield [(k - j, j) for j in range(max(1 + k - m, 0), min(1 + k, n))]
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class Pipeline:
    """The pipeline parallelism for Pipe."""

    def __init__(
        self,
        partitions: List[nn.Sequential],
        devices: List[torch.device],
        copy_streams: List[List[AbstractStream]],
        skip_layout: SkipLayout,
        checkpoint_stop: int,
    ) -> None:
        self.partitions = partitions
        self.devices = devices
        self.copy_streams = copy_streams
        self.skip_layout = skip_layout
        # Micro-batch index at which checkpointing stops being applied.
        self.checkpoint_stop = checkpoint_stop
        (self.in_queues, self.out_queues) = create_workers(devices)

    def run(self, batches: List[Batch]) -> None:
        """Runs pipeline parallelism.

        It modifies the given batches in place.
        """
        partitions = self.partitions
        skip_layout = self.skip_layout

        m = len(batches)
        n = len(partitions)

        skip_trackers = [SkipTrackerThroughPotals(skip_layout) for _ in batches]

        # Drive the GPipe clock: each cycle first copies inputs between
        # devices (fence), then computes the scheduled tasks.
        for schedule in _clock_cycles(m, n):
            self.fence(batches, schedule, skip_trackers)
            self.compute(batches, schedule, skip_trackers)

    def fence(
        self, batches: List[Batch], schedule: List[Tuple[int, int]], skip_trackers: List[SkipTrackerThroughPotals],
    ) -> None:
        """Copy micro-batches after computation for the previous micro-batches."""
        copy_streams = self.copy_streams
        skip_layout = self.skip_layout

        for i, j in schedule:
            # Ensure that batches[i-1] is executed after batches[i] in
            # backpropagation by an explicit dependency.
            if i != 0 and j != 0:
                _depend(batches[i - 1], batches[i])

            next_stream = copy_streams[j][i]

            for prev_j, ns, name in skip_layout.copy_policy(j):
                prev_stream = copy_streams[prev_j][i]
                skip_trackers[i].copy(batches[i], prev_stream, next_stream, ns, name)

            if j != 0:
                prev_stream = copy_streams[j - 1][i]
                _copy(batches[i], prev_stream, next_stream)

    def compute(
        self, batches: List[Batch], schedule: List[Tuple[int, int]], skip_trackers: List[SkipTrackerThroughPotals],
    ) -> None:
        """Run tasks with synchronization to copy streams."""
        partitions = self.partitions
        devices = self.devices
        copy_streams = self.copy_streams
        checkpoint_stop = self.checkpoint_stop

        # Disable checkpointing if in eval mode.
        if not self.partitions[0].training:
            checkpoint_stop = 0

        n = len(partitions)
        streams = [current_stream(d) for d in devices]
        exc_info: Optional[ExcInfo] = None

        # With checkpointing, the autograd graph looks like this diagram:
        # ┌─────┸──────┐
        # │    Copy    │
        # └─────┰──────┘   (fence)
        # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
        #       ┃          (compute)
        # ┌─────┸──────┐
        # │    Wait    │ [1] Synchronize the current stream with the copy stream.
        # └─────┰──────┘
        # ┌─────┸──────┐
        # │ Checkpoint │ [2] Compute a partition within checkpointing.
        # └─────┰──────┘
        # ┌─────┸──────┐
        # │    Wait    │ [3] Synchronize the copy stream with the current stream.
        # └─────┰──────┘
        #       ┠ ─ ─ ─ ┐
        #       ┃ ┌─────┴─────┐
        #       ┃ │ Recompute │ [4] Schedule the recomputation at backpropagation.
        #       ┃ └─────┬─────┘
        #       ┠ ─ ─ ─ ┘
        #       ┃
        # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
        # ┌─────┸──────┐   (fence)
        # │    Copy    │
        # └─────┰──────┘
        for i, j in schedule:
            batch = batches[i]
            partition = partitions[j]

            # Synchronize with the copied input. ([1] in the diagram)
            if j != 0:
                _wait(batch, copy_streams[j][i], streams[j])

            # Determine whether checkpointing or not.
            checkpoint = i < checkpoint_stop
            if checkpoint:

                def function(
                    *inputs,
                    partition: nn.Module = partition,
                    skip_tracker: SkipTrackerThroughPotals = skip_trackers[i],
                    chunk_id: int = i,
                    part_id: int = j,
                ) -> TensorOrTensors:
                    with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)):
                        return partition(*inputs)

                chk = Checkpointing(function, batch)  # type: ignore[arg-type]
                task = Task(streams[j], compute=chk.checkpoint, finalize=chk.recompute)
                del function, chk

            else:

                def compute(
                    batch: Batch = batch,
                    partition: nn.Module = partition,
                    skip_tracker: SkipTrackerThroughPotals = skip_trackers[i],
                    chunk_id: int = i,
                    part_id: int = j,
                ) -> Batch:
                    with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)):
                        return batch.call(partition)

                task = Task(streams[j], compute=compute, finalize=None)
                del compute

            # Compute tasks in parallel. ([2] in the diagram)
            self.in_queues[j].put(task)

        for i, j in schedule:
            ok, payload = self.out_queues[j].get()

            # Hold the first exception.
            if exc_info is not None:
                continue
            elif not ok:
                exc_info = cast(ExcInfo, payload)
                continue

            task, batch = cast(Tuple[Task, Batch], payload)

            # The copy stream synchronizes to copy the output. ([3] in the
            # diagram)
            if j != n - 1:
                _wait(batch, streams[j], copy_streams[j][i])

            # Finalize tasks. If checkpointing is enabled, here the
            # recomputation is scheduled at backpropagation. ([4] in the
            # diagram)
            with use_device(devices[j]):
                task.finalize(batch)

            batches[i] = batch

        # Fail at the first exception.
        if exc_info is not None:
            raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Supports efficiency with skip connections."""
|
| 8 |
+
from .namespace import Namespace
|
| 9 |
+
from .skippable import pop, skippable, stash, verify_skippables
|
| 10 |
+
|
| 11 |
+
__all__ = ["skippable", "stash", "pop", "verify_skippables", "Namespace"]
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (428 Bytes). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/layout.cpython-310.pyc
ADDED
|
Binary file (2.92 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/namespace.cpython-310.pyc
ADDED
|
Binary file (1.57 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/portal.cpython-310.pyc
ADDED
|
Binary file (6.46 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/skippable.cpython-310.pyc
ADDED
|
Binary file (13.1 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/__pycache__/tracker.cpython-310.pyc
ADDED
|
Binary file (5.43 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/layout.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Static skip connection layout of ``@skippable`` modules."""
|
| 8 |
+
from typing import Dict, Iterable, List, Tuple
|
| 9 |
+
|
| 10 |
+
from torch import nn
|
| 11 |
+
|
| 12 |
+
from .namespace import Namespace
|
| 13 |
+
|
| 14 |
+
__all__: List[str] = []
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class SkipLayout:
    """Static description of where skip tensors travel between partitions."""

    # Routes keyed by (namespace, name) -> (source partition, destination partition).
    by_ns_name: Dict[Tuple[Namespace, str], Tuple[int, int]]

    # The same routes grouped by destination: by_partition[next_j] holds
    # (prev_j, ns, name) triples sorted by source partition number.
    by_partition: List[List[Tuple[int, Namespace, str]]]

    def __init__(self, num_partitions: int, skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]],) -> None:
        # The incoming mapping is already keyed by (ns, name).
        self.by_ns_name = skip_routes

        # Re-index the routes by the destination partition number.
        buckets: List[List[Tuple[int, Namespace, str]]] = [[] for _ in range(num_partitions)]
        for (ns, name), (prev_j, next_j) in skip_routes.items():
            buckets[next_j].append((prev_j, ns, name))
        for bucket in buckets:
            bucket.sort()
        self.by_partition = buckets

    def copy_policy(self, next_j: int) -> Iterable[Tuple[int, Namespace, str]]:
        """Generate the skip routes arriving at partition ``next_j``.

        Routes whose source and destination partition coincide are omitted,
        since no partition-to-partition copy is needed for them.

        Yields:
            Each tuple of (source partition number, namespace, name), in
            ascending order of source partition number.

        """
        for prev_j, ns, name in self.by_partition[next_j]:
            if prev_j != next_j:
                yield (prev_j, ns, name)

    def requires_copy(self, ns: Namespace, name: str) -> bool:
        """Whether the given namespace and name requires a
        partition-to-partition copy.
        """
        # Unknown routes fall back to (-1, -1), which compares equal and
        # therefore reports "no copy required".
        prev_j, next_j = self.by_ns_name.get((ns, name), (-1, -1))
        return prev_j != next_j
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def inspect_skip_layout(partitions: List[nn.Sequential]) -> SkipLayout:
    """Inspects the skip connection layout in the given partitions."""
    # NOTE(sublee): circular-import workaround — Skippable's module imports
    # from this one, so import it lazily inside this subroutine.
    from .skippable import Skippable

    skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]] = {}
    stashed_at: Dict[Tuple[Namespace, str], int] = {}

    for j, partition in enumerate(partitions):
        # A partition may be a single module or an nn.Sequential of layers.
        layers = partition if isinstance(partition, nn.Sequential) else (partition,)
        for layer in layers:
            if not isinstance(layer, Skippable):
                continue
            # Remember where each skip tensor is stashed, then close the
            # route when the matching pop is found.
            for key in layer.stashable():
                stashed_at[key] = j
            for key in layer.poppable():
                skip_routes[key] = (stashed_at.pop(key), j)

    return SkipLayout(len(partitions), skip_routes)
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/namespace.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Provides isolated namespace of skip tensors."""
|
| 8 |
+
import abc
|
| 9 |
+
from functools import total_ordering
|
| 10 |
+
from typing import Any
|
| 11 |
+
import uuid
|
| 12 |
+
|
| 13 |
+
__all__ = ["Namespace"]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@total_ordering
class Namespace(metaclass=abc.ABCMeta):
    """Namespace for isolating skip tensors used by :meth:`isolate()
    <torchpipe.skip.skippable.Skippable.isolate>`.
    """

    __slots__ = ("id",)

    def __init__(self) -> None:
        # A random version-4 UUID gives every namespace a unique identity.
        self.id = uuid.uuid4()

    def __repr__(self) -> str:
        return f"<Namespace '{self.id}'>"

    def __hash__(self) -> int:
        return hash(self.id)

    # Ordering exists only so tuples containing a namespace can be sorted
    # (SkipLayout sorts (prev_j, ns, name) triples). The actual order is
    # arbitrary: random UUIDs compare in no meaningful sequence.
    def __lt__(self, other: Any) -> bool:
        return self.id < other.id if isinstance(other, Namespace) else False

    def __eq__(self, other: object) -> bool:
        return isinstance(other, Namespace) and self.id == other.id


# 'None' acts as the default namespace: registering NoneType as a virtual
# subclass makes 'isinstance(None, Namespace)' evaluate to 'True'.
Namespace.register(type(None))
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/portal.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Portal keeps a tensor in the pocket plane. The tensor becomes hidden to the
|
| 8 |
+
autograd engine. The shared context of three functions (:class:`PortalBlue`,
|
| 9 |
+
:class:`PortalOrange`, and :class:`PortalCopy`) out of the computation graph is
|
| 10 |
+
one of the most important feature of :mod:`torchpipe.skip`.
|
| 11 |
+
|
| 12 |
+
The metaphor is inspired by Portal™ from Valve.
|
| 13 |
+
|
| 14 |
+
"""
|
| 15 |
+
from typing import List, Optional, Tuple
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
from torch import Tensor
|
| 19 |
+
|
| 20 |
+
from ..copy import Context as CopyContext
|
| 21 |
+
from ..copy import Copy
|
| 22 |
+
from ..phony import get_phony
|
| 23 |
+
from ..stream import AbstractStream, get_device
|
| 24 |
+
|
| 25 |
+
__all__: List[str] = []
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class Portal:
    """A portal for a tensor.

    Holds a tensor outside of the autograd graph so it can be moved between
    partitions. The tensor may be retrieved at most ``tensor_life`` times;
    afterwards the reference is dropped so its memory can be reclaimed.
    """

    def __init__(self, tensor: Optional[Tensor], tensor_life: int) -> None:
        self.put_tensor(tensor, tensor_life)
        # Gradient parked here by PortalOrange.backward, consumed later by
        # PortalBlue.backward.
        self.grad: Optional[Tensor] = None

    def blue(self) -> Tensor:
        """Creates a :class:`PortalBlue` which hides the underlying tensor
        from the autograd engine.

        Join the returning phony to the main lane of the autograd graph to
        assure the correct backpropagation::

            PortalBlue --+
                         |
            ---------- Join --

        """
        hidden = self.use_tensor()
        if hidden is not None:
            return PortalBlue.apply(self, hidden)
        # Nothing to hide; emit a bare CPU phony to keep the graph shape.
        return get_phony(torch.device("cpu"), requires_grad=False)

    def orange(self, phony: Tensor) -> Optional[Tensor]:
        """Creates a :class:`PortalOrange` which retrieves the hidden tensor
        without losing the ability of backpropagation.

        Give a phony forked from the main lane of an autograd graph::

                +-- PortalOrange --+
                |                  |
            -- Fork --------- f(a, b) --

        """
        self.check_tensor_life()
        if self.tensor is not None:
            return PortalOrange.apply(self, phony)
        return self.use_tensor()

    def copy(self, prev_stream: AbstractStream, next_stream: AbstractStream, phony: Tensor,) -> Tensor:
        """Copies the hidden tensor by a :class:`PortalCopy`.

        Give a phony and use the returning phony to keep backpropagation::

                +-- PortalCopy --+
                |                |
            -- Fork ---------- Join --

        """
        if self.tensor is not None:
            return PortalCopy.apply(self, prev_stream, next_stream, phony)
        return get_phony(torch.device("cpu"), requires_grad=False)

    def check_tensor_life(self) -> None:
        # Guard: the tensor has already been consumed 'tensor_life' times.
        if self.tensor_life <= 0:
            raise RuntimeError("tensor in portal has been removed")

    def put_tensor(self, tensor: Optional[Tensor], tensor_life: int) -> None:
        """Stores a tensor into this portal.

        The tensor can be retrieved by :meth:`use_tensor` up to
        ``tensor_life`` times. When the life reaches zero, the stored
        reference is dropped immediately so the memory can be deallocated.
        """
        self.tensor_life = tensor_life
        self.tensor = tensor if tensor_life > 0 else None

    def use_tensor(self) -> Optional[Tensor]:
        """Retrieves the underlying tensor and decreases the tensor life.

        When the life reaches zero, the stored reference is removed.
        """
        self.check_tensor_life()
        out = self.tensor
        self.tensor_life -= 1
        if self.tensor_life <= 0:
            self.tensor = None
        return out

    def put_grad(self, grad: Tensor) -> None:
        """Stores a gradient into this portal."""
        self.grad = grad

    def use_grad(self) -> Tensor:
        """Retrieves and removes the underlying gradient.

        The gradient is always ephemeral: a second retrieval fails.
        """
        grad = self.grad
        if grad is None:
            raise RuntimeError("grad in portal has been removed or never set")
        self.grad = None
        return grad
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
# Common interface between :class:`PortalBlue`, :class:`PortalOrange`, and
# :class:`PortalCopy`.
class Context(CopyContext):
    # Autograd-context slot pointing back at the owning :class:`Portal`;
    # set by each paired forward() and read by the matching backward().
    portal: Portal
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
class PortalBlue(torch.autograd.Function):
    """Hides a tensor from the autograd engine by a :class:`Portal`."""

    @staticmethod
    # type: ignore[override]
    def forward(
        ctx: Context,
        portal: Portal,
        # This tensor must be retrieved by portal.use_tensor().
        tensor: Tensor,
    ) -> Tensor:
        # Remember the portal so backward() can fetch the stashed gradient.
        ctx.portal = portal
        return get_phony(tensor.device, requires_grad=False).detach()

    @staticmethod
    def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, Tensor]:  # type: ignore[override]
        # The paired PortalOrange parked the gradient on the portal.
        return None, ctx.portal.use_grad()
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class PortalOrange(torch.autograd.Function):
    """Retrieves the hidden tensor from a :class:`Portal`."""

    @staticmethod
    def forward(ctx: Context, portal: Portal, phony: Tensor) -> Tensor:  # type: ignore[override]
        # Remember the portal so backward() can stash the gradient on it.
        ctx.portal = portal

        hidden = portal.use_tensor()
        assert hidden is not None

        # Detach: the tensor re-enters the graph through this Function only.
        return hidden.detach()

    @staticmethod
    def backward(ctx: Context, grad: Tensor) -> Tuple[None, None]:  # type: ignore[override]
        # Park the incoming gradient for the paired PortalBlue to consume.
        ctx.portal.put_grad(grad)
        return None, None
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
class PortalCopy(torch.autograd.Function):
    """Copies the hidden tensor in a :class:`Portal`, replacing it in place
    with the copy produced on the destination stream.
    """

    @staticmethod
    def forward(
        ctx: Context, portal: Portal, prev_stream: AbstractStream, next_stream: AbstractStream, phony: Tensor,
    ) -> Tensor:  # type: ignore[override]
        ctx.portal = portal

        # Delegate the actual stream-to-stream transfer to Copy, then swap
        # the portal's hidden tensor for the copied one.
        assert portal.tensor is not None
        (portal.tensor,) = Copy.forward(ctx, prev_stream, next_stream, portal.tensor)

        # Only a phony leaves this Function; the real tensor stays hidden.
        return get_phony(get_device(next_stream), requires_grad=False).detach()

    @staticmethod
    def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, None, None, None]:  # type: ignore[override]
        portal = ctx.portal

        # Route the parked gradient back through Copy's backward pass.
        assert portal.grad is not None
        _, _, portal.grad = Copy.backward(ctx, portal.grad)

        return None, None, None, None
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/skippable.py
ADDED
|
@@ -0,0 +1,431 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""The user interface to define skip connections."""
|
| 8 |
+
from typing import (
|
| 9 |
+
TYPE_CHECKING,
|
| 10 |
+
Any,
|
| 11 |
+
Callable,
|
| 12 |
+
ClassVar,
|
| 13 |
+
Dict,
|
| 14 |
+
FrozenSet,
|
| 15 |
+
Generator,
|
| 16 |
+
Iterable,
|
| 17 |
+
List,
|
| 18 |
+
Optional,
|
| 19 |
+
Set,
|
| 20 |
+
Sequence,
|
| 21 |
+
Tuple,
|
| 22 |
+
Type,
|
| 23 |
+
TypeVar,
|
| 24 |
+
Union,
|
| 25 |
+
cast,
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
from torch import Tensor, nn
|
| 29 |
+
|
| 30 |
+
from ..microbatch import Batch
|
| 31 |
+
from .namespace import Namespace
|
| 32 |
+
from .tracker import current_skip_tracker
|
| 33 |
+
|
| 34 |
+
__all__ = ["skippable", "stash", "pop", "verify_skippables"]
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
Tensors = Sequence[Tensor]
|
| 38 |
+
TensorOrTensors = Union[Tensor, Tensors]
|
| 39 |
+
|
| 40 |
+
StashPop = Union["stash", "pop"]
|
| 41 |
+
StashPopGenerator = Generator[StashPop, Optional[Tensor], TensorOrTensors]
|
| 42 |
+
if TYPE_CHECKING:
|
| 43 |
+
# Typechecking: nn.Module is not a Generic
|
| 44 |
+
SkippableModule = nn.Module[Union[StashPopGenerator, TensorOrTensors]] # type: ignore[type-arg]
|
| 45 |
+
else:
|
| 46 |
+
SkippableModule = nn.Module
|
| 47 |
+
|
| 48 |
+
T = TypeVar("T", bound="Skippable")
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class Skippable(nn.Module):
    """The base class for skippable modules.

    Do not use this class directly. Define a subclass by :func:`skippable`
    instead.

    """

    # Filled in by the :func:`skippable` decorator on each generated subclass.
    module_cls: ClassVar[Type[SkippableModule]]
    stashable_names: ClassVar[FrozenSet[str]]
    poppable_names: ClassVar[FrozenSet[str]]

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__()
        # Wrap the user's module; all constructor arguments pass through.
        self.module = self.module_cls(*args, **kwargs)  # type: ignore[call-arg]
        # Per-skip-name namespaces assigned by isolate(); absent names use
        # the default namespace (None).
        self.namespaces: Dict[str, Namespace] = {}

    def __repr__(self) -> str:
        return f"@skippable({self.module})"

    def namespaced(self, name: str) -> Tuple[Namespace, str]:
        """Prepend namespace for the given skip name."""
        # Missing names yield None, the default namespace (NoneType is
        # registered as a virtual Namespace subclass in .namespace).
        ns = self.namespaces.get(name)
        ns = cast(Namespace, ns)
        return (ns, name)

    def stashable(self) -> Iterable[Tuple[Namespace, str]]:
        """Iterate over namespaced skip names to be stashed."""
        for name in self.stashable_names:
            yield self.namespaced(name)

    def poppable(self) -> Iterable[Tuple[Namespace, str]]:
        """Iterate over namespaced skip names to be popped."""
        for name in self.poppable_names:
            yield self.namespaced(name)

    def isolate(self: T, ns: Namespace, *, only: Optional[Iterable[str]] = None) -> T:
        r"""Isolate a specified subset or the whole set of skip tensors.

        In a single sequential module, skip tensors with the same
        name are not allowed unless they are isolated by different namespaces.

        Here's an example using the same name for skip tensors twice. Each pair
        of ``Layer1`` and ``Layer2`` is isolated with its own namespace ``ns1``
        and ``ns2``. There is no conflict anymore::

            ns1 = Namespace()
            ns2 = Namespace()

            model = nn.Sequential(
                Layer1().isolate(ns1),
                Layer1().isolate(ns2),
                Layer2(),
                Layer3().isolate(ns2),
                Layer3().isolate(ns1),
            )

        When `only` parameter is omitted, all skip tensors are isolated. You
        can isolate a subset of skip tensors by passing `only` parameter::

            ns_alice = Namespace()
            ns_bob = Namespace()

            model = nn.Sequential(
                ...
                StashStashPop().isolate(ns_alice, only=['alice']) \
                               .isolate(ns_bob, only=['bob']),
                ...
            )

        Args:
            ns (Namespace):
                namespace for isolation

        Keyword Args:
            only (iterable of strs):
                names of specific skip tensors to be isolated (omit this option
                to isolate all skip tensors declared in this module)

        Returns:
            this module itself

        """
        names: Iterable[str]

        if only is None:
            names = self.stashable_names | self.poppable_names
        else:
            names = set(only)

        for name in names:
            self.namespaces[name] = ns

        # Returning self enables chained calls: m.isolate(a).isolate(b, ...).
        return self

    def dispatch(
        self,
        input,
        handle_stash: Callable[[str, Optional[Tensor]], None],
        handle_pop: Callable[[str], Optional[Tensor]],
    ):
        """Dispatch :class:`stash` or :class:`pop` commands.

        The commands are generated by the module's ``forward()``.
        """
        generator = self.module(input)

        if not isinstance(generator, Generator):
            # The underlying module returned output without any yield.
            output = generator
            return output

        try:
            # Drive the generator protocol: each yielded value must be a
            # stash or pop command; pop results are sent back in.
            op = next(generator)

            while True:
                if isinstance(op, stash):
                    handle_stash(op.name, op.tensor)
                    op = next(generator)
                    continue

                if isinstance(op, pop):
                    tensor = handle_pop(op.name)
                    op = generator.send(tensor)
                    continue

                raise TypeError(f"{op!r} is not a command from @skippable")

        except StopIteration as stop:
            # The generator's 'return <value>' arrives as StopIteration.args.
            # NOTE(review): assumes forward() always returns a value; a bare
            # 'return' would leave args empty — confirm upstream guarantee.
            output = stop.args[0]
            return output

    def forward(self, input: Union[List[Any], Tensor]) -> TensorOrTensors:
        """Perform the forward propagation.

        :class:`stash` or :class:`pop` commands will be handled by portals
        silently. The portals won't be exposed to users.

        Raises:
            RuntimeError:
                illegal 'stash' or 'pop' is found.

        """
        skip_tracker = current_skip_tracker()
        stashed_tensors: Dict[str, Optional[Tensor]] = {}

        # Load skip tensors that might be popped.
        poppable_tensors = {}
        batch = Batch(input)
        for ns, name in self.poppable():
            try:
                poppable_tensors[name] = skip_tracker.load(batch, ns, name)
            except KeyError as e:
                raise RuntimeError(f"'{name}' has not been stashed") from e
        input = batch.values

        # Handle skip commands.
        def handle_stash(name: str, tensor: Optional[Tensor]) -> None:
            if name not in self.stashable_names:
                raise RuntimeError(f"'{name}' has not been declared as stashable")
            stashed_tensors[name] = tensor

        def handle_pop(name: str) -> Optional[Tensor]:
            if name not in self.poppable_names:
                raise RuntimeError(f"'{name}' has not been declared as poppable")
            return poppable_tensors.pop(name)

        output = self.dispatch(input, handle_stash, handle_pop)

        # All declared skips must be stashed or popped.
        not_stashed = self.stashable_names - stashed_tensors.keys()
        if not_stashed:
            comma_names = ", ".join(f"'{n}'" for n in not_stashed)
            raise RuntimeError(f"{comma_names} must be stashed but have not")

        # handle_pop() removes entries from poppable_tensors, so whatever
        # remains was declared poppable but never actually popped.
        not_popped = poppable_tensors.keys()
        if not_popped:
            comma_names = ", ".join(f"'{n}'" for n in not_popped)
            raise RuntimeError(f"{comma_names} must be popped but have not")

        # Save stashed skip tensors.
        batch = Batch(output)
        for ns, name in self.stashable():
            tensor = stashed_tensors[name]
            skip_tracker.save(batch, ns, name, tensor)
        output = batch.values

        return output
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
# TODO(sublee): Move to above of Skippable class for better read flow.
def skippable(
    stash: Iterable[str] = (), pop: Iterable[str] = (),
) -> Callable[[Type[SkippableModule]], Type[Skippable]]:
    """Define a decorator to create :class:`nn.Module <torch.nn.Module>` with skip connections.

    Decorated modules are called "skippable" and work fine even when not
    wrapped by :class:`~torch.distributed.pipeline.sync.Pipe`.

    Each skip tensor is managed by name. A skippable module must declare the
    names it uses via the `stash` and/or `pop` parameters; declared tensors
    are then stashed by ``yield stash(name, tensor)`` or popped by
    ``tensor = yield pop(name)``.

    Example with three layers, where "1to3" is stashed at the first layer
    and popped at the last::

        @skippable(stash=['1to3'])
        class Layer1(nn.Module):
            def forward(self, input):
                yield stash('1to3', input)
                return f1(input)

        class Layer2(nn.Module):
            def forward(self, input):
                return f2(input)

        @skippable(pop=['1to3'])
        class Layer3(nn.Module):
            def forward(self, input):
                skip_1to3 = yield pop('1to3')
                return f3(input) + skip_1to3

        model = nn.Sequential(Layer1(), Layer2(), Layer3())

    One skippable module can stash or pop multiple skip tensors::

        @skippable(stash=['alice', 'bob'], pop=['carol'])
        class StashStashPop(nn.Module):
            def forward(self, input):
                yield stash('alice', f_alice(input))
                yield stash('bob', f_bob(input))
                carol = yield pop('carol')
                return input + carol

    Every skip tensor must be associated with exactly one pair of `stash`
    and `pop`. :class:`~torch.distributed.pipeline.sync.Pipe` checks this
    restriction automatically when wrapping a module; it can also be checked
    manually with :func:`verify_skippables`.

    """
    declared_stash = frozenset(stash)
    declared_pop = frozenset(pop)

    def extend_skippable(module_cls: Type[SkippableModule]) -> Type[Skippable]:
        # Synthesize a Skippable subclass that wraps the decorated module
        # and carries the declared skip names as class attributes.
        return type(
            module_cls.__name__,
            (Skippable,),
            {
                "module_cls": module_cls,
                "stashable_names": declared_stash,
                "poppable_names": declared_pop,
            },
        )

    return extend_skippable
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
class stash:
    """Command object that stashes a skip tensor when yielded.

    ::

        def forward(self, input):
            yield stash('name', input)
            return f(input)

    Args:
        name (str): name of skip tensor
        tensor (torch.Tensor or None): tensor to pass to the skip connection
    """

    # Only these two attributes exist; keeps per-command overhead minimal.
    __slots__ = ("name", "tensor")

    def __init__(self, name: str, tensor: Optional[Tensor]) -> None:
        self.name, self.tensor = name, tensor
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
class pop:
    """Command object that pops a skip tensor when yielded.

    ::

        def forward(self, input):
            skip = yield pop('name')
            return f(input) + skip

    Args:
        name (str): name of skip tensor

    Returns:
        the skip tensor previously stashed by another layer under the same name
    """

    # A pop command carries only the name to look up.
    __slots__ = ("name",)

    def __init__(self, name: str) -> None:
        self.name = name
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def verify_skippables(module: nn.Sequential) -> None:
    """Check that every skip tensor in *module* has exactly one stash/pop pair.

    Walks the sequential model and collects every declared skip name. A name
    stashed but never popped, popped before being stashed, or declared twice
    without namespace isolation (see :meth:`Skippable.isolate`) is reported.
    All problems are gathered and raised together in a single exception.

    Raises:
        TypeError:
            one or more pairs of `stash` and `pop` are not matched.
    """
    seen_stashed: Set[Tuple[Namespace, str]] = set()
    seen_popped: Set[Tuple[Namespace, str]] = set()
    problems: List[str] = []

    for layer_name, layer in module.named_children():
        if not isinstance(layer, Skippable):
            continue

        # A single layer may not both stash and pop the same name.
        for name in layer.stashable_names & layer.poppable_names:
            problems.append(f"'{layer_name}' declared '{name}' both as stashable and as poppable")

        for ns, name in layer.stashable():
            if name in layer.poppable_names:
                # Already reported by the stash-and-pop check above.
                continue
            if (ns, name) in seen_stashed:
                problems.append(f"'{layer_name}' redeclared '{name}' as stashable but not isolated by namespace")
            else:
                seen_stashed.add((ns, name))

        for ns, name in layer.poppable():
            if name in layer.stashable_names:
                continue
            if (ns, name) in seen_popped:
                problems.append(f"'{layer_name}' redeclared '{name}' as poppable but not isolated by namespace")
            elif (ns, name) not in seen_stashed:
                # Pops must come after the matching stash in sequence order.
                problems.append(f"'{layer_name}' declared '{name}' as poppable but it was not stashed")
            else:
                seen_popped.add((ns, name))

    # Anything stashed but never consumed is also an error.
    for _, name in seen_stashed - seen_popped:
        problems.append(f"no module declared '{name}' as poppable but stashed")

    if problems:
        raise TypeError(
            "one or more pairs of stash and pop do not match:\n\n%s" % "\n".join("* %s" % x for x in problems)
        )
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/skip/tracker.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Tracks skip tensors on a thread."""
|
| 8 |
+
from contextlib import contextmanager
|
| 9 |
+
import threading
|
| 10 |
+
from typing import Dict, Generator, List, Optional, Tuple
|
| 11 |
+
|
| 12 |
+
from torch import Tensor
|
| 13 |
+
|
| 14 |
+
from ..checkpoint import is_checkpointing
|
| 15 |
+
from ..dependency import fork, join
|
| 16 |
+
from ..microbatch import Batch
|
| 17 |
+
from ..stream import AbstractStream
|
| 18 |
+
from .layout import SkipLayout
|
| 19 |
+
from .namespace import Namespace
|
| 20 |
+
from .portal import Portal
|
| 21 |
+
|
| 22 |
+
__all__: List[str] = []
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class SkipTracker:
    """Tracks saved skip tensors.

    It will update the given micro-batch in place. This is because when it
    manipulates the underlying skip tensors, the current micro-batch also has
    to be connected with the skip tensors.

    One thread has one skip tracker. Call :func:`current_skip_tracker` to get
    the skip tracker on the current thread.

    """

    def __init__(self) -> None:
        # Skip tensors held on this thread, keyed by (namespace, name).
        self.tensors: Dict[Tuple[Namespace, str], Optional[Tensor]] = {}

    def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None:
        # `batch` is unused in this base class; the portal-based subclass
        # connects it to the saved skip tensor.
        self.tensors[(ns, name)] = tensor

    def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]:
        # Each skip tensor is consumed exactly once; raises KeyError if the
        # name was never saved (or already loaded).
        return self.tensors.pop((ns, name))

    def copy(
        self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str,
    ) -> None:
        # Cross-partition copies require portals; see
        # SkipTrackerThroughPotals.copy for the real implementation.
        raise TypeError("copy is not supported for non-portal skip tensors")
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class SkipTrackerThroughPotals(SkipTracker):
    """Tracks saved skip tensors through portals. The skip tensors will be
    hidden in portals so that the autograd engine does not need to track them.

    This tracker is only used when the training or evaluating module is wrapped
    with :class:`torchpipe.Pipe`.

    """

    def __init__(self, skip_layout: SkipLayout) -> None:
        super().__init__()
        # Static layout that decides which (ns, name) pairs cross partitions.
        self.skip_layout = skip_layout
        # One portal per skip tensor that must be copied between devices.
        self.portals: Dict[Tuple[Namespace, str], Portal] = {}

    def save(self, batch: Batch, ns: Namespace, name: str, tensor: Optional[Tensor]) -> None:
        """Saves the stashed skip tensor in a portal. The portal is then
        connected to the given micro-batch with :class:`Join`.
        """
        if not self.skip_layout.requires_copy(ns, name):
            # Same-partition skip: the plain dict in the base class suffices.
            super().save(batch, ns, name, tensor)
            return

        # See [Tensor Life of Portal] at Portal.put_tensor() to understand the
        # below tensor_life values. Here are the selected events which retrieve
        # the tensor in portal:
        #
        # 1. [x] blue()
        # ...
        # 6. [x] PortalOrange.forward
        # ...
        # 8. [x] PortalOrange.forward (recomputed)
        # ...
        # 11. [x] blue() (recomputed)
        #
        if (ns, name) not in self.portals:
            if is_checkpointing():
                # Under checkpointing, the tensor used by the first
                # PortalOrange should be alive in the portal. This tensor will
                # be used again by the second PortalOrange during the
                # recomputation.
                tensor_life = 3  # Delete at [8. PortalOrange.forward (recomputed)]
            else:
                tensor_life = 2  # Delete at [6. PortalOrange.forward]

            portal = Portal(tensor, tensor_life)
            self.portals[(ns, name)] = portal

        else:
            # Under recomputation, the portal already exists.
            portal = self.portals[(ns, name)]

            # The existing tensor life already became 0. It should be reset as
            # 1 to delete the tensor after the second PortalBlue immediately.
            tensor_life = 1  # Delete at [11. blue() (recomputed)]

            portal.put_tensor(tensor, tensor_life)

        # Tie the portal into the micro-batch's autograd graph via a phony.
        phony = portal.blue()
        tensor_idx = batch.find_tensor_idx()
        batch[tensor_idx] = join(batch[tensor_idx], phony)

    def load(self, batch: Batch, ns: Namespace, name: str) -> Optional[Tensor]:
        """Loads a skip tensor from the corresponding portal to pop. The given
        micro-batch is connected to the portal with :class:`Fork`.
        """
        if not self.skip_layout.requires_copy(ns, name):
            tensor = super().load(batch, ns, name)
            return tensor

        portal = self.portals[(ns, name)]
        # Fork produces a phony tensor that orders this load after the save.
        tensor_idx = batch.find_tensor_idx()
        batch[tensor_idx], phony = fork(batch[tensor_idx])
        tensor = portal.orange(phony)
        return tensor

    def copy(
        self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str,
    ) -> None:
        """Copies the skip tensor in the corresponding portal. The given
        micro-batch and the portal will be tied with :class:`Fork` and
        :class:`Join`.
        """
        # Only skip tensors the layout marked for cross-partition transfer
        # should ever reach here.
        assert self.skip_layout.requires_copy(ns, name)

        tensor_idx = batch.find_tensor_idx()
        batch[tensor_idx], phony = fork(batch[tensor_idx])

        portal = self.portals[(ns, name)]
        phony = portal.copy(prev_stream, next_stream, phony)

        batch[tensor_idx] = join(batch[tensor_idx], phony)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class ThreadLocal(threading.local):
    """Per-thread storage slot for the active :class:`SkipTracker`."""

    def __init__(self) -> None:
        # threading.local runs __init__ once per accessing thread, so every
        # thread starts without a registered tracker.
        self.skip_tracker: Optional[SkipTracker] = None


# Module-wide holder; attribute access is isolated per thread.
thread_local = ThreadLocal()
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
@contextmanager
def use_skip_tracker(skip_tracker: SkipTracker) -> Generator[None, None, None]:
    """Temporarily install *skip_tracker* as the current thread's tracker::

        with use_skip_tracker(my_skip_tracker):
            ...

    The previously registered tracker (possibly ``None``) is restored on
    exit, even if the body raises.
    """
    previous = thread_local.skip_tracker
    thread_local.skip_tracker = skip_tracker
    try:
        yield
    finally:
        thread_local.skip_tracker = previous
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def current_skip_tracker() -> SkipTracker:
    """Return this thread's skip tracker, creating a plain one on first use."""
    tracker = thread_local.skip_tracker
    if tracker is None:
        # Outside of Pipe no tracker is registered; fall back to the
        # dictionary-based tracker and cache it for this thread.
        tracker = SkipTracker()
        thread_local.skip_tracker = tracker
    return tracker
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/stream.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Utilities for eliminating boilerplate code to handle abstract streams with
|
| 8 |
+
CPU device.
|
| 9 |
+
"""
|
| 10 |
+
from contextlib import contextmanager
|
| 11 |
+
from typing import Generator, List, Union, cast
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
|
| 15 |
+
__all__: List[str] = ["CPUStreamType", "new_stream", "current_stream", "default_stream",
|
| 16 |
+
"use_device", "use_stream", "get_device", "wait_stream", "record_stream",
|
| 17 |
+
"is_cuda", "as_cuda"]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class CPUStreamType:
    """Marker type for the single CPU "stream" placeholder."""
    pass


# The placeholder used in place of a CUDA stream when running on CPU.
CPUStream = CPUStreamType()

# Either a real CUDA stream or the CPU placeholder above.
AbstractStream = Union[torch.cuda.Stream, CPUStreamType]


def new_stream(device: torch.device) -> AbstractStream:
    """Create a new stream for *device*.

    CUDA devices get a fresh :class:`torch.cuda.Stream`; any other device
    gets the shared ``CPUStream`` placeholder.
    """
    if device.type == "cuda":
        return torch.cuda.Stream(device)
    return CPUStream
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def current_stream(device: torch.device) -> AbstractStream:
    """:func:`torch.cuda.current_stream` for either CPU or CUDA device."""
    # Non-CUDA devices share the single CPU placeholder stream.
    return torch.cuda.current_stream(device) if device.type == "cuda" else CPUStream
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def default_stream(device: torch.device) -> AbstractStream:
    """:func:`torch.cuda.default_stream` for either CPU or CUDA device."""
    # Non-CUDA devices share the single CPU placeholder stream.
    return torch.cuda.default_stream(device) if device.type == "cuda" else CPUStream
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@contextmanager
def use_device(device: torch.device) -> Generator[None, None, None]:
    """:func:`torch.cuda.device` as a scoped context; a no-op off CUDA."""
    if device.type == "cuda":
        with torch.cuda.device(device):
            yield
    else:
        # There is no device context to switch for CPU (or other) devices.
        yield
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@contextmanager
def use_stream(stream: AbstractStream) -> Generator[None, None, None]:
    """:func:`torch.cuda.stream` as a scoped context; a no-op for ``CPUStream``."""
    if is_cuda(stream):
        with torch.cuda.stream(as_cuda(stream)):
            yield
    else:
        # CPU work is synchronous; there is no stream to select.
        yield
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def get_device(stream: AbstractStream) -> torch.device:
    """Return the device that *stream* belongs to (CPU for the placeholder)."""
    if not is_cuda(stream):
        return torch.device("cpu")
    return as_cuda(stream).device
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def wait_stream(source: AbstractStream, target: AbstractStream) -> None:
    """Make *source* wait until *target* finishes its queued work.

    Mirrors :meth:`torch.cuda.Stream.wait_stream` but accepts the CPU
    placeholder on either side.
    """
    if not is_cuda(target):
        # CPU work is synchronous: by the time we get here it is done,
        # so there is nothing to wait for.
        return
    if is_cuda(source):
        # CUDA-to-CUDA: enqueue an asynchronous wait on the source stream.
        as_cuda(source).wait_stream(as_cuda(target))
    else:
        # CPU source: block the host until the CUDA target stream drains.
        as_cuda(target).synchronize()
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def record_stream(tensor: torch.Tensor, stream: AbstractStream) -> None:
    """:meth:`torch.Tensor.record_stream` for either CPU or CUDA stream.

    No-op for the CPU placeholder stream, since CPU memory is not managed
    by the CUDA caching allocator.
    """
    if is_cuda(stream):
        # NOTE(sublee): record_stream() on a shifted view tensor throws
        # RuntimeError in PyTorch 1.1.0, and does nothing in 1.2.0. To safely
        # protect the tensor against unexpected reallocation, here we use a
        # temporal tensor associated with the same storage without shifting as
        # a workaround.
        #
        # Issue: https://github.com/pytorch/pytorch/issues/27366
        #
        tensor = tensor.new_empty([0]).set_(tensor._typed_storage())

        # Typechecking: torch.cuda.Stream is incompatible with torch._C.Stream
        tensor.record_stream(as_cuda(stream))  # type: ignore[arg-type]
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def is_cuda(stream: AbstractStream) -> bool:
    """Returns ``True`` if the given stream is a valid CUDA stream.

    Implemented as an identity check against the single ``CPUStream``
    placeholder: anything else is assumed to be a CUDA stream.
    """
    return stream is not CPUStream
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def as_cuda(stream: AbstractStream) -> torch.cuda.Stream:
    """Casts the given stream as :class:`torch.cuda.Stream`.

    Purely a typing aid: ``typing.cast`` has no runtime effect, so callers
    must only use this after an ``is_cuda`` check.
    """
    return cast(torch.cuda.Stream, stream)
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/utils.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch import nn
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
|
| 4 |
+
__all__ = ["partition_model"]
|
| 5 |
+
|
| 6 |
+
def partition_model(
        module: nn.Sequential,
        balance: List[int],
        devices: Optional[List[int]] = None):
    """
    Partitions the model across multiple GPU devices.

    Given an :class:`nn.Sequential <torch.nn.Sequential>` module, partitions
    the model across multiple GPU devices according the provided ``balance``
    and ``devices``.

    Args:
        module (:class:`nn.Sequential <torch.nn.Sequential>`):
            Sequential model representing the pipe.
        balance (List[int]):
            List indicating the number of layers in each partition.
        devices (List[int], optional):
            List indicating the device to use for each partition. Defaults to
            ``range(len(balance))``

    Returns:
        An :class:`nn.Sequential <torch.nn.Sequential>` of per-partition
        :class:`nn.Sequential` modules, each moved to its device.

    Raises:
        ValueError: if ``balance`` does not account for every layer of
            ``module``, or ``devices`` is shorter than ``balance``.
    """
    # Fail fast on inconsistent arguments. Previously a mismatched balance
    # silently dropped trailing layers, and a short `devices` list surfaced
    # as an opaque IndexError mid-partitioning.
    if sum(balance) != len(module):
        raise ValueError(
            f"sum(balance) = {sum(balance)} does not match the number of "
            f"layers in the module ({len(module)})"
        )
    if devices is not None and len(devices) < len(balance):
        raise ValueError(
            f"got {len(devices)} devices for {len(balance)} partitions"
        )

    balanced_pipe = []
    pipe_idx = 0
    for device_idx, num_layers in enumerate(balance):
        # Slice out this partition's consecutive layers.
        layers = [module[pipe_idx + i] for i in range(num_layers)]
        pipe_idx += num_layers
        device = device_idx if devices is None else devices[device_idx]
        balanced_pipe.append(nn.Sequential(*layers).to(device))

    return nn.Sequential(*balanced_pipe)
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/pipeline/sync/worker.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 Kakao Brain
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
"""Multithreading in pipeline parallelism."""
|
| 8 |
+
from contextlib import contextmanager
|
| 9 |
+
from queue import Queue
|
| 10 |
+
import sys
|
| 11 |
+
from threading import Thread
|
| 12 |
+
from types import TracebackType
|
| 13 |
+
from typing import TYPE_CHECKING, Callable, Dict, Generator, List, Optional, Tuple, Type, Union, cast
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
|
| 17 |
+
from .microbatch import Batch
|
| 18 |
+
from .stream import AbstractStream, use_device, use_stream
|
| 19 |
+
|
| 20 |
+
__all__: List[str] = ["Task", "worker", "create_workers", "spawn_workers"]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
|
| 24 |
+
|
| 25 |
+
# Queue is generic only in stubs.
|
| 26 |
+
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
|
| 27 |
+
if TYPE_CHECKING:
|
| 28 |
+
InQueue = Queue[Optional["Task"]]
|
| 29 |
+
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
|
| 30 |
+
else:
|
| 31 |
+
InQueue = Queue
|
| 32 |
+
OutQueue = Queue
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class Task:
    """A task represents how to compute a micro-batch on a partition.

    It consists of two parts: :meth:`compute` and :meth:`finalize`.
    :meth:`compute` should be executed in worker threads concurrently.
    :meth:`finalize` should be executed after when worker threads complete to
    execute :meth:`compute`.

    :meth:`compute` might be boosted by worker threads. Because it produces
    several CUDA API calls by user code. In PyTorch, parallel CUDA API calls
    are not serialized through GIL. So more than one CUDA API call can be
    produced at the same time.

    """

    def __init__(
        self, stream: AbstractStream, *, compute: Callable[[], Batch], finalize: Optional[Callable[[Batch], None]],
    ) -> None:
        self.stream = stream
        self._compute = compute
        self._finalize = finalize
        # Snapshot the caller's grad mode at construction time so worker
        # threads reproduce it when they later run compute()/finalize().
        self._grad_enabled = torch.is_grad_enabled()

    def compute(self) -> Batch:
        # Run the user computation on this task's stream under the recorded
        # grad mode.
        with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled):
            return self._compute()

    def finalize(self, batch: Batch) -> None:
        # finalize is optional; tasks without one simply skip this phase.
        if self._finalize is None:
            return
        with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled):
            self._finalize(batch)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def worker(in_queue: InQueue, out_queue: OutQueue, device: torch.device) -> None:
    """Main loop of a worker thread.

    Pulls tasks from *in_queue* and pushes ``(success, payload)`` results to
    *out_queue* until a ``None`` sentinel arrives.
    """
    with use_device(device):
        while True:
            task = in_queue.get()
            if task is None:
                # Sentinel received: shut this worker down.
                break

            try:
                result = task.compute()
            except Exception:
                # Forward the exception info instead of killing the thread.
                out_queue.put((False, cast(ExcInfo, sys.exc_info())))
            else:
                out_queue.put((True, (task, result)))

    # Tell the consumer this worker has exited.
    out_queue.put((False, None))
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def create_workers(devices: List[torch.device],) -> Tuple[List[InQueue], List[OutQueue]]:
    """Spawn one daemon worker thread per distinct device.

    Returns per-device queue lists aligned with *devices*: entries for the
    same (normalized) device share a single worker and queue pair.
    """

    def canonical(device: torch.device) -> torch.device:
        # "cuda" without an index refers to the current CUDA device.
        if device.type == "cuda" and device.index is None:
            return torch.device("cuda", index=torch.cuda.current_device())
        # Every CPU device is the same; drop any index so they compare equal.
        if device.type == "cpu" and device.index is not None:
            return torch.device("cpu")
        return device

    # One (in_queue, out_queue) pair per distinct device.
    queue_pairs: Dict[torch.device, Tuple[InQueue, OutQueue]] = {}
    in_queues: List[InQueue] = []
    out_queues: List[OutQueue] = []

    for device in devices:
        device = canonical(device)

        if device not in queue_pairs:
            pair = (Queue(), Queue())
            queue_pairs[device] = pair
            # Daemon threads so they never block interpreter shutdown.
            Thread(target=worker, args=(pair[0], pair[1], device), daemon=True).start()

        in_queue, out_queue = queue_pairs[device]
        in_queues.append(in_queue)
        out_queues.append(out_queue)

    return (in_queues, out_queues)
|
| 125 |
+
|
| 126 |
+
@contextmanager
def spawn_workers(devices: List[torch.device],) -> Generator[Tuple[List[InQueue], List[OutQueue]], None, None]:
    """Context-manager wrapper around :func:`create_workers`.

    NOTE(review): no teardown is performed on exit — the daemon worker
    threads are left running, matching the original behavior.
    """
    queues = create_workers(devices)
    try:
        yield queues
    finally:
        # Intentionally nothing to clean up; workers are daemon threads.
        pass
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/rpc/__init__.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import timedelta
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import threading
|
| 5 |
+
import warnings
|
| 6 |
+
from typing import Generator, Tuple
|
| 7 |
+
from urllib.parse import urlparse
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.distributed as dist
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
_init_counter = 0
|
| 16 |
+
_init_counter_lock = threading.Lock()
|
| 17 |
+
|
| 18 |
+
__all__ = ["is_available"]
|
| 19 |
+
|
| 20 |
+
def is_available() -> bool:
    """Return ``True`` if the C++ RPC bindings were built into this torch
    binary (i.e. ``torch._C`` exposes ``_rpc_init``)."""
    return hasattr(torch._C, "_rpc_init")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Eagerly initialize the C++ RPC machinery at import time; abort the import
# of this module if initialization fails.
if is_available() and not torch._C._rpc_init():
    raise RuntimeError("Failed to initialize torch.distributed.rpc")
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
if is_available():
|
| 29 |
+
from torch._C._distributed_c10d import Store
|
| 30 |
+
from torch._C._distributed_rpc import (
|
| 31 |
+
_disable_jit_rref_pickle,
|
| 32 |
+
_enable_jit_rref_pickle,
|
| 33 |
+
_disable_server_process_global_profiler,
|
| 34 |
+
_enable_server_process_global_profiler,
|
| 35 |
+
_set_and_start_rpc_agent,
|
| 36 |
+
_reset_current_rpc_agent,
|
| 37 |
+
_delete_all_user_and_unforked_owner_rrefs,
|
| 38 |
+
_destroy_rref_context,
|
| 39 |
+
_set_profiler_node_id,
|
| 40 |
+
_is_current_rpc_agent_set,
|
| 41 |
+
_rref_context_get_debug_info,
|
| 42 |
+
_cleanup_python_rpc_handler,
|
| 43 |
+
_invoke_rpc_builtin,
|
| 44 |
+
_invoke_rpc_python_udf,
|
| 45 |
+
_invoke_rpc_torchscript,
|
| 46 |
+
_invoke_remote_builtin,
|
| 47 |
+
_invoke_remote_python_udf,
|
| 48 |
+
_invoke_remote_torchscript,
|
| 49 |
+
_set_rpc_timeout,
|
| 50 |
+
_get_current_rpc_agent,
|
| 51 |
+
get_rpc_timeout,
|
| 52 |
+
enable_gil_profiling,
|
| 53 |
+
RpcBackendOptions,
|
| 54 |
+
_TensorPipeRpcBackendOptionsBase,
|
| 55 |
+
RpcAgent,
|
| 56 |
+
PyRRef,
|
| 57 |
+
TensorPipeAgent,
|
| 58 |
+
RemoteProfilerManager,
|
| 59 |
+
WorkerInfo,
|
| 60 |
+
_DEFAULT_INIT_METHOD,
|
| 61 |
+
_DEFAULT_NUM_WORKER_THREADS,
|
| 62 |
+
_UNSET_RPC_TIMEOUT,
|
| 63 |
+
_DEFAULT_RPC_TIMEOUT_SEC,
|
| 64 |
+
) # noqa: F401
|
| 65 |
+
|
| 66 |
+
from . import api, backend_registry, functions
|
| 67 |
+
from .api import * # noqa: F401,F403
|
| 68 |
+
import numbers
|
| 69 |
+
|
| 70 |
+
import torch.distributed.autograd as dist_autograd
|
| 71 |
+
|
| 72 |
+
from .backend_registry import BackendType
|
| 73 |
+
from .options import TensorPipeRpcBackendOptions # noqa: F401
|
| 74 |
+
from .server_process_global_profiler import (
|
| 75 |
+
_server_process_global_profile,
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
rendezvous_iterator: Generator[Tuple[Store, int, int], None, None]
|
| 79 |
+
|
| 80 |
+
__all__ += ["init_rpc", "BackendType", "TensorPipeRpcBackendOptions"]
|
| 81 |
+
__all__ = __all__ + api.__all__ + backend_registry.__all__ # noqa: PLE0605
|
| 82 |
+
|
| 83 |
+
def init_rpc(
    name,
    backend=None,
    rank=-1,
    world_size=None,
    rpc_backend_options=None,
):
    r"""
    Initializes RPC primitives such as the local RPC agent
    and distributed autograd, which immediately makes the current
    process ready to send and receive RPCs.

    Args:
        name (str): a globally unique name of this node. (e.g.,
            ``Trainer3``, ``ParameterServer2``, ``Master``, ``Worker1``)
            Name can only contain number, alphabet, underscore, colon,
            and/or dash, and must be shorter than 128 characters.
        backend (BackendType, optional): The type of RPC backend
            implementation. The only supported value is
            ``BackendType.TENSORPIPE`` (the default).
            See :ref:`rpc-backends` for more information.
        rank (int): a globally unique id/rank of this node.
        world_size (int): The number of workers in the group.
        rpc_backend_options (RpcBackendOptions, optional): The options
            passed to the RpcAgent constructor. It must be an agent-specific
            subclass of :class:`~torch.distributed.rpc.RpcBackendOptions`
            and contains agent-specific initialization configurations. By
            default, for all agents, it sets the default timeout to 60
            seconds and performs the rendezvous with an underlying process
            group initialized using ``init_method = "env://"``,
            meaning that environment variables ``MASTER_ADDR`` and
            ``MASTER_PORT`` need to be set properly. See
            :ref:`rpc-backends` for more information and find which options
            are available.
    """
    torch._C._log_api_usage_once("torch.distributed.init_rpc")
    if backend is not None and not isinstance(
        backend, backend_registry.BackendType
    ):
        raise TypeError("Argument backend must be a member of BackendType")

    if rpc_backend_options is not None and not isinstance(
        rpc_backend_options, RpcBackendOptions
    ):
        raise TypeError(
            "Argument rpc_backend_options must be an instance of RpcBackendOptions"
        )

    # Try to detect the backend from the concrete type of the options object
    # when the caller supplied options but no explicit backend.
    if backend is None and rpc_backend_options is not None:
        for candidate_backend in BackendType:
            if isinstance(
                rpc_backend_options,
                type(
                    backend_registry.construct_rpc_backend_options(
                        candidate_backend
                    )
                ),
            ):
                backend = candidate_backend
                break
        else:
            # No registered backend produces options of this type.
            raise TypeError(
                f"Could not infer backend for options {rpc_backend_options}"
            )
        # Ignore type error because mypy doesn't handle dynamically generated type objects (#4865)
        if backend != BackendType.TENSORPIPE:  # type: ignore[attr-defined]
            logger.warning(
                "RPC was initialized with no explicit backend but with options "  # type: ignore[attr-defined]
                "corresponding to %(backend)s, hence that backend will be used "
                "instead of the default BackendType.TENSORPIPE. To silence this "
                "warning pass `backend=%(backend)s` explicitly.",
                {'backend': backend}
            )

    if backend is None:
        backend = BackendType.TENSORPIPE  # type: ignore[attr-defined]

    if rpc_backend_options is None:
        # default construct a set of RPC backend options.
        rpc_backend_options = backend_registry.construct_rpc_backend_options(
            backend
        )

    # Create store, performs rendezvous for static RPC group.
    if not world_size:
        # If world_size is not set in construction and also not set in environment variables
        # The store will be created for the dynamic group setting
        store = dist._create_store_from_options(rpc_backend_options, rank)
    else:
        # This rendezvous state sometimes is destroyed before all processes
        # finishing handshaking. To avoid that issue, we make it global to
        # keep it alive.
        global rendezvous_iterator
        rendezvous_iterator = dist.rendezvous(
            rpc_backend_options.init_method, rank=rank, world_size=world_size
        )
        store, _, _ = next(rendezvous_iterator)
        # Use same timeout as RPC.
        store.set_timeout(timedelta(seconds=rpc_backend_options.rpc_timeout))

    # Use a PrefixStore to distinguish multiple invocations.
    with _init_counter_lock:
        global _init_counter
        # An f-string is already a str; the redundant str() wrapper the
        # original code used has been dropped.
        store = dist.PrefixStore(f"rpc_prefix_{_init_counter}", store)
        _init_counter += 1

    # Initialize autograd before RPC since _init_rpc_backend guarantees all
    # processes sync via the store. If we initialize autograd after RPC,
    # there could be a race where some nodes might have initialized autograd
    # and others might not have. As a result, a node calling
    # torch.distributed.autograd.backward() would run into errors since
    # other nodes might not have been initialized.
    dist_autograd._init(rank)

    _set_profiler_node_id(rank)
    # Initialize RPC.
    _init_rpc_backend(backend, store, name, rank, world_size, rpc_backend_options)
|
| 201 |
+
|
| 202 |
+
def _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options):
    """Type-check the arguments passed to ``_init_rpc_backend``.

    Raises:
        RuntimeError: if any argument is not an instance of its expected type.
    """
    # Use a list of (value, expected_type) pairs instead of a dict keyed by
    # the argument values: equal argument values (e.g. rank == world_size)
    # would collide as dict keys and silently drop one of the checks, and an
    # unhashable value would raise a confusing TypeError while building the
    # dict rather than the intended RuntimeError below.
    type_checks = [
        (backend, backend_registry.BackendType),
        (store, dist.Store),
        (name, str),
        (rank, numbers.Integral),
        # world_size can be None for a dynamic group
        (world_size, (numbers.Integral, type(None))),
        (rpc_backend_options, RpcBackendOptions),
    ]
    for arg, arg_type in type_checks:
        if not isinstance(arg, arg_type):  # type: ignore[arg-type]
            raise RuntimeError(
                f"Argument {arg} must be of type {arg_type} but got type {type(arg)}"
            )
|
| 217 |
+
|
| 218 |
+
def _init_rpc_backend(
    backend=BackendType.TENSORPIPE,  # type: ignore[attr-defined]
    store=None,
    name=None,
    rank=-1,
    world_size=None,
    rpc_backend_options=None,
):
    """Validate the arguments, construct the backend-specific RPC agent,
    and register it as the current agent for this process.

    Raises:
        RuntimeError: if any argument has the wrong type, or if RPC has
            already been initialized in this process.
    """
    _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options)

    # The agent is process-global state; refuse to initialize twice.
    if _is_current_rpc_agent_set():
        raise RuntimeError("RPC is already initialized")

    # Build the agent through the backend registry and publish it.
    agent_kwargs = {
        "store": store,
        "name": name,
        "rank": rank,
        "world_size": world_size,
        "rpc_backend_options": rpc_backend_options,
    }
    rpc_agent = backend_registry.init_backend(backend, **agent_kwargs)

    api._init_rpc_states(rpc_agent)
|
| 243 |
+
|
| 244 |
+
@api._require_initialized
def _get_debug_info():
    """Collect debug metrics from the RRef context, the current RPC agent,
    and distributed autograd into a single mapping."""
    combined = _rref_context_get_debug_info()
    # Layer agent- and autograd-level metrics on top of the RRef context info.
    for extra_source in (
        api._get_current_rpc_agent().get_debug_info(),
        dist_autograd._get_debug_info(),
    ):
        combined.update(extra_source)
    return combined
|
vlmpy310/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (6.63 kB). View file
|
|
|