ZTWHHH commited on
Commit
98fd810
·
verified ·
1 Parent(s): 5bade23

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py +5 -0
  3. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/checkpointing.py +1162 -0
  4. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc +0 -0
  5. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc +0 -0
  6. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py +5 -0
  7. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc +0 -0
  8. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc +0 -0
  9. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc +0 -0
  10. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/utils.cpython-310.pyc +0 -0
  11. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py +417 -0
  12. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py +338 -0
  13. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc +0 -0
  14. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc +0 -0
  15. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc +0 -0
  16. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc +0 -0
  17. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc +0 -0
  18. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc +0 -0
  19. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc +0 -0
  20. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py +306 -0
  21. infer_4_47_1/lib/python3.10/site-packages/torch/bin/protoc +3 -0
  22. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc +0 -0
  23. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc +0 -0
  24. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc +0 -0
  25. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc +0 -0
  26. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_tensor.cpython-310.pyc +0 -0
  27. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc +0 -0
  28. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py +1 -0
  29. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc +0 -0
  30. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc +0 -0
  31. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc +0 -0
  32. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc +0 -0
  33. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc +0 -0
  34. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc +0 -0
  35. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc +0 -0
  36. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc +0 -0
  37. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc +0 -0
  38. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc +0 -0
  39. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc +0 -0
  40. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc +0 -0
  41. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc +0 -0
  42. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc +0 -0
  43. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py +0 -0
  44. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  45. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc +0 -0
  46. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py +548 -0
  47. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py +122 -0
  48. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  49. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc +0 -0
  50. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py +734 -0
.gitattributes CHANGED
@@ -1534,3 +1534,4 @@ janus/lib/libgcc_s.so.1 filter=lfs diff=lfs merge=lfs -text
1534
  infer_4_37_2/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text
1535
  infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1536
  infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
1534
  infer_4_37_2/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text
1535
  infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1536
  infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1537
+ infer_4_47_1/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/checkpointing.py ADDED
@@ -0,0 +1,1162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Use to partition the activations stored for backward propagation
7
+ Therefore reduces the memory consumption
8
+ Also implements CPU checkpointing and contiguous memory checkpointing
9
+ Reduces memory consumption and memory fragmentation
10
+
11
+ Code for rng checkpointing taken from NVIDIA Megatron-LM mpu/random.py
12
+ b886b7bb972afe72bac0f5de4f42a4a7bae8ebef
13
+ """
14
+
15
+ # Parts of the code here are adapted from PyTorch
16
+ # repo: https://github.com/pytorch/pytorch
17
+ import copy
18
+ import torch
19
+ import contextlib
20
+ from deepspeed import comm as dist
21
+ import weakref
22
+
23
+ import mmap
24
+ from torch import _C
25
+
26
+ from deepspeed.runtime.config import DeepSpeedConfig
27
+ from deepspeed.utils import logger
28
+ from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank
29
+ from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers, FORWARD_GLOBAL_TIMER
30
+ from deepspeed.accelerator import get_accelerator
31
+
32
+ # DeepSpeed Checkpointing Enabled or Disabled
33
+ deepspeed_checkpointing_enabled = False
34
+
35
+ # MP parameters
36
+ mpu = None
37
+ mp_rank = None
38
+ mp_size = None
39
+ mp_group = None
40
+
41
+ # Model Parameters
42
+ num_layers = None
43
+
44
+ # Checkpointing buffers
45
+ contiguous_data_buffers = []
46
+ data_offsets = []
47
+
48
+ contiguous_size_buffers = []
49
+ size_offsets = []
50
+
51
+ timers = None
52
+
53
+ # optimization flags
54
+ PARTITION_ACTIVATIONS = False
55
+ CPU_CHECKPOINT = False
56
+ CONTIGUOUS_CHECKPOINTING = False
57
+ SYNCHRONIZE = False
58
+ PROFILE_TIME = False
59
+
60
+ # Default name for the model parallel rng tracker.
61
+ _MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
62
+ transport_stream = None
63
+ cuda_device = None
64
+
65
+
66
+ def detach_variable(inputs, device=None):
67
+ if isinstance(inputs, tuple):
68
+ out = []
69
+ for inp in inputs:
70
+ if not isinstance(inp, torch.Tensor):
71
+ out.append(inp)
72
+ continue
73
+
74
+ requires_grad = inp.requires_grad
75
+
76
+ if device is not None:
77
+ x = inp.to(device=device)
78
+ else:
79
+ x = inp
80
+
81
+ x = x.detach()
82
+ x.requires_grad = requires_grad
83
+ out.append(x)
84
+ return tuple(out)
85
+ else:
86
+ raise RuntimeError("Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)
87
+
88
+
89
+ def _set_cuda_rng_state(new_state, device=-1):
90
+ """Sets the random number generator state of the current GPU.
91
+
92
+ Arguments:
93
+ new_state (torch.ByteTensor): The desired state
94
+ This function is adapted from PyTorch repo (torch.cuda.set_rng_state) #ignore-cuda
95
+ with a single change: the input state is not cloned. Cloning caused
96
+ major performance issues for +4 GPU cases.
97
+ """
98
+ if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
99
+ # older PyTorch
100
+ def cb():
101
+ with get_accelerator().device(device):
102
+ _C._cuda_setRNGState(new_state)
103
+ else:
104
+ # newer PyTorch
105
+ if device == -1:
106
+ device = torch.device(get_accelerator().device_name())
107
+ elif isinstance(device, str):
108
+ device = torch.device(device)
109
+ elif isinstance(device, int):
110
+ device = torch.device(get_accelerator().device_name(), device)
111
+
112
+ def cb():
113
+ idx = device.index
114
+ if idx is None:
115
+ idx = get_accelerator().current_device()
116
+ default_generator = get_accelerator().default_generator(idx)
117
+ default_generator.set_state(new_state)
118
+
119
+ get_accelerator().lazy_call(cb)
120
+
121
+
122
+ class CudaRNGStatesTracker:
123
+ """Tracker for the cuda RNG states.
124
+
125
+ Using the `add` method, a cuda rng state is initialized based on
126
+ the input `seed` and is assigned to `name`. Later, by forking the
127
+ rng state, we can perform operations and return to our starting
128
+ cuda state.
129
+ """
130
+
131
+ def __init__(self):
132
+ # Map from a string name to the cuda rng state.
133
+ self.states_ = {}
134
+ # Seeds are just for book keeping and ensure no seed is set twice.
135
+ self.seeds_ = set()
136
+
137
+ def reset(self):
138
+ """Set to the initial state (no tracker)."""
139
+ self.states_ = {}
140
+ self.seeds_ = set()
141
+
142
+ def get_states(self):
143
+ """Get rng states. Copy the dictionary so we have direct
144
+ pointers to the states, not just a pointer to the dictionary."""
145
+ return copy.copy(self.states_)
146
+
147
+ def set_states(self, states):
148
+ """Set the rng states. For efficiency purposes, we do not check
149
+ the size of seed for compatibility."""
150
+ self.states_ = states
151
+
152
+ def add(self, name, seed):
153
+ """Track the rng state."""
154
+ # Check seed is not already used.
155
+ if seed in self.seeds_:
156
+ raise Exception('seed {} already exists'.format(seed))
157
+ self.seeds_.add(seed)
158
+ # Check that state is not already defined.
159
+ if name in self.states_:
160
+ raise Exception('cuda rng state {} already exists'.format(name))
161
+ # Get the current rng state.
162
+ orig_rng_state = get_accelerator().get_rng_state()
163
+ # Set the new state and store it.
164
+ get_accelerator().manual_seed(seed)
165
+ self.states_[name] = get_accelerator().get_rng_state()
166
+ # Reset rng state to what it was.
167
+ _set_cuda_rng_state(orig_rng_state)
168
+
169
+ @contextlib.contextmanager
170
+ def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
171
+ """Fork the cuda rng state, perform operations, and exit with
172
+ the original state."""
173
+ # Check if we have added the state
174
+ if name not in self.states_:
175
+ raise Exception('cuda rng state {} is not added'.format(name))
176
+ # Store current rng state.
177
+ orig_cuda_rng_state = get_accelerator().get_rng_state()
178
+ # Set rng state to the desired one
179
+ _set_cuda_rng_state(self.states_[name])
180
+ # Do the stuff we wanted to do.
181
+ try:
182
+ yield
183
+ finally:
184
+ # Update the current rng state for later use.
185
+ self.states_[name] = get_accelerator().get_rng_state()
186
+ # And set the state to the original state we started with.
187
+ _set_cuda_rng_state(orig_cuda_rng_state)
188
+
189
+
190
+ # RNG tracker object.
191
+ _CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
192
+
193
+
194
+ def get_cuda_rng_tracker():
195
+ """Get cuda rng tracker."""
196
+ return _CUDA_RNG_STATE_TRACKER
197
+
198
+
199
+ def model_parallel_cuda_manual_seed(seed):
200
+ """Initialize model parallel cuda seed.
201
+
202
+ This function should be called after the model parallel is
203
+ initialized. Also, no get_accelerator().manual_seed should be called
204
+ after this function. Basically, this is replacement for that
205
+ function.
206
+ Two set of RNG states are tracked:
207
+ default state: This is for data parallelism and is the same among a
208
+ set of model parallel GPUs but different across
209
+ different model parallel groups. This is used for
210
+ example for dropout in the non-model-parallel regions.
211
+ model-parallel state: This state is different among a set of model
212
+ parallel GPUs, but the same across data parallel
213
+ groups. This is used for example for dropout in
214
+ model parallel regions.
215
+ """
216
+ global mpu
217
+
218
+ tp_rank = bwc_tensor_model_parallel_rank(mpu)
219
+
220
+ # 2718 is just for fun and any POSITIVE value will work.
221
+ offset = seed + 2718
222
+ model_parallel_seed = offset + tp_rank
223
+ # Data parallel gets the original seed.
224
+ data_parallel_seed = seed
225
+
226
+ if dist.get_rank() == 0:
227
+ logger.info(
228
+ '> initializing model parallel cuda seeds on global rank {}, '
229
+ 'model parallel rank {}, and data parallel rank {} with '
230
+ 'model parallel seed: {} and data parallel seed: {}'.format(dist.get_rank(), tp_rank,
231
+ mpu.get_data_parallel_rank(),
232
+ model_parallel_seed, data_parallel_seed), )
233
+ _CUDA_RNG_STATE_TRACKER.reset()
234
+ # Set the default state.
235
+ get_accelerator().manual_seed(data_parallel_seed)
236
+ # and model parallel state.
237
+ _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed)
238
+
239
+
240
+ def model_parallel_reconfigure_tp_seed(seed):
241
+ global mpu
242
+ tp_rank = bwc_tensor_model_parallel_rank(mpu)
243
+ model_parallel_seed = seed + 2718 + tp_rank
244
+ with _CUDA_RNG_STATE_TRACKER.fork():
245
+ get_accelerator().manual_seed(model_parallel_seed)
246
+
247
+
248
+ def get_partition_start(item):
249
+ global mp_rank, mp_size, mp_group
250
+ size = item.numel()
251
+ partition_size = size / mp_size
252
+ start = partition_size * mp_rank
253
+ return int(start)
254
+
255
+
256
+ def get_partition_size(item):
257
+ global mp_rank, mp_size, mp_group
258
+ size = item.numel()
259
+ assert size % mp_size == 0, "Doesn't handle if partition activation if item is not divisible by mp size"
260
+ partition_size = size / mp_size
261
+ return int(partition_size)
262
+
263
+
264
+ def gather_partitioned_activations(tensors, device=None):
265
+ global mp_rank, mp_size, mp_group
266
+ assert len(tensors) % 2 == 0, f'Expected even count of tensors, instead got {len(tensors)}'
267
+ inputs = []
268
+ num_args = int(len(tensors) / 2)
269
+ for i in range(num_args):
270
+
271
+ item = tensors[2 * i]
272
+ size = tensors[2 * i + 1]
273
+
274
+ if not is_activation_to_checkpoint(item):
275
+ inputs.append(item)
276
+ continue
277
+
278
+ # don't need to do all_gather if model parallel is not enabled
279
+ if mp_group is None or mp_size == 1:
280
+ item = item.view(list(size.numpy()))
281
+ if device is not None:
282
+ item = item.to(device)
283
+ inputs.append(item)
284
+ continue
285
+
286
+ partition_size = item.numel()
287
+ tensor_size = partition_size * mp_size
288
+ if device is not None:
289
+ flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=device)
290
+ else:
291
+ flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device)
292
+ part = flat_tensor.narrow(0, partition_size * mp_rank, partition_size)
293
+ part.copy_(item)
294
+ dist.all_gather_into_tensor(flat_tensor, part, group=mp_group)
295
+ input_tensor = flat_tensor.view(list(size.numpy()))
296
+ item.data = input_tensor.data
297
+
298
+ inputs.append(item)
299
+
300
+ return tuple(inputs)
301
+
302
+
303
+ def extract_tensors(all_objects):
304
+ """
305
+ Separate objects in list/tuple into tensors and non-tensors and create a mapping to enable re-aggregation.
306
+ The order of tensors and non-tensors is preserved in their respective output groups.
307
+
308
+ Parameters:
309
+ all_objects (list/tuple): Objects containing tensors and non-tensors to be split.
310
+
311
+ Returns:
312
+ tuple: Containing tensors, non-tensors, and bools of whether each position in original list/tuple was a tensor.
313
+
314
+ """
315
+ tensor_objects = [v for v in all_objects if torch.is_tensor(v)]
316
+ non_tensor_objects = [v for v in all_objects if not torch.is_tensor(v)]
317
+ tensor_flags = [torch.is_tensor(v) for v in all_objects]
318
+ if type(all_objects) is tuple:
319
+ return tuple(tensor_objects), tuple(non_tensor_objects), tuple(tensor_flags)
320
+ return tensor_objects, non_tensor_objects, tensor_flags
321
+
322
+
323
+ def merge_tensors(tensor_objects, non_tensor_objects, tensor_flags):
324
+ """
325
+ Merge two lists (or tuples) of tensors and non-tensors using a mapping of positions in merged list (or tuple).
326
+
327
+ Parameters:
328
+ tensor_objects (list/tuple): Tensors to merge.
329
+ non_tensor_objects (list/tuple): Non-tensors to merge.
330
+ tensor_flags (list/tuple): Indicates whether each position in output is a tensor.
331
+
332
+ Returns:
333
+ tuple: Merge of tensors and non-tensors
334
+ """
335
+ merged_objects = []
336
+ tensor_idx = 0
337
+ non_tensor_idx = 0
338
+
339
+ real_tensor_flags = None
340
+
341
+ # remove the flags that are assigned to the size of the flattened tensors
342
+ if PARTITION_ACTIVATIONS:
343
+ real_tensor_flags = []
344
+ previous_flag = False
345
+ for flag in tensor_flags:
346
+ if previous_flag:
347
+ previous_flag = False
348
+ continue
349
+ previous_flag = flag
350
+ real_tensor_flags.append(flag)
351
+ else:
352
+ real_tensor_flags = tensor_flags
353
+
354
+ for is_tensor in real_tensor_flags:
355
+ if is_tensor:
356
+ merged_objects.append(tensor_objects[tensor_idx])
357
+ tensor_idx += 1
358
+ else:
359
+ merged_objects.append(non_tensor_objects[non_tensor_idx])
360
+ non_tensor_idx += 1
361
+
362
+ return tuple(merged_objects)
363
+
364
+
365
+ def is_activation_to_checkpoint(item):
366
+ """
367
+ Is an activation to be checkpointed
368
+ """
369
+ global mp_size
370
+ return torch.is_tensor(item) and item.is_floating_point() and item.numel() >= mp_size
371
+
372
+
373
+ def partition_activations(args, cpu_checkpoint, contiguous_checkpoint):
374
+ global contiguous_data_buffers, data_offsets
375
+
376
+ inputs = []
377
+ num_non_fp_tensors = 0
378
+
379
+ for arg_index, item in enumerate(args):
380
+ if not is_activation_to_checkpoint(item):
381
+ inputs.append(item)
382
+ num_non_fp_tensors += 1
383
+ continue
384
+
385
+ i = arg_index - num_non_fp_tensors
386
+ partition_size = get_partition_size(item)
387
+ partition = item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), partition_size).clone()
388
+
389
+ buffer_device = torch.device('cpu') if cpu_checkpoint else partition.device
390
+
391
+ if contiguous_checkpoint:
392
+ if i >= len(contiguous_data_buffers):
393
+ tensor_list = [
394
+ torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
395
+ for _ in range(num_layers)
396
+ ]
397
+ contiguous_data_buffers.append(tensor_list)
398
+ data_offsets.append(0)
399
+ elif contiguous_data_buffers[i] is None:
400
+ tensor_list = [
401
+ torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
402
+ for _ in range(num_layers)
403
+ ]
404
+ contiguous_data_buffers[i] = tensor_list
405
+ data_offsets[i] = 0
406
+
407
+ # Because the 'new_empty' returns uninitialized pages,
408
+ # the pages need to be populated during the cudaMemcpy time
409
+ # which increases the data copy time. To avoid this, we
410
+ # pre-populate these pages by simply writing 0 ahead of
411
+ # the actual cudaMemcpy operation time. Due to the
412
+ # previously launched GPU kernels, there is a small
413
+ # window of time here for CPUs to populate pages asynchronously.
414
+ contiguous_data_buffers[i][data_offsets[i]].data[range(
415
+ 0, contiguous_data_buffers[i][data_offsets[i]].data.shape[0],
416
+ int(mmap.PAGESIZE / contiguous_data_buffers[i][data_offsets[i]].data.element_size()))] = 0
417
+
418
+ contiguous_partition = contiguous_data_buffers[i][data_offsets[i]].data.copy_(partition.data)
419
+ data_offsets[i] = data_offsets[i] + 1
420
+ inputs.append(contiguous_partition)
421
+ else:
422
+ partition = partition.cpu() if CPU_CHECKPOINT else partition
423
+ inputs.append(partition)
424
+
425
+ return inputs
426
+
427
+
428
+ def get_partitioned_activations_for_backward(args, inputs, contiguous_checkpoint):
429
+ global contiguous_size_buffers, size_offsets
430
+
431
+ new_args = []
432
+ num_non_fp_tensors = 0
433
+
434
+ for arg_index, (arg, inp) in enumerate(zip(args, inputs)):
435
+ size = torch.tensor(arg.size()) if torch.is_tensor(arg) else None
436
+ if not is_activation_to_checkpoint(arg):
437
+ new_args.append(arg)
438
+ new_args.append(size)
439
+ num_non_fp_tensors += 1
440
+ continue
441
+
442
+ arg.data = torch.empty([], device=arg.device).data
443
+ arg.saved_data = inp.data
444
+
445
+ new_args.append(arg)
446
+ i = arg_index - num_non_fp_tensors
447
+
448
+ if contiguous_checkpoint:
449
+ numel = size.numel()
450
+ if i >= len(contiguous_size_buffers):
451
+ tmp = torch.tensor(())
452
+ contiguous_size_buffers.append(
453
+ tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device))
454
+ size_offsets.append(0)
455
+ elif contiguous_size_buffers[i] is None:
456
+ tmp = torch.tensor(())
457
+ contiguous_size_buffers[i] = tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device)
458
+ size_offsets[i] = 0
459
+
460
+ contiguous_size = contiguous_size_buffers[i].narrow(0, size_offsets[i], numel).data.copy_(size.data)
461
+ contiguous_size = contiguous_size.view_as(size)
462
+ size_offsets[i] = size_offsets[i] + numel
463
+ new_args.append(contiguous_size)
464
+ else:
465
+ new_args.append(size)
466
+
467
+ return new_args
468
+
469
+
470
+ def get_cpu_activations_for_backward(args, inputs):
471
+ new_args = []
472
+ for i, (arg, inp) in enumerate(zip(args, inputs)):
473
+ if not is_activation_to_checkpoint(arg):
474
+ new_args.append(arg)
475
+ continue
476
+
477
+ arg.data = torch.empty([], device=arg.device).data
478
+ arg.saved_data = inp.data
479
+ new_args.append(arg)
480
+
481
+ return new_args
482
+
483
+
484
+ class CheckpointFunction(torch.autograd.Function):
485
+ """This function is adapted from torch.utils.checkpoint with
486
+ two main changes:
487
+ 1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state` #ignore-cuda
488
+ 2) the states in the model parallel tracker are also properly
489
+ tracked/set/reset.
490
+ 3) Performance activation partitioning, contiguous memory optimization
491
+ 4) CPU Checkpointing
492
+ 5) Profile forward and backward functions
493
+ """
494
+
495
+ @staticmethod
496
+ def forward(ctx, run_function, all_outputs, *args):
497
+ global mpu, timers, SYNCHRONIZE, PROFILE_TIME
498
+
499
+ def save_args_for_backward(*all_args):
500
+ tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args)
501
+ ctx.deepspeed_saved_tensors = tensor_args
502
+ ctx.non_tensor_args = non_tensor_args
503
+ ctx.tensor_flags = tensor_flags
504
+
505
+ if SYNCHRONIZE:
506
+ get_accelerator().synchronize()
507
+
508
+ if timers is None and PROFILE_TIME:
509
+ timers = Timers()
510
+
511
+ if PROFILE_TIME:
512
+ timers(FORWARD_GLOBAL_TIMER).start()
513
+
514
+ ctx.run_function = run_function
515
+ global num_layers
516
+ global mp_rank, mp_size, mp_group
517
+ global contiguous_data_buffers, contiguous_size_buffers
518
+ global data_offsets, size_offsets
519
+ if mp_rank is None:
520
+ if mpu is not None:
521
+ if hasattr(mpu, 'get_tensor_model_parallel_rank'):
522
+ mp_rank = mpu.get_tensor_model_parallel_rank()
523
+ mp_size = mpu.get_tensor_model_parallel_world_size()
524
+ mp_group = mpu.get_tensor_model_parallel_group()
525
+ else:
526
+ mp_rank = mpu.get_model_parallel_rank()
527
+ mp_size = mpu.get_model_parallel_world_size()
528
+ mp_group = mpu.get_model_parallel_group()
529
+ else:
530
+ mp_rank = 0
531
+ mp_size = 1
532
+ mp_group = None
533
+
534
+ global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset
535
+
536
+ if cuda_device is None:
537
+ see_memory_usage("First Forward Beginning", force=False)
538
+ if dist.get_rank() == 0:
539
+ logger.info(f"Activation Checkpointing Information")
540
+ logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}")
541
+ logger.info(
542
+ f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers")
543
+ logger.info(f"----Synchronization {SYNCHRONIZE}")
544
+ logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}")
545
+
546
+ cuda_device = get_accelerator().current_device_name()
547
+ transport_stream = get_accelerator().Stream(device=cuda_device)
548
+
549
+ if PARTITION_ACTIVATIONS:
550
+ inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING)
551
+ elif CPU_CHECKPOINT:
552
+ inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint)
553
+
554
+ # just in case something funky is happening such as reuse of inputs
555
+ inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint)
556
+
557
+ # Copy the rng states.
558
+ ctx.fwd_cpu_rng_state = torch.get_rng_state()
559
+ ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state()
560
+ ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
561
+
562
+ see_memory_usage("Before running forward on the layer", force=False)
563
+ # ctx.save_for_backward(*args)
564
+ with torch.no_grad():
565
+ outputs = run_function(*inputs_cuda)
566
+
567
+ see_memory_usage("After running forward on the layer", force=False)
568
+ del inputs_cuda
569
+
570
+ if PARTITION_ACTIVATIONS:
571
+ new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING)
572
+ assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}'
573
+ save_args_for_backward(*new_args)
574
+ elif CPU_CHECKPOINT:
575
+ new_args = get_cpu_activations_for_backward(args, inputs)
576
+ save_args_for_backward(*new_args)
577
+ else:
578
+ save_args_for_backward(*args)
579
+
580
+ if PROFILE_TIME:
581
+ timers(FORWARD_GLOBAL_TIMER).stop()
582
+ timers.log([FORWARD_GLOBAL_TIMER])
583
+ if SYNCHRONIZE:
584
+ get_accelerator().synchronize()
585
+
586
+ # Tensors returned from forward() may not be differentiable.
587
+ if torch.is_tensor(outputs):
588
+ non_grad_outputs = [outputs] if not outputs.is_floating_point() else []
589
+ else:
590
+ non_grad_outputs = [o for o in outputs if torch.is_tensor(o) and not o.is_floating_point()]
591
+ ctx.mark_non_differentiable(*non_grad_outputs)
592
+
593
+ if torch.is_tensor(outputs):
594
+ all_outputs += [outputs]
595
+ return outputs
596
+ else:
597
+ all_outputs += outputs
598
+ outputs, _, _ = extract_tensors(all_objects=outputs)
599
+ return tuple(outputs)
600
+
601
    @staticmethod
    def backward(ctx, *grads):
        """Recompute the checkpointed forward pass and backpropagate through it.

        Restores the RNG state saved in ``forward``, gathers/moves the saved
        activations back to the compute device if they were partitioned or
        offloaded, re-runs ``ctx.run_function`` under ``enable_grad``, then
        calls ``torch.autograd.backward`` on the recomputed outputs.

        Returns a tuple of gradients aligned with ``forward``'s inputs; the
        two leading ``None`` entries correspond to the ``run_function`` and
        ``all_outputs`` arguments of ``forward``.
        """
        global timers
        see_memory_usage("In backward", force=False)
        # removing pointers to the contiguous buffer memory
        # so that they can be garbage collected once the checkpoints
        # have been used
        if SYNCHRONIZE:
            get_accelerator().synchronize()
        if PROFILE_TIME:
            timers('backward').start()

        if CONTIGUOUS_CHECKPOINTING:
            global data_offsets, size_offsets
            global contiguous_data_buffers, contiguous_size_buffers

            # NOTE(review): this loop only rebinds the loop variable and has no
            # effect; the rebinding of the module-level lists below is what
            # actually drops the references.
            for buffers in contiguous_data_buffers:
                buffers = []

            # frees up all the pointers to the checkpoints except for the ones
            # stored by save for backward
            contiguous_data_buffers = []
            contiguous_size_buffers = []
            data_offsets = []
            size_offsets = []

        see_memory_usage("In backward checkpointing code", force=False)
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError("Checkpointing is not compatible with .grad(), "
                               "please use .backward() if possible")

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS

        # Rebuild deepspeed_saved_tensors: tensors whose payload was stashed in
        # `saved_data` get their storage restored on their original device.
        for t in ctx.deepspeed_saved_tensors:
            if t is not None and hasattr(t, 'saved_data') and t.saved_data is not None:
                t.data = t.saved_data.to(t.device)
                t.saved_data = None

        # Reassemble the full inputs from however `forward` stored them:
        # partitioned across mp ranks, offloaded to CPU, or kept as-is.
        if PARTITION_ACTIVATIONS:
            # with get_accelerator().stream(transport_stream):
            inputs = gather_partitioned_activations(ctx.deepspeed_saved_tensors,
                                                    device=cuda_device if CPU_CHECKPOINT else None)
            detached_inputs = detach_variable(inputs)
        elif CPU_CHECKPOINT:
            inputs = move_to_device(ctx.deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint)
            detached_inputs = detach_variable(inputs)
        else:
            inputs = ctx.deepspeed_saved_tensors
            detached_inputs = detach_variable(inputs)

        # Add non tensor input args
        detached_inputs = merge_tensors(tensor_objects=detached_inputs,
                                        non_tensor_objects=ctx.non_tensor_args,
                                        tensor_flags=ctx.tensor_flags)

        # Store the current states.
        bwd_cpu_rng_state = torch.get_rng_state()
        bwd_cuda_rng_state = get_accelerator().get_rng_state()
        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        # Set the states to what it used to be before the forward pass.
        torch.set_rng_state(ctx.fwd_cpu_rng_state)
        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)

        # if PARTITION_ACTIVATIONS:
        #     current_stream=get_accelerator().current_stream()
        #     current_stream.wait_stream(transport_stream)

        see_memory_usage("In backward checkpointing code before forward", force=False)

        # Recompute the forward pass with grad tracking enabled so that the
        # subsequent autograd.backward() can flow through it.
        with torch.enable_grad():
            outputs = ctx.run_function(*detached_inputs)

        see_memory_usage("In backward checkpointing code after forward", force=False)
        # Set the states back to what it was at the start of this function.
        torch.set_rng_state(bwd_cpu_rng_state)
        _set_cuda_rng_state(bwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)

        if isinstance(outputs, torch.Tensor):
            outputs = (outputs, )

        # Filter out non tensor outputs
        outputs, _, _ = extract_tensors(all_objects=outputs)

        # Construct arguments to autograd.backward().
        # This is usually just outputs and grads, but forward() can return tensors that
        # are not differentiable.
        output_tensors = []
        grad_tensors = []
        for out, grad in zip(outputs, grads):
            if out.requires_grad:
                output_tensors.append(out)
                grad_tensors.append(grad)

        see_memory_usage("In backward checkpointing code before backward", force=False)

        torch.autograd.backward(output_tensors, grad_tensors)

        # Force clear our stashed tensors to prevent a memory leak in certain scenarios
        ctx.deepspeed_saved_tensors = None
        ctx.non_tensor_args = None
        ctx.tensor_flags = None

        see_memory_usage("After backward checkpointing code after backward", force=False)

        if PROFILE_TIME:
            timers('backward').stop()
            timers.log(['backward'])
        if SYNCHRONIZE:
            get_accelerator().synchronize()
        ret_list = [None, None]  # first None for ctx
        for inp in detached_inputs:
            if torch.is_tensor(inp):
                ret_list.append(inp.grad)
            else:
                ret_list.append(None)

        return tuple(ret_list)
722
+
723
+
724
def non_reentrant_checkpoint(function, *args):
    """This function is union of `torch.utils.checkpoint._checkpoint_without_reentrant` and `CheckpointFunction` in this module

    This function is aim to solve the back propagation error raised from all input requires no grad.
    * has already been implemented in pytorch for a while, the solution is stable at most time except for jit module mode.
    * can help to solve the issue which is hacked by `deepspeed.runtime.pipe.module.PipelineModule._is_checkpointable`

    Main modifications compared to the implementation of torch:
    1. adapt to the signature of `checkpoint` function in this module
    2. solve the non-deterministic by random state management consistent with deepspeed `CheckpointFunction`
    3. when there is partition or cpu checkpointing, gather them in the unpack_hook during back propagation
    4. make all after backward blocks in the hook which will executed after all leaf nodes backward execution.
    5. above 4. is inspired by `torch.autograd.graph.register_multi_grad_hook`, which is only implemented after 2.0.0
    """
    global mpu, timers, SYNCHRONIZE, PROFILE_TIME

    # Closure state shared between the pack/unpack hooks below.
    deepspeed_saved_tensors = None
    non_tensor_args = None
    tensor_flags = None

    def save_args_for_backward(*all_args):
        """keep this function to reduce the modification from original implementation"""
        nonlocal deepspeed_saved_tensors, non_tensor_args, tensor_flags
        tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args)
        deepspeed_saved_tensors = tensor_args
        non_tensor_args = non_tensor_args
        tensor_flags = tensor_flags

    if SYNCHRONIZE:
        get_accelerator().synchronize()

    if timers is None and PROFILE_TIME:
        timers = Timers()

    if PROFILE_TIME:
        timers(FORWARD_GLOBAL_TIMER).start()

    global num_layers
    global mp_rank, mp_size, mp_group
    global contiguous_data_buffers, contiguous_size_buffers
    global data_offsets, size_offsets
    # Lazily resolve the model-parallel topology on first use; mpu may expose
    # either the tensor-model-parallel or the legacy model-parallel API.
    if mp_rank is None:
        if mpu is not None:
            if hasattr(mpu, 'get_tensor_model_parallel_rank'):
                mp_rank = mpu.get_tensor_model_parallel_rank()
                mp_size = mpu.get_tensor_model_parallel_world_size()
                mp_group = mpu.get_tensor_model_parallel_group()
            else:
                mp_rank = mpu.get_model_parallel_rank()
                mp_size = mpu.get_model_parallel_world_size()
                mp_group = mpu.get_model_parallel_group()
        else:
            mp_rank = 0
            mp_size = 1
            mp_group = None

    global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset

    # One-time initialization on the very first checkpointed forward.
    if cuda_device is None:
        see_memory_usage("First Forward Beginning", force=False)
        if dist.get_rank() == 0:
            logger.info(f"Activation Checkpointing Information")
            logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}")
            logger.info(
                f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers")
            logger.info(f"----Synchronization {SYNCHRONIZE}")
            logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}")

        cuda_device = get_accelerator().current_device_name()
        transport_stream = get_accelerator().Stream(device=cuda_device)

    if PARTITION_ACTIVATIONS:
        inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING)
    elif CPU_CHECKPOINT:
        inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint)

    # just in case something funky is happening such as reuse of inputs
    inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint)

    # Copy the rng states.
    fwd_cpu_rng_state = torch.get_rng_state()
    fwd_cuda_rng_state = get_accelerator().get_rng_state()
    fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

    if PARTITION_ACTIVATIONS:
        new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING)
        assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}'
        save_args_for_backward(*new_args)
    elif CPU_CHECKPOINT:
        new_args = get_cpu_activations_for_backward(args, inputs)
        save_args_for_backward(*new_args)
    else:
        save_args_for_backward(*args)

    class Holder():
        """the place holder object used as activations to save memory"""
        pass

    # weakref seems utilized to discover the tensor deletion before a whole
    # forward backward pair loop finished
    storage: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
    weak_holder_list = []
    leaf_tensors = []
    backward_visited_leaf_nodes = 0

    def checkpoint_pack(tensor_from_forward):
        """used to record the activation order in the `weak_holder_list`

        the activation order in holder list is consistent between the first forward and recomputing forward.
        * the jit compiled forward will break the order consistency *
        """
        res = Holder()
        weak_holder_list.append(weakref.ref(res))

        # if this is a leaf tensor, save it for backward progression trace
        # leaf tensor used to be input or parameters, which is not activations and
        # has no memory overhead
        if tensor_from_forward.requires_grad and tensor_from_forward.is_leaf:
            leaf_tensors.append(tensor_from_forward)
        return res

    def checkpoint_unpack(holder_from_backward):
        """retrieve the activations from recompute"""
        nonlocal deepspeed_saved_tensors, non_tensor_args, tensor_flags

        # if this is the first step of backward propagation, recompute the graph and save
        # all the activations with the same order as `checkpoint_pack` does
        if len(storage) == 0:
            unpack_counter = 0

            def replay_pack(tensor_from_replay):
                """save recompute activations"""
                nonlocal unpack_counter
                unpack_counter += 1

                # Skip activations whose Holder has already been collected —
                # nothing downstream needs them anymore.
                if weak_holder_list[unpack_counter - 1]() is None:
                    return

                detached_activations = tensor_from_replay.detach()
                storage[weak_holder_list[unpack_counter - 1]()] = detached_activations

                return

            def replay_unpack(none_value):
                """recompute graph need not to backward"""
                raise RuntimeError("You are calling backwards on a tensor that is never exposed.")

            global timers
            see_memory_usage("In backward", force=False)
            # removing pointers to the contiguous buffer memory
            # so that they can be garbage collected once the checkpoints
            # have been used
            if SYNCHRONIZE:
                get_accelerator().synchronize()
            if PROFILE_TIME:
                timers('backward').start()

            if CONTIGUOUS_CHECKPOINTING:
                global data_offsets, size_offsets
                global contiguous_data_buffers, contiguous_size_buffers

                # NOTE(review): this loop only rebinds the loop variable; the
                # list rebindings below are what actually drop the references.
                for buffers in contiguous_data_buffers:
                    buffers = []

                # frees up all the pointers to the checkpoints except for the ones
                # stored by save for backward
                contiguous_data_buffers = []
                contiguous_size_buffers = []
                data_offsets = []
                size_offsets = []

            see_memory_usage("In backward checkpointing code", force=False)
            if not torch.autograd._is_checkpoint_valid():
                raise RuntimeError("Checkpointing is not compatible with .grad(), "
                                   "please use .backward() if possible")

            global cuda_device, transport_stream, PARTITION_ACTIVATIONS

            # gather inputs which is partitioned or checkpointed before first forward
            if PARTITION_ACTIVATIONS:
                # with get_accelerator().stream(transport_stream):
                inputs = gather_partitioned_activations(deepspeed_saved_tensors,
                                                        device=cuda_device if CPU_CHECKPOINT else None)
                detached_inputs = detach_variable(inputs)
            elif CPU_CHECKPOINT:
                inputs = move_to_device(deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint)
                detached_inputs = detach_variable(inputs)
            else:
                inputs = deepspeed_saved_tensors
                detached_inputs = detach_variable(inputs)

            # Add non tensor input args
            detached_inputs = merge_tensors(tensor_objects=detached_inputs,
                                            non_tensor_objects=non_tensor_args,
                                            tensor_flags=tensor_flags)

            # Store the current states.
            bwd_cpu_rng_state = torch.get_rng_state()
            bwd_cuda_rng_state = get_accelerator().get_rng_state()
            bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

            # Set the states to what it used to be before the forward pass.
            torch.set_rng_state(fwd_cpu_rng_state)
            _set_cuda_rng_state(fwd_cuda_rng_state)
            get_cuda_rng_tracker().set_states(fwd_cuda_rng_state_tracker)

            see_memory_usage("In backward checkpointing code before forward", force=False)
            # Replay the forward only to repopulate `storage`; its outputs and
            # autograd graph are discarded.
            with torch.enable_grad(), torch.autograd.graph.saved_tensors_hooks(replay_pack, replay_unpack):
                _unused = function(*detached_inputs)

            see_memory_usage("In backward checkpointing code after forward", force=False)
            # Set the states back to what it was at the start of this function.
            torch.set_rng_state(bwd_cpu_rng_state)
            _set_cuda_rng_state(bwd_cuda_rng_state)
            get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)

            deepspeed_saved_tensors = None
            non_tensor_args = None
            tensor_flags = None

        if holder_from_backward not in storage:
            raise RuntimeError("Attempt to retrieve a tensor saved by autograd multiple times without checkpoint"
                               " recomputation being triggered in between, this is not currently supported.")

        return storage[holder_from_backward]

    def after_backward_hook(_nonuse_grads):
        """the hook registered to all leaf tensors"""
        nonlocal leaf_tensors, backward_visited_leaf_nodes
        backward_visited_leaf_nodes += 1

        # Only the last-visited leaf runs the epilogue, mimicking
        # register_multi_grad_hook (not available before torch 2.0).
        if backward_visited_leaf_nodes == len(leaf_tensors):
            see_memory_usage("After backward checkpointing code after backward", force=False)

            if PROFILE_TIME:
                timers('backward').stop()
                timers.log(['backward'])
            if SYNCHRONIZE:
                get_accelerator().synchronize()

    with torch.autograd.graph.saved_tensors_hooks(checkpoint_pack, checkpoint_unpack):
        outputs = function(*inputs_cuda)
        for leaf_tensor in leaf_tensors:
            leaf_tensor.register_hook(after_backward_hook)

    see_memory_usage("After running forward on the layer", force=False)

    if PROFILE_TIME:
        timers(FORWARD_GLOBAL_TIMER).stop()
        timers.log([FORWARD_GLOBAL_TIMER])
    if SYNCHRONIZE:
        get_accelerator().synchronize()

    all_outputs = []
    if torch.is_tensor(outputs):
        all_outputs += [outputs]
    else:
        all_outputs += outputs

    if len(all_outputs) == 1:
        return all_outputs[0]
    else:
        return tuple(all_outputs)
987
+
988
+
989
def checkpoint(function, *args):
    """Checkpoint a model or part of the model.

    Thin wrapper over :class:`CheckpointFunction` (adapted from
    ``torch.utils.checkpoint``): collects the outputs through the
    ``all_outputs`` side channel and unwraps a single result.
    """
    collected = []
    CheckpointFunction.apply(function, collected, *args)
    # A lone output is returned bare; multiple outputs come back as a tuple.
    return collected[0] if len(collected) == 1 else tuple(collected)
999
+
1000
+
1001
def partition_activations_in_checkpoint(partition_activation):
    """Set the module-level PARTITION_ACTIVATIONS flag; logs the new value on rank 0."""
    global PARTITION_ACTIVATIONS
    PARTITION_ACTIVATIONS = partition_activation
    # Only rank 0 logs to avoid duplicated output across the process group.
    if dist.get_rank() == 0:
        logger.info(f"**************Partition Activations {PARTITION_ACTIVATIONS}************")
1006
+
1007
+
1008
def set_num_layers(nlayers):
    """Set the module-level layer count used to size contiguous checkpoint buffers."""
    global num_layers
    num_layers = nlayers
1011
+
1012
+
1013
def reset():
    """Resets memory buffers related to contiguous memory optimizations.
    Should be called during eval when multiple forward propagations are
    computed without any backward propagation that usually clears these
    buffers.
    Arguments:
        None

    Return:
        None
    """
    if CONTIGUOUS_CHECKPOINTING:
        global data_offsets, size_offsets
        global contiguous_data_buffers, contiguous_size_buffers

        # Rebinding the module-level lists drops every reference to the
        # checkpoint buffers (except those still held by save_for_backward)
        # so they can be garbage collected.  The previous
        # `for buffers in contiguous_data_buffers: buffers = []` loop only
        # rebound the loop variable and had no effect, so it was removed.
        contiguous_data_buffers = []
        contiguous_size_buffers = []
        data_offsets = []
        size_offsets = []
1037
+
1038
+
1039
def _configure_using_config_file(config, mpu=None):
    """Populate the module-level checkpointing flags from a DeepSpeed config."""
    global num_layers, PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
        CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME

    # Parse the full DeepSpeed config and keep only the activation
    # checkpointing section.
    ckpt_cfg = DeepSpeedConfig(config, mpu=mpu).activation_checkpointing_config
    if dist.get_rank() == 0:
        logger.info(ckpt_cfg.repr())

    PARTITION_ACTIVATIONS = ckpt_cfg.partition_activations
    CONTIGUOUS_CHECKPOINTING = ckpt_cfg.contiguous_memory_optimization
    num_layers = ckpt_cfg.number_checkpoints
    CPU_CHECKPOINT = ckpt_cfg.cpu_checkpointing
    SYNCHRONIZE = ckpt_cfg.synchronize_checkpoint_boundary
    PROFILE_TIME = ckpt_cfg.profile
1052
+
1053
+
1054
+ def _configure_defaults():
1055
+
1056
+ global mpu, num_layers, deepspeed_checkpointing_enabled
1057
+
1058
+ global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
1059
+ CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME
1060
+
1061
+ PARTITION_ACTIVATIONS = False
1062
+ CONTIGUOUS_CHECKPOINTING = False
1063
+ num_layers = False
1064
+ CPU_CHECKPOINT = False
1065
+ SYNCHRONIZE = False
1066
+ PROFILE_TIME = False
1067
+ deepspeed_checkpointing_enabled = True
1068
+
1069
+
1070
def configure(
    mpu_,
    deepspeed_config=None,
    partition_activations=None,
    contiguous_checkpointing=None,
    num_checkpoints=None,
    checkpoint_in_cpu=None,
    synchronize=None,
    profile=None,
):
    """Configure DeepSpeed Activation Checkpointing.

    Arguments:
        mpu_: Optional: An object that implements the following methods
            get_model_parallel_rank/group/world_size, and get_data_parallel_rank/group/world_size

        deepspeed_config: Optional: DeepSpeed Config json file when provided will be used to
            configure DeepSpeed Activation Checkpointing

        partition_activations: Optional: Partitions activation checkpoint across model parallel
            GPUs when enabled. By default False. Will overwrite deepspeed_config if provided

        contiguous_checkpointing: Optional: Copies activation checkpoints to a contiguous memory
            buffer. Works only with homogeneous checkpoints when partition_activations is enabled.
            Must provide num_checkpoints. By default False. Will overwrite deepspeed_config if
            provided

        num_checkpoints: Optional: Number of activation checkpoints stored during the forward
            propagation of the model. Used to calculate the buffer size for contiguous_checkpointing
            Will overwrite deepspeed_config if provided

        checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with
            partition_activation. Default is false. Will overwrite deepspeed_config if provided

        synchronize: Optional: Performs get_accelerator().synchronize() at the beginning and end of
            each call to deepspeed.checkpointing.checkpoint for both forward and backward pass.
            By default false. Will overwrite deepspeed_config if provided

        profile: Optional: Logs the forward and backward time for each
            deepspeed.checkpointing.checkpoint invocation. Will overwrite deepspeed_config
            if provided

    Returns:
        None
    """
    global mpu, num_layers, deepspeed_checkpointing_enabled

    global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
        CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME

    # Start from a clean slate, then layer the config file (if any), then the
    # explicit keyword arguments, which take highest precedence.
    _configure_defaults()

    if mpu_ is not None:
        mpu = mpu_

    if deepspeed_config is not None:
        _configure_using_config_file(deepspeed_config, mpu=mpu)

    if partition_activations is not None:
        PARTITION_ACTIVATIONS = partition_activations

    if contiguous_checkpointing is not None:
        CONTIGUOUS_CHECKPOINTING = contiguous_checkpointing

    if num_checkpoints is not None:
        num_layers = num_checkpoints

    if checkpoint_in_cpu is not None:
        CPU_CHECKPOINT = checkpoint_in_cpu

    if synchronize is not None:
        SYNCHRONIZE = synchronize

    if profile is not None:
        PROFILE_TIME = profile

    # Validate the final configuration (the two duplicated guards on the same
    # condition were merged into one).
    if CONTIGUOUS_CHECKPOINTING:
        assert PARTITION_ACTIVATIONS, "Contiguous Checkpointing is only available with partitioned activations. Set partitioned activations to true in deepspeed config"
        assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing"
1150
+
1151
+
1152
def is_configured():
    """True if deepspeed activation checkpointing has been configured
    by calling deepspeed.checkpointing.configure, else returns false

    Arguments:
        None

    Return:
        True if configured, else False
    """
    return deepspeed_checkpointing_enabled
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.24 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc ADDED
Binary file (4.31 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc ADDED
Binary file (20.3 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py ADDED
@@ -0,0 +1,417 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ from collections import defaultdict
8
+ import csv
9
+ import time
10
+ from multiprocessing import Process, Manager
11
+ import numpy as np
12
+ import torch
13
+ from torch.utils.data import BatchSampler, SequentialSampler, DataLoader, Subset
14
+
15
+ from deepspeed.utils import logger
16
+ from .indexed_dataset import MMapIndexedDataset
17
+ from .utils import split_dataset, split_index, create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype
18
+
19
+
20
+ class DataAnalyzer(object):
21
+
22
+ def __init__(self,
23
+ dataset,
24
+ num_workers=1,
25
+ worker_id=0,
26
+ num_threads=1,
27
+ num_threads_reduce=1,
28
+ specific_threads=[],
29
+ batch_size=1,
30
+ metric_names=[],
31
+ metric_functions=[],
32
+ metric_types=[],
33
+ metric_dtypes=[],
34
+ save_path="./",
35
+ collate_fn=None,
36
+ custom_map_init=None,
37
+ custom_map_update=None,
38
+ custom_map_finalize=None,
39
+ custom_reduce=None):
40
+ super().__init__()
41
+ self.dataset = dataset
42
+ self.num_workers = num_workers
43
+ self.worker_id = worker_id
44
+ self.num_threads = num_threads
45
+ self.num_threads_reduce = num_threads_reduce
46
+ self.specific_threads = specific_threads
47
+ self.batch_size = batch_size
48
+ self.metric_names = metric_names
49
+ self.metric_functions = metric_functions
50
+ self.metric_types = metric_types
51
+ self.metric_dtypes = metric_dtypes
52
+ self.save_path = save_path
53
+ self.collate_fn = collate_fn
54
+ self.custom_map_init = custom_map_init
55
+ self.custom_map_update = custom_map_update
56
+ self.custom_map_finalize = custom_map_finalize
57
+ self.custom_reduce = custom_reduce
58
+
59
    def init_metric_results(self, thread_id, metric_names, metric_types, metric_dtypes, save_path, worker_id):
        """Create per-metric output state for one (worker, thread) pair.

        For each metric a directory `{save_path}/{metric_name}/worker{W}_thread{T}/`
        is created.  Returns a list (parallel to `metric_names`) of dicts:
        for 'single_value_per_sample' metrics, an mmap dataset builder plus a
        metric-value -> sample-index accumulator; for
        'accumulate_value_over_samples' metrics, a running value (initially
        None) plus its output filename.
        """
        metric_results = []
        for m_idx in range(len(metric_names)):
            metric_name, metric_type, metric_dtype = metric_names[m_idx], \
                metric_types[m_idx], metric_dtypes[m_idx]
            # Float dtypes are rejected up front: downstream storage expects
            # integer metric values.
            assert metric_dtype not in [
                np.float64, np.double
            ], "Currently floating point metric values are not supported. Please change your metric into integer values (and potentially multiply a larger coefficient to keep the precision)."
            metric_save_path = f"{save_path}/{metric_name}/worker{worker_id}_thread{thread_id}/"
            os.makedirs(metric_save_path, exist_ok=True)
            if metric_type == 'single_value_per_sample':
                sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
                sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_dtype)
                metric_to_sample_fname = f"{metric_save_path}/{metric_name}_metric_to_sample"
                # NOTE(review): shells out to delete stale per-value CSVs from a
                # previous run; paths come from config, not untrusted input, but
                # a glob + os.remove would avoid the shell entirely.
                os.system(f"rm -rf {metric_to_sample_fname}*")
                metric_to_sample_dict = defaultdict(list)
                metric_results.append({
                    "sample_to_metric_fname": sample_to_metric_fname,
                    "sample_to_metric_builder": sample_to_metric_builder,
                    "metric_to_sample_fname": metric_to_sample_fname,
                    "metric_to_sample_dict": metric_to_sample_dict
                })
            elif metric_type == 'accumulate_value_over_samples':
                metric_value = None
                metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
                metric_results.append({"metric_value": metric_value, "metric_value_fname": metric_value_fname})
        return metric_results
86
+
87
    def update_metric_results(self, data, metric_types, metric_functions, metric_results):
        """Apply each metric function to one batch and fold results into state.

        For 'single_value_per_sample' metrics, every row's value is appended to
        the sample->metric builder and the row's sample index (read from
        `data['index']`) is bucketed under that value; buckets are flushed to
        per-value CSV files once they exceed 100 entries to bound memory.
        For 'accumulate_value_over_samples' metrics, the batch result is added
        in place onto the running total.
        """
        for m_idx in range(len(metric_types)):
            metric_type, metric_function, metric_result = metric_types[m_idx], \
                metric_functions[m_idx], metric_results[m_idx]
            if metric_type == 'single_value_per_sample':
                # presumably metric_function returns one value per row of the
                # batch — TODO confirm against the metric implementations.
                metric_values = metric_function(data)
                for row in range(metric_values.size()[0]):
                    metric_result["sample_to_metric_builder"].add_item(metric_values[row].reshape(-1))
                    metric_result["metric_to_sample_dict"][metric_values[row].item()].append(
                        data['index'][row][0].item())
                # Spill large buckets to disk so the in-memory dict stays small.
                for m_value in metric_result["metric_to_sample_dict"]:
                    if len(metric_result["metric_to_sample_dict"][m_value]) > 100:
                        metric_fname = metric_result["metric_to_sample_fname"]
                        with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
                            writer = csv.writer(f)
                            writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
                        metric_result["metric_to_sample_dict"][m_value] = []
            elif metric_type == 'accumulate_value_over_samples':
                metric_values = metric_function(data)
                if metric_result["metric_value"] is None:
                    metric_result["metric_value"] = metric_values
                else:
                    metric_result["metric_value"].add_(metric_values)
110
+
111
    def finalize_metric_results(self, metric_types, metric_dtypes, metric_results):
        """Flush all remaining metric state to disk at the end of the map phase.

        Closes the sample->metric mmap builders, writes out any still-buffered
        metric-value -> sample-index buckets as CSV rows, and serializes each
        accumulated metric value into its own mmap dataset.
        """
        for m_idx in range(len(metric_types)):
            metric_type, metric_dtype, metric_result = metric_types[m_idx], \
                metric_dtypes[m_idx], metric_results[m_idx]
            if metric_type == 'single_value_per_sample':
                metric_fname = metric_result["sample_to_metric_fname"]
                close_mmap_dataset_builder(metric_result["sample_to_metric_builder"], metric_fname)
                # Flush buckets that never hit the spill threshold in
                # update_metric_results.
                for m_value in metric_result["metric_to_sample_dict"]:
                    if len(metric_result["metric_to_sample_dict"][m_value]) > 0:
                        metric_fname = metric_result["metric_to_sample_fname"]
                        with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
                            writer = csv.writer(f)
                            writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
                        metric_result["metric_to_sample_dict"][m_value] = []
            elif metric_type == 'accumulate_value_over_samples':
                if metric_result["metric_value"] is not None:
                    metric_value_builder = create_mmap_dataset_builder(metric_result["metric_value_fname"],
                                                                       metric_dtype)
                    metric_value_builder.add_item(metric_result["metric_value"].reshape(-1))
                    close_mmap_dataset_builder(metric_value_builder, metric_result["metric_value_fname"])
131
+
132
    def run_map_helper(self, thread_id):
        """Run the map phase for one thread's slice of this worker's data.

        Iterates the slice `self.thread_splits[thread_id]` sequentially with a
        DataLoader, feeding each batch through the (custom or default)
        init/update/finalize metric hooks, and logs progress plus an ETA after
        every batch.
        """
        start_idx, end_idx = self.thread_splits[thread_id][0], \
            self.thread_splits[thread_id][1]
        logger.info(f"worker {self.worker_id} thread {thread_id}: start working " \
            f"on data subset {start_idx} to {end_idx}")
        thread_dataset = Subset(self.dataset, list(range(start_idx, end_idx)))
        # Sequential, non-shuffled batching so sample indices stay ordered.
        sampler = BatchSampler(SequentialSampler(thread_dataset), batch_size=self.batch_size, drop_last=False)
        if self.collate_fn is None:
            iterator = iter(DataLoader(thread_dataset, batch_sampler=sampler, num_workers=0, pin_memory=False))
        else:
            iterator = iter(
                DataLoader(thread_dataset,
                           batch_sampler=sampler,
                           num_workers=0,
                           collate_fn=self.collate_fn,
                           pin_memory=False))
        if self.custom_map_init is None:
            metric_results = self.init_metric_results(thread_id, self.metric_names, self.metric_types,
                                                      self.metric_dtypes, self.save_path, self.worker_id)
        else:
            metric_results = self.custom_map_init(thread_id, self.metric_names, self.metric_types, self.metric_dtypes,
                                                  self.save_path, self.worker_id)
        total_sample = len(thread_dataset)
        processed_sample = 0
        start = time.time()
        while True:
            try:
                data = next(iterator)
                if self.custom_map_update is None:
                    self.update_metric_results(data, self.metric_types, self.metric_functions, metric_results)
                else:
                    self.custom_map_update(data, self.metric_types, self.metric_functions, metric_results)
                processed_sample += self.batch_size
                # Elapsed hours so far, and a linear extrapolation to the end.
                duration = (time.time() - start) / 3600.0
                remain_duration = duration * total_sample / processed_sample - duration
                logger.info(
                    f"worker {self.worker_id} thread {thread_id}: {processed_sample} " \
                    f"out of {total_sample} processed in {duration:.2f} hr, " \
                    f"estimated to finish in {remain_duration:.2f} hr")
            except StopIteration:
                logger.info(f"worker {self.worker_id} thread {thread_id}: reach end of file")
                break
        if self.custom_map_finalize is None:
            self.finalize_metric_results(self.metric_types, self.metric_dtypes, metric_results)
        else:
            self.custom_map_finalize(self.metric_types, self.metric_dtypes, metric_results)
        logger.info(f"worker {self.worker_id} thread {thread_id}: finished")
179
+
180
+ def run_map(self):
181
+ self.worker_splits, self.thread_splits = split_dataset(self.dataset, self.num_workers, self.worker_id,
182
+ self.num_threads)
183
+ if len(self.specific_threads) > 0:
184
+ threads_to_run = self.specific_threads
185
+ else:
186
+ threads_to_run = list(range(self.num_threads))
187
+ if self.num_threads > 1:
188
+ p = []
189
+ for thread in threads_to_run:
190
+ p.append(Process(target=self.run_map_helper, args=(thread, )))
191
+ p[thread].start()
192
+
193
+ for thread in threads_to_run:
194
+ p[thread].join()
195
+ else:
196
+ assert self.num_threads == 1
197
+ self.run_map_helper(0)
198
+
199
+ def get_metric_value_percentiles(self, metric_name, num_sample_per_value, total_num_samples):
200
+ logger.info(f"Checking the value percentiles of metric {metric_name}...")
201
+ processed_samples = 0
202
+ current_percentile = 5
203
+ for key in sorted(num_sample_per_value.keys()):
204
+ processed_samples += num_sample_per_value[key]
205
+ if processed_samples >= total_num_samples * current_percentile / 100.0:
206
+ logger.info(f"Metric {metric_name} {current_percentile}th percentile: {key}")
207
+ current_percentile += 5
208
+
209
+ def merge_gather_map_stats(self, num_workers, num_threads, num_threads_reduce, t_idx_reduce, metric_save_path,
210
+ metric_name, return_dict):
211
+ results = []
212
+ for w_idx in range(num_workers):
213
+ for t_idx in range(num_threads):
214
+ if (w_idx * num_threads + t_idx) % num_threads_reduce == t_idx_reduce:
215
+ w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
216
+ w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
217
+ w_sample_to_metric = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
218
+ unique_v = list(np.unique(w_sample_to_metric))
219
+ sample_to_metric_count = len(w_sample_to_metric)
220
+ logger.info(f"Finished gathering map stats from worker {w_idx} thread {t_idx}.")
221
+ results.append([unique_v, sample_to_metric_count])
222
+ return_dict[t_idx_reduce] = results
223
+
224
+ def merge_sample_to_metric(self, t_idx_reduce, metric_save_path, metric_name, metric_value_dtype,
225
+ map_worker_thread):
226
+ sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
227
+ sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
228
+ for w_t in map_worker_thread:
229
+ w_metric_save_path = f"{metric_save_path}/worker{w_t[0]}_thread{w_t[1]}/"
230
+ w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
231
+ w_data = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
232
+ for row in range(len(w_data)):
233
+ sample_to_metric_builder.add_item(torch.tensor(w_data[row].astype(np.int64), dtype=torch.long))
234
+ logger.info(f"Finished merge_sample_to_metric from worker {w_t[0]} thread {w_t[1]}.")
235
+ close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
236
+
237
+ def merge_metric_to_sample(self, t_idx_reduce, metric_save_path, metric_name, sample_idx_dtype, metric_value_dtype,
238
+ unique_metric_values, num_workers, num_threads):
239
+ index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
240
+ index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
241
+ index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
242
+ index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
243
+ for unique_v in unique_metric_values:
244
+ samples = []
245
+ for w_idx in range(num_workers):
246
+ for t_idx in range(num_threads):
247
+ w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
248
+ w_metric_to_sample_fname = f"{w_metric_save_path}/{metric_name}_metric_to_sample_{unique_v}.csv"
249
+ if os.path.isfile(w_metric_to_sample_fname):
250
+ with open(w_metric_to_sample_fname, 'r') as f:
251
+ datareader = csv.reader(f)
252
+ for row in datareader:
253
+ samples += [int(x) for x in row]
254
+ index_to_sample_builder.add_item(torch.tensor(samples, dtype=torch.long))
255
+ index_to_metric_builder.add_item(torch.tensor([unique_v], dtype=torch.long))
256
+ logger.info(f"Finished reducing metric {metric_name} value {unique_v}.")
257
+ close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
258
+ close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
259
+
260
+ def merge_map_results(self, dataset, metric_names, metric_types, save_path, num_workers, num_threads,
261
+ num_threads_reduce):
262
+ total_num_samples = len(dataset)
263
+ sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1)
264
+ logger.info(
265
+ f"Total number of data samples: {total_num_samples}. Will use {sample_idx_dtype} to store the sample indexes."
266
+ )
267
+ for m_idx in range(len(metric_names)):
268
+ metric_name, metric_type = metric_names[m_idx], metric_types[m_idx]
269
+ if metric_type == 'single_value_per_sample':
270
+ metric_save_path = f"{save_path}/{metric_name}/"
271
+ sample_to_metric_count = 0
272
+ unique_metric_values = set([])
273
+ manager = Manager()
274
+ return_dict = manager.dict()
275
+ p = []
276
+ for t_idx_reduce in range(num_threads_reduce):
277
+ p.append(
278
+ Process(target=self.merge_gather_map_stats,
279
+ args=(
280
+ num_workers,
281
+ num_threads,
282
+ num_threads_reduce,
283
+ t_idx_reduce,
284
+ metric_save_path,
285
+ metric_name,
286
+ return_dict,
287
+ )))
288
+ p[t_idx_reduce].start()
289
+ for t_idx_reduce in range(num_threads_reduce):
290
+ p[t_idx_reduce].join()
291
+ for t_idx_reduce in range(num_threads_reduce):
292
+ results = return_dict[t_idx_reduce]
293
+ for res in results:
294
+ unique_metric_values = unique_metric_values.union(set(res[0]))
295
+ sample_to_metric_count += res[1]
296
+ value_max = max(unique_metric_values)
297
+ value_min = min(unique_metric_values)
298
+ assert sample_to_metric_count == total_num_samples, "The number of samples in map result files are not correct. It's possible that some map worker didn't finish successfully."
299
+ metric_value_dtype = find_fit_int_dtype(value_min, value_max)
300
+ logger.info(
301
+ f"Metric {metric_name} has values between {value_min} and {value_max}. Will use {metric_value_dtype} to store the metric values."
302
+ )
303
+
304
+ # sample_to_metric
305
+ map_worker_thread = []
306
+ for w_idx in range(num_workers):
307
+ for t_idx in range(num_threads):
308
+ map_worker_thread.append([w_idx, t_idx])
309
+ thread_splits = split_index(0, len(map_worker_thread), num_threads_reduce)
310
+ p = []
311
+ for t_idx_reduce in range(num_threads_reduce):
312
+ start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
313
+ p.append(
314
+ Process(target=self.merge_sample_to_metric,
315
+ args=(
316
+ t_idx_reduce,
317
+ metric_save_path,
318
+ metric_name,
319
+ metric_value_dtype,
320
+ map_worker_thread[start_idx:end_idx],
321
+ )))
322
+ p[t_idx_reduce].start()
323
+ for t_idx_reduce in range(num_threads_reduce):
324
+ p[t_idx_reduce].join()
325
+
326
+ sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
327
+ sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
328
+ for t_idx_reduce in range(num_threads_reduce):
329
+ chunk_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
330
+ logger.info(f"Merging file {chunk_fname}")
331
+ sample_to_metric_builder.merge_file_(chunk_fname)
332
+ close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
333
+ sample_to_metric = MMapIndexedDataset(sample_to_metric_fname, skip_warmup=True)
334
+ assert len(sample_to_metric) == total_num_samples
335
+
336
+ # metric_to_sample
337
+ unique_metric_values = list(sorted(unique_metric_values))
338
+ thread_splits = split_index(0, len(unique_metric_values), num_threads_reduce)
339
+ p = []
340
+ for t_idx_reduce in range(num_threads_reduce):
341
+ start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
342
+ p.append(
343
+ Process(target=self.merge_metric_to_sample,
344
+ args=(
345
+ t_idx_reduce,
346
+ metric_save_path,
347
+ metric_name,
348
+ sample_idx_dtype,
349
+ metric_value_dtype,
350
+ unique_metric_values[start_idx:end_idx],
351
+ num_workers,
352
+ num_threads,
353
+ )))
354
+ p[t_idx_reduce].start()
355
+ for t_idx_reduce in range(num_threads_reduce):
356
+ p[t_idx_reduce].join()
357
+ index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample"
358
+ index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
359
+ index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric"
360
+ index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
361
+ for t_idx_reduce in range(num_threads_reduce):
362
+ chunk_is_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
363
+ logger.info(f"Merging file {chunk_is_fname}")
364
+ index_to_sample_builder.merge_file_(chunk_is_fname)
365
+ chunk_im_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
366
+ logger.info(f"Merging file {chunk_im_fname}")
367
+ index_to_metric_builder.merge_file_(chunk_im_fname)
368
+ close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
369
+ close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
370
+ num_sample_per_value = {}
371
+ index_to_sample = MMapIndexedDataset(index_to_sample_fname, skip_warmup=True)
372
+ index_to_metric = MMapIndexedDataset(index_to_metric_fname, skip_warmup=True)
373
+ index_to_sample_merged_fname = f"{metric_save_path}/{metric_name}_index_to_sample_percentile_merged"
374
+ index_to_sample_merged_builder = create_mmap_dataset_builder(index_to_sample_merged_fname,
375
+ sample_idx_dtype)
376
+ for v_idx in range(len(index_to_sample)):
377
+ if v_idx > 0:
378
+ assert index_to_metric[v_idx] > index_to_metric[v_idx - 1]
379
+ num_sample_per_value[index_to_metric[v_idx][0]] = len(index_to_sample[v_idx])
380
+ assert sum(num_sample_per_value.values()) == total_num_samples
381
+ merge_step = max(1, len(index_to_sample) // 100)
382
+ for v_idx in range(0, len(index_to_sample), merge_step):
383
+ merged_samples = np.copy(
384
+ np.concatenate(index_to_sample[v_idx:min(len(index_to_sample), (v_idx + merge_step))],
385
+ axis=None))
386
+ index_to_sample_merged_builder.add_item(
387
+ torch.tensor(merged_samples.astype(np.int64), dtype=torch.long))
388
+ logger.info(f"Finished merging index_to_sample {v_idx} to {v_idx+merge_step}.")
389
+ close_mmap_dataset_builder(index_to_sample_merged_builder, index_to_sample_merged_fname)
390
+ self.get_metric_value_percentiles(metric_name, num_sample_per_value, total_num_samples)
391
+ elif metric_type == 'accumulate_value_over_samples':
392
+ metric_save_path = f"{save_path}/{metric_name}/"
393
+ metric_value = None
394
+ for w_idx in range(num_workers):
395
+ for t_idx in range(num_threads):
396
+ w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
397
+ w_metric_value_fname = f"{w_metric_save_path}/{metric_name}_metric_value"
398
+ w_metric_value = MMapIndexedDataset(w_metric_value_fname, skip_warmup=True)
399
+ if metric_value is None:
400
+ metric_value = np.copy(w_metric_value[0])
401
+ else:
402
+ metric_value += np.copy(w_metric_value[0])
403
+ value_max = int(max(metric_value))
404
+ value_min = int(min(metric_value))
405
+ metric_value_dtype = find_fit_int_dtype(value_min, value_max)
406
+ metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
407
+ metric_value_builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype)
408
+ metric_value_builder.add_item(torch.tensor(metric_value.astype(np.int64), dtype=torch.long))
409
+ close_mmap_dataset_builder(metric_value_builder, metric_value_fname)
410
+
411
+ def run_reduce(self):
412
+ if self.custom_reduce is None:
413
+ self.merge_map_results(self.dataset, self.metric_names, self.metric_types, self.save_path,
414
+ self.num_workers, self.num_threads, self.num_threads_reduce)
415
+ else:
416
+ self.custom_reduce(self.dataset, self.metric_names, self.metric_types, self.save_path, self.num_workers,
417
+ self.num_threads, self.num_threads_reduce)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ coding=utf-8
7
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
8
+
9
+ Licensed under the Apache License, Version 2.0 (the "License");
10
+ you may not use this file except in compliance with the License.
11
+ You may obtain a copy of the License at
12
+
13
+ http://www.apache.org/licenses/LICENSE-2.0
14
+
15
+ Unless required by applicable law or agreed to in writing, software
16
+ distributed under the License is distributed on an "AS IS" BASIS,
17
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ See the License for the specific language governing permissions and
19
+ limitations under the License.
20
+ Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/data_samplers.py
21
+ """
22
+
23
+ import torch
24
+ import os
25
+ import numpy as np
26
+
27
+ import deepspeed.comm as dist
28
+ from deepspeed.utils import logger
29
+ from deepspeed.accelerator import get_accelerator
30
+ from ..constants import *
31
+ from ..curriculum_scheduler import CurriculumScheduler
32
+ from .indexed_dataset import MMapIndexedDataset
33
+ from .utils import create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype
34
+
35
+
36
class DeepSpeedDataSampler(object):
    """Curriculum-learning-aware batch sampler for DeepSpeed data efficiency.

    Rank 0 builds "clusters" of sample indices (mmap files) whose metric
    difficulty is within the current curriculum difficulty, samples a global
    batch from those clusters, and broadcasts it over the data-parallel
    group; every rank then yields its own micro-batch slice in ``__iter__``.
    """

    def __init__(self,
                 data_efficiency_config,
                 one_epoch_total_samples,
                 micro_batch_size,
                 data_parallel_rank,
                 data_parallel_size,
                 data_parallel_group,
                 gradient_accumulation_steps,
                 global_rank,
                 drop_last=True):
        # Keep a copy of input params for later use.
        self.data_efficiency_config = data_efficiency_config
        self.one_epoch_total_samples = one_epoch_total_samples
        # Smallest integer dtype able to hold any sample index.
        self.index_dtype = find_fit_int_dtype(0, one_epoch_total_samples)
        self.total_samples = one_epoch_total_samples * self.data_efficiency_config[DATA_SAMPLING][
            DATA_SAMPLING_NUM_EPOCHS]
        self.micro_batch_size = micro_batch_size
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_group = data_parallel_group
        self.micro_batch_times_data_parallel_size = \
            self.micro_batch_size * data_parallel_size
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.global_batch_size = self.micro_batch_times_data_parallel_size * \
            self.gradient_accumulation_steps
        self.global_rank = global_rank
        self.drop_last = drop_last
        # Dedicated RNG seeded from config; used for cluster shuffles/sampling.
        self.np_rng = np.random.default_rng(self.data_efficiency_config[DATA_EFFICIENCY_SEED])
        self.state = {}
        self.batch = []
        self.consumed_samples = 0
        if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]:
            self.curriculum_step = 0
            self.current_difficulties = {}
            self.data_cluster_paths = []
            self.data_cluster_current_position = []
            self.curriculum_schedulers = {}
            self.curriculum_index_to_sample = {}
            self.curriculum_index_to_metric = {}
            self.difficulty_type = {}
            self.clustering_type = {}
            self.data_1epoch_size = None
            # Only global rank 0 owns the cluster files and metric indexes.
            if self.global_rank == 0:
                self.data_clusters = []
                self.data_cluster_sizes = []
                cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
                    CURRICULUM_LEARNING_CLUSTER_PATH]
                if not os.path.exists(cluster_path):
                    os.makedirs(cluster_path)
            for metric in self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]:
                self.curriculum_schedulers[metric] = CurriculumScheduler(
                    data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric])
                self.difficulty_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
                    CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_DIFFICULTY_TYPE]
                self.clustering_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
                    CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_CLUSTERING_TYPE]
                if self.global_rank == 0:
                    # Lazily-mapped per-metric indexes built by the data analyzer.
                    if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
                        self.curriculum_index_to_sample[metric] = MMapIndexedDataset(
                            data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
                            [metric][CURRICULUM_LEARNING_SAMPLE_PATH],
                            skip_warmup=True)
                        if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
                            self.curriculum_index_to_metric[metric] = MMapIndexedDataset(
                                data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
                                [metric][CURRICULUM_LEARNING_METRIC_PATH],
                                skip_warmup=True)

        # Sanity checks.
        assert self.total_samples > 0, \
            'no sample to consume: {}'.format(self.total_samples)
        assert self.micro_batch_size > 0
        assert data_parallel_size > 0
        assert self.data_parallel_rank < data_parallel_size, \
            'data_parallel_rank should be smaller than data size: {}, ' \
            '{}'.format(self.data_parallel_rank, data_parallel_size)

    def __len__(self):
        # Total samples across all configured epochs.
        return self.total_samples

    def set_custom_curriculum_learning_schedule(self, schedule_func_dict):
        """Override the difficulty schedule of listed metrics with user callables."""
        for metric in self.curriculum_schedulers:
            if metric in schedule_func_dict:
                self.curriculum_schedulers[metric].set_custom_get_difficulty(schedule_func_dict[metric])

    def get_start_end_idx(self):
        """Return this rank's [start, end) slice within a micro-batch-sized chunk."""
        start_idx = self.data_parallel_rank * self.micro_batch_size
        end_idx = start_idx + self.micro_batch_size
        return start_idx, end_idx

    def get_sample_based_on_metric_value(self, metric, value_start, value_end):
        """Collect sample indices whose metric value is in (value_start, value_end]."""
        new_samples = None
        for row in range(len(self.curriculum_index_to_sample[metric])):
            if self.curriculum_index_to_metric[metric][row] <= value_end and self.curriculum_index_to_metric[metric][
                    row] > value_start:
                row_samples = np.copy(self.curriculum_index_to_sample[metric][row])
                new_samples = row_samples if new_samples is None else np.concatenate(
                    (new_samples, row_samples), axis=None)
        return new_samples

    def get_sample_based_on_metric_percentile(self, metric, percentile_start, percentile_end):
        """Collect sample indices in the (percentile_start, percentile_end] band.

        Percentiles are expressed in units of the metric's configured
        ``max_difficulty``; rows of ``curriculum_index_to_sample`` are walked
        in order, slicing partial rows at the band boundaries.
        """
        new_samples = None
        if self.data_1epoch_size is None:
            self.data_1epoch_size = sum(len(x) for x in self.curriculum_index_to_sample[metric])
        max_percentile = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][
            metric][CURRICULUM_LEARNING_MAX_DIFFICULTY]
        sample_per_percentile = self.data_1epoch_size // max_percentile
        start_count = sample_per_percentile * percentile_start
        end_count = sample_per_percentile * percentile_end
        # The last band absorbs any remainder from the integer division.
        if percentile_end == max_percentile:
            end_count = self.data_1epoch_size
        current_count = 0
        for row in range(len(self.curriculum_index_to_sample[metric])):
            row_size = len(self.curriculum_index_to_sample[metric][row])
            if current_count + row_size > start_count:
                row_start = max(0, start_count - current_count)
                if current_count + row_size <= end_count:
                    row_end = row_size
                else:
                    row_end = end_count - current_count
                row_samples = np.copy(self.curriculum_index_to_sample[metric][row][row_start:row_end])
                new_samples = row_samples if new_samples is None else np.concatenate(
                    (new_samples, row_samples), axis=None)
            current_count += row_size
            if current_count >= end_count:
                break
        return new_samples

    def get_new_cluster(self, previous_difficulties):
        """Create (on rank 0) and register the cluster for the current difficulties.

        With more than one clustered metric, the cluster is the intersection
        of each metric's full eligible set minus all previously emitted
        clusters; with at most one, it is the delta band between the previous
        and current difficulty. The shuffled cluster is persisted as an mmap
        file so all ranks can later find it by name.
        """
        cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX
        for metric in self.curriculum_schedulers:
            cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}"
        cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
            CURRICULUM_LEARNING_CLUSTER_PATH]
        cluster_path = f"{cluster_path}/{cluster_fname}"
        if self.global_rank == 0:
            new_cluster = None
            need_clustering = 0
            for metric in self.clustering_type:
                if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
                    need_clustering += 1
            if need_clustering > 1:
                # Multiple clustered metrics: intersect each metric's eligible set.
                for metric in self.curriculum_schedulers:
                    if self.clustering_type[metric] == CURRICULUM_LEARNING_SINGLE_CLUSTER:
                        metric_cluster = np.arange(start=0,
                                                   stop=self.one_epoch_total_samples,
                                                   step=1,
                                                   dtype=self.index_dtype)
                    else:
                        if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
                            metric_cluster = self.get_sample_based_on_metric_value(metric, float('-inf'),
                                                                                  self.current_difficulties[metric])
                        elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
                            metric_cluster = self.get_sample_based_on_metric_percentile(
                                metric, 0, self.current_difficulties[metric])
                    new_cluster = metric_cluster if new_cluster is None else \
                        np.intersect1d(new_cluster, metric_cluster, assume_unique=True)
                # Exclude samples already assigned to earlier clusters.
                for cluster in self.data_clusters:
                    new_cluster = np.setdiff1d(new_cluster, cluster[0], assume_unique=True)
            else:
                if len(self.data_clusters) == 0:
                    new_cluster = np.arange(start=0, stop=self.one_epoch_total_samples, step=1, dtype=self.index_dtype)
                for metric in self.curriculum_schedulers:
                    if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
                        if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
                            new_cluster = self.get_sample_based_on_metric_value(metric, previous_difficulties[metric],
                                                                               self.current_difficulties[metric])
                        elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
                            new_cluster = self.get_sample_based_on_metric_percentile(
                                metric, previous_difficulties[metric], self.current_difficulties[metric])
            if new_cluster is not None and len(new_cluster) > 0:
                logger.info(
                    f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) with size {len(new_cluster)} generated."
                )
                self.np_rng.shuffle(new_cluster)
                cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
                cluster_builder.add_item_numpy(new_cluster)
                close_mmap_dataset_builder(cluster_builder, cluster_path)
                self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True))
                self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
            else:
                logger.info(
                    f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) has no matched data thus skipped."
                )
        # All ranks wait for rank 0 to finish writing before checking the file.
        dist.barrier(group=self.data_parallel_group)
        if os.path.isfile(f"{cluster_path}.bin"):
            self.data_cluster_paths.append(cluster_fname)
            self.data_cluster_current_position.append(0)

    def sample_from_clusters(self):
        """Return per-cluster sample counts for one global batch, size-weighted."""
        num_clusters = len(self.data_clusters)
        weight_sum = sum(self.data_cluster_sizes)
        weights = [x / weight_sum for x in self.data_cluster_sizes]
        samples = self.np_rng.choice(num_clusters, self.global_batch_size, replace=True, p=weights)
        samples = np.bincount(samples, minlength=num_clusters)
        return samples

    def reshuffle_clusters(self, cidx):
        """Reshuffle cluster ``cidx`` in place (rewrites its mmap file)."""
        cluster_fname = self.data_cluster_paths[cidx]
        cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
            CURRICULUM_LEARNING_CLUSTER_PATH]
        cluster_path = f"{cluster_path}/{cluster_fname}"
        cluster = np.copy(self.data_clusters[cidx][0])
        self.np_rng.shuffle(cluster)
        cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
        cluster_builder.add_item_numpy(cluster)
        close_mmap_dataset_builder(cluster_builder, cluster_path)
        self.data_clusters[cidx] = MMapIndexedDataset(cluster_path, skip_warmup=True)

    def get_sample_from_cluster(self, cidx, num_samples):
        """Take the next ``num_samples`` indices from cluster ``cidx``.

        When the cluster's read cursor hits the end, the cluster is
        reshuffled and reading wraps around from the beginning.
        """
        start_idx = self.data_cluster_current_position[cidx]
        samples = list(np.copy(self.data_clusters[cidx][0][start_idx:(start_idx + num_samples)]))
        self.data_cluster_current_position[cidx] += num_samples
        if len(samples) < num_samples:
            num_samples_remained = num_samples - len(samples)
            logger.info(f"reshuffling cluster {cidx}.")
            self.reshuffle_clusters(cidx)
            samples += list(np.copy(self.data_clusters[cidx][0][:num_samples_remained]))
            self.data_cluster_current_position[cidx] = num_samples_remained
        return samples

    def get_next_global_batch(self):
        """Build the next global batch on rank 0 and broadcast it to all ranks."""
        if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]:
            self.curriculum_step += 1
            new_cluster = False
            previous_difficulties = {}
            for metric in self.curriculum_schedulers:
                next_difficulty = self.curriculum_schedulers[metric].update_difficulty(self.curriculum_step)
                # A cluster must be (re)built on the first step and whenever
                # any metric's difficulty changes.
                if metric not in self.current_difficulties or \
                    next_difficulty != self.current_difficulties[metric]:
                    new_cluster = True
                if metric in self.current_difficulties:
                    previous_difficulties[metric] = self.current_difficulties[metric]
                else:
                    # First step: use the lowest possible previous difficulty.
                    if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
                        previous_difficulties[metric] = float('-inf')
                    elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
                        previous_difficulties[metric] = 0
                self.current_difficulties[metric] = next_difficulty
            if new_cluster:
                self.get_new_cluster(previous_difficulties)
            if self.global_rank == 0:
                samples_per_cluster = self.sample_from_clusters()
                batch = []
                for cidx in range(len(samples_per_cluster)):
                    batch += self.get_sample_from_cluster(cidx, samples_per_cluster[cidx])
                self.np_rng.shuffle(batch)
                batch = torch.tensor(batch, device=get_accelerator().current_device_name(), dtype=torch.long).view(-1)
            else:
                # Non-zero ranks receive the batch via broadcast below.
                batch = torch.empty(self.global_batch_size,
                                    device=get_accelerator().current_device_name(),
                                    dtype=torch.long)
            dist.broadcast(batch, 0, group=self.data_parallel_group)
            self.batch = batch.tolist()

    def __iter__(self):
        """Yield this rank's micro-batch slices until total_samples is consumed."""
        while self.consumed_samples <= self.total_samples:
            if len(self.batch) == 0:
                self.get_next_global_batch()
            current_batch = self.batch[:self.micro_batch_times_data_parallel_size]
            self.batch = self.batch[self.micro_batch_times_data_parallel_size:]
            # Yield full chunks; a trailing partial chunk is yielded only
            # when drop_last is False.
            if len(current_batch) == self.micro_batch_times_data_parallel_size or \
                (len(current_batch) > 0 and not self.drop_last):
                start_idx, end_idx = self.get_start_end_idx()
                yield current_batch[start_idx:end_idx]
                self.consumed_samples += len(current_batch)
                current_batch = []

    def state_dict(self):
        """Return a checkpointable snapshot of the sampler state."""
        # NOTE(review): this saves the *global* numpy RNG state, while the
        # sampler actually draws from self.np_rng (a default_rng Generator) —
        # so the generator used for sampling is presumably not restored
        # exactly across checkpoints. TODO confirm against load_state_dict.
        return {
            CURRICULUM_LEARNING_BATCH: self.batch,
            CURRICULUM_LEARNING_CONSUMED_SAMPLES: self.consumed_samples,
            CURRICULUM_LEARNING_STEP: self.curriculum_step,
            CURRICULUM_LEARNING_CURRENT_DIFFICULTIES: self.current_difficulties,
            CURRICULUM_LEARNING_DATA_CLUSTER_PATHS: self.data_cluster_paths,
            CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION: self.data_cluster_current_position,
            CURRICULUM_LEARNING_NP_RNG_STATE: np.random.get_state()
        }

    def load_state_dict(self, state_dict):
        """Restore sampler state saved by ``state_dict`` and re-open cluster files."""
        self.batch = state_dict[CURRICULUM_LEARNING_BATCH]
        self.consumed_samples = state_dict[CURRICULUM_LEARNING_CONSUMED_SAMPLES]
        self.curriculum_step = state_dict[CURRICULUM_LEARNING_STEP]
        self.current_difficulties = state_dict[CURRICULUM_LEARNING_CURRENT_DIFFICULTIES]
        self.data_cluster_paths = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_PATHS]
        self.data_cluster_current_position = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION]
        np.random.set_state(state_dict[CURRICULUM_LEARNING_NP_RNG_STATE])
        cluster_root_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
            CURRICULUM_LEARNING_CLUSTER_PATH]
        # Backward compatibility: previously data_cluster_paths were stored as
        # absolute paths. Now we changed it to just the file name so that even
        # if user moved the cluster files, the checkpoint loading still works
        # as long as user set the correct new CURRICULUM_LEARNING_CLUSTER_PATH
        # in deepspeed json config.
        for idx in range(len(self.data_cluster_paths)):
            if '/' in self.data_cluster_paths[idx]:
                self.data_cluster_paths[idx] = self.data_cluster_paths[idx].split('/')[-1]
        if self.global_rank == 0:
            for cluster_fname in self.data_cluster_paths:
                cluster_path = f"{cluster_root_path}/{cluster_fname}"
                self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True))
                self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (234 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc ADDED
Binary file (8.27 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (311 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc ADDED
Binary file (8.4 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import types
7
+ import torch
8
+ import numpy as np
9
+ from deepspeed.accelerator import get_accelerator
10
+ from deepspeed.runtime.utils import required_torch_version
11
+ from deepspeed import comm as dist
12
+
13
+
14
+ class OnebitAdam(torch.optim.Optimizer):
15
+ """Implements the 1-bit Adam algorithm. Currently GPU-only.
16
+ For usage example please see https://www.deepspeed.ai/tutorials/onebit-adam/
17
+ For technical details please read https://arxiv.org/abs/2102.02888
18
+
19
+ Arguments:
20
+ params (iterable): iterable of parameters to optimize or dicts defining
21
+ parameter groups.
22
+ lr (float, optional): learning rate. (default: 1e-3)
23
+ freeze_step (int, optional): Number of steps for warmup (uncompressed)
24
+ stage before we start using compressed communication. (default 100000)
25
+ betas (Tuple[float, float], optional): coefficients used for computing
26
+ running averages of gradient and its square. (default: (0.9, 0.999))
27
+ eps (float, optional): term added to the denominator to improve
28
+ numerical stability. (default: 1e-8)
29
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
30
+ amsgrad (boolean, optional): whether to use the AMSGrad variant of this
31
+ algorithm from the paper `On the Convergence of Adam and Beyond`_
32
+ (default: False) NOT SUPPORTED in 1-bit Adam!
33
+ eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
34
+ adds eps to the bias-corrected second moment estimate before
35
+ evaluating square root instead of adding it to the square root of
36
+ second moment estimate as in the original paper. (default: False)
37
+ cuda_aware (boolean, required): Set True if the underlying MPI implementation
38
+ supports CUDA-Aware communication. (default: False)
39
+ comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
40
+ .. _Adam\\: A Method for Stochastic Optimization:
41
+ https://arxiv.org/abs/1412.6980
42
+ .. _On the Convergence of Adam and Beyond:
43
+ https://openreview.net/forum?id=ryQu7f-RZ
44
+ """
45
+
46
+ def __init__(self,
47
+ params,
48
+ deepspeed=None,
49
+ lr=1e-3,
50
+ freeze_step=100000,
51
+ bias_correction=True,
52
+ betas=(0.9, 0.999),
53
+ eps=1e-8,
54
+ eps_inside_sqrt=False,
55
+ weight_decay=0.,
56
+ max_grad_norm=0.,
57
+ amsgrad=False,
58
+ cuda_aware=False,
59
+ comm_backend_name='nccl'):
60
+
61
+ if amsgrad:
62
+ raise RuntimeError('1-bit Adam does not support the AMSGrad variant.')
63
+
64
+ defaults = dict(lr=lr,
65
+ bias_correction=bias_correction,
66
+ betas=betas,
67
+ eps=eps,
68
+ weight_decay=weight_decay,
69
+ max_grad_norm=max_grad_norm)
70
+
71
+ super(OnebitAdam, self).__init__(params, defaults)
72
+ self.eps_mode = 0 if eps_inside_sqrt else 1
73
+ self.comm_time = 0.0
74
+ self.step_time = 0.0
75
+ self.ave_step = 1
76
+ self.bk_time = 0.0
77
+
78
+ self.deepspeed = deepspeed
79
+ self.adam_freeze_key = False
80
+ self.initialize = False
81
+ self.freeze_step = freeze_step
82
+ self.cuda_aware = cuda_aware
83
+ self.using_pipeline = False
84
+
85
+ self.comm_backend_name = comm_backend_name
86
+
87
+ assert dist.is_initialized(), "Please initialize the torch distributed backend."
88
+ # Empty initializer. Set handle based on the comm backend as follows.
89
+ self.comm_backend_handle = None
90
+ if self.comm_backend_name == 'nccl':
91
+ assert (
92
+ required_torch_version(min_version=1.8)
93
+ ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
94
+ from deepspeed.runtime.comm.nccl import NcclBackend
95
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
96
+ self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
97
+ elif self.comm_backend_name == 'mpi':
98
+ from deepspeed.runtime.comm.mpi import MpiBackend
99
+ self.comm_backend_handle = MpiBackend(cuda_aware)
100
+ elif self.comm_backend_name == 'hccl':
101
+ from deepspeed.runtime.comm.hccl import HcclBackend
102
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
103
+ self.comm_backend_handle = HcclBackend(self.deepspeed.mpu)
104
+ self.size = self.comm_backend_handle.size
105
+
106
+ self.divider = int(self.size * 8 / np.gcd(self.size, 8))
107
+
108
+ def step(self, closure=None, grads=None):
109
+ """Performs a single optimization step.
110
+ Arguments:
111
+ closure (callable, optional): A closure that reevaluates the model
112
+ and returns the loss.
113
+ grads (list of tensors, optional): weight gradient to use for the
114
+ optimizer update. If gradients have type torch.half, parameters
115
+ are expected to be in type torch.float. (default: None)
116
+ output params (list of tensors, optional): A reduced precision copy
117
+ of the updated weights written out in addition to the regular
118
+ updated weights. Have to be of same type as gradients. (default: None)
119
+ scale (float, optional): factor to divide gradient tensor values
120
+ by before applying to weights. (default: 1)
121
+ """
122
+ loss = None
123
+ if closure is not None:
124
+ loss = closure()
125
+
126
+ gather_time = 0
127
+ allgather_time = 0
128
+ all_time = 0
129
+
130
+ if self.adam_freeze_key is False:
131
+ v_diff_buffer = 0.0
132
+
133
+ if grads is None:
134
+ grads_group = [None] * len(self.param_groups)
135
+ # backward compatibility
136
+ # assuming a list/generator of parameter means single group
137
+ elif isinstance(grads, types.GeneratorType):
138
+ grads_group = [grads]
139
+ elif type(grads[0]) != list:
140
+ grads_group = [grads]
141
+ else:
142
+ grads_group = grads
143
+
144
+ for group, grads_this_group in zip(self.param_groups, grads_group):
145
+ if grads_this_group is None:
146
+ grads_this_group = [None] * len(group['params'])
147
+
148
+ bias_correction = 1 if group['bias_correction'] else 0
149
+
150
+ for p, grad in zip(group['params'], grads_this_group):
151
+ if p.grad is None and grad is None:
152
+ continue
153
+ if grad is None:
154
+ grad = p.grad.data
155
+ if grad.is_sparse:
156
+ raise RuntimeError('1-bit Adam does not support sparse gradients')
157
+
158
+ state = self.state[p]
159
+
160
+ # State initialization
161
+ if len(state) == 0:
162
+ state['step'] = 0
163
+ # Exponential moving average of gradient values
164
+ state['exp_avg'] = torch.zeros_like(p.data)
165
+ # Exponential moving average of squared gradient values
166
+ state['exp_avg_sq'] = torch.zeros_like(p.data)
167
+
168
+ if not self.initialize or (self.adam_freeze_key and 'worker_error' not in state.keys()):
169
+ state['tensor_size'] = torch.numel(p.data)
170
+ state['corrected_tensor_size'] = state['tensor_size']
171
+
172
+ if state['tensor_size'] % (self.size * self.divider) != 0:
173
+ state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] %
174
+ (self.size * self.divider)))
175
+ state['server_chunk_size'] = state['corrected_tensor_size'] // self.size
176
+ get_accelerator().empty_cache()
177
+ state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device)
178
+ state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device)
179
+ get_accelerator().empty_cache()
180
+ self.adam_freeze_key = True
181
+ if not self.initialize and dist.get_rank() == 0:
182
+ print("Cupy Buffers Initialized Successfully.")
183
+
184
+ exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
185
+ beta1, beta2 = group['betas']
186
+
187
+ state['step'] += 1
188
+
189
+ if self.adam_freeze_key is False:
190
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
191
+ exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
192
+ grad = None
193
+ if self.initialize:
194
+ update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
195
+
196
+ else:
197
+ if 'non_freeze' in group.keys() and group['non_freeze'] is True:
198
+ dist.all_reduce(grad)
199
+ grad.mul_(1 / dist.get_world_size())
200
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
201
+ exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
202
+ grad = None
203
+ else:
204
+ if self.initialize is True:
205
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
206
+ grad = None
207
+
208
+ if self.size > 1:
209
+ exp_avg.set_(
210
+ self.comm_backend_handle.compressed_allreduce(exp_avg, state['worker_error'],
211
+ state['server_error'],
212
+ self.deepspeed.local_rank))
213
+ # Because 1-bit compression cannot represent exact zero, it is required to
214
+ # provide a momentum mask for those params that have constant exact zeros in their
215
+ # momentums, otherwise the compression error would keep accumulating.
216
+ # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
217
+ # always have exact zeros in its momentum for row 129 to 512, because it only
218
+ # learns up to seq length 128 while the model supports up to 512 seq length.
219
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
220
+ if 'exp_avg_mask' in group:
221
+ if exp_avg.device != group['exp_avg_mask'].device:
222
+ group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device)
223
+ exp_avg.mul_(group['exp_avg_mask'])
224
+
225
+ if self.initialize:
226
+ update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
227
+
228
+ if self.initialize:
229
+ if group['weight_decay'] > 0.0:
230
+ update += group['weight_decay'] * p.data
231
+ with torch.no_grad():
232
+ p.add_(-group['lr'] * update)
233
+
234
+ if not self.initialize:
235
+ print('Pop out errors', flush=True)
236
+ state.pop('worker_error')
237
+ state.pop('server_error')
238
+
239
+ if not self.initialize:
240
+ self.adam_freeze_key = False
241
+ self.initialize = True
242
+ print(f"Finished the initialization step at rank {dist.get_rank()}")
243
+ return loss
244
+
245
+ if self.adam_freeze_key is False:
246
+ if state['step'] >= self.freeze_step:
247
+ print('OnebitAdam - starting compressed communication')
248
+ self.adam_freeze_key = True
249
+ if self.using_pipeline:
250
+ self.deepspeed.pipeline_enable_backward_allreduce = False
251
+ else:
252
+ self.deepspeed.enable_backward_allreduce = False
253
+
254
+ return loss
255
+
256
+ def load_state_dict(self, state_dict):
257
+ """
258
+ Overrides load_state_dict() to add special handling when loading checkpoints
259
+ """
260
+ # Because at different stage exp_avg_mask may change (e.g.,
261
+ # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
262
+ # in checkpoints but always use the one user provided in training script.
263
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
264
+ # Thus here we keep the exp_avg_mask unchanged when loading checkpoint
265
+ for i, group in enumerate(self.param_groups):
266
+ if 'exp_avg_mask' in group:
267
+ state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
268
+ elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
269
+ state_dict['param_groups'][i].pop('exp_avg_mask')
270
+ super().load_state_dict(state_dict)
271
+ if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
272
+ if dist.get_rank() == 0:
273
+ print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.")
274
+ if self.adam_freeze_key is True:
275
+ self.adam_freeze_key = False
276
+ if self.using_pipeline:
277
+ self.deepspeed.pipeline_enable_backward_allreduce = True
278
+ else:
279
+ self.deepspeed.enable_backward_allreduce = True
280
+ else:
281
+ if dist.get_rank() == 0:
282
+ print("Checkpoint loaded and OnebitAdam compression stage starts/continues.")
283
+ if self.adam_freeze_key is False:
284
+ self.adam_freeze_key = True
285
+ if self.using_pipeline:
286
+ self.deepspeed.pipeline_enable_backward_allreduce = False
287
+ else:
288
+ self.deepspeed.enable_backward_allreduce = False
289
+ # We reset the compression errors when loading checkpoints for 3 reasons:
290
+ # 1) The worker and server error at each GPU are distinct, so in current implementation
291
+ # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
292
+ # If we want to save them correctly we need O(num_gpu*model_size) memory in order to
293
+ # gather all the error, which is a very large memory requirement. It's possible to save
294
+ # them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
295
+ # 2) Even if we are able to save the compression errors correctly, you need to have the
296
+ # exact same number of GPUs in order to load them correctly.
297
+ # 3) We verified on BERT pre-training that occasionally resetting the compression error
298
+ # at checkpoint loading does not affect the convergence.
299
+ # However, please avoid frequent checkpoint loading which could break the error
300
+ # compensation mechanism thus affect the convergence.
301
+ for group in self.param_groups:
302
+ for p in group['params']:
303
+ if 'worker_error' in self.state[p]:
304
+ self.state[p].pop('worker_error')
305
+ if 'server_error' in self.state[p]:
306
+ self.state[p].pop('server_error')
infer_4_47_1/lib/python3.10/site-packages/torch/bin/protoc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3390873b2da56c1397adec3728f1588c51e182f15b123d3b4d4f248d31c1f4da
3
+ size 5330888
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc ADDED
Binary file (3.85 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc ADDED
Binary file (4.65 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc ADDED
Binary file (92.9 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_tensor.cpython-310.pyc ADDED
Binary file (2.87 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc ADDED
Binary file (1.62 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # mypy: ignore-errors
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc ADDED
Binary file (606 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc ADDED
Binary file (630 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc ADDED
Binary file (1.45 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc ADDED
Binary file (3.85 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc ADDED
Binary file (19.5 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc ADDED
Binary file (2.47 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc ADDED
Binary file (1.4 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc ADDED
Binary file (17.8 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc ADDED
Binary file (4.86 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (1.83 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc ADDED
Binary file (1.89 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py ADDED
File without changes
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (201 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py ADDED
@@ -0,0 +1,548 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ # Copyright (c) Meta Platforms, Inc. and affiliates
4
+
5
+ import itertools
6
+ import sys
7
+ from dataclasses import dataclass
8
+ from functools import wraps
9
+ from typing import Any, Callable, cast, Dict, Iterator, List, Sequence, Tuple, TypeVar
10
+
11
+ import torch
12
+ import torch.distributed as dist
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+
16
+ from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard
17
+ from torch.distributed._tensor.placement_types import Placement
18
+ from torch.distributed.tensor.parallel import (
19
+ ColwiseParallel,
20
+ parallelize_module,
21
+ PrepareModuleInput,
22
+ RowwiseParallel,
23
+ SequenceParallel,
24
+ )
25
+ from torch.testing._internal.common_distributed import (
26
+ MultiProcessTestCase,
27
+ MultiThreadedTestCase,
28
+ skip_if_lt_x_gpu,
29
+ run_subtests,
30
+ TEST_SKIPS,
31
+ )
32
+
33
+ from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec
34
+
35
+ DEVICE_TYPE = (
36
+ "cuda" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else "cpu"
37
+ )
38
+
39
+ NUM_DEVICES = 4
40
+
41
+ # We use this as a proxy for "multiple GPUs exist"
42
+ if torch.cuda.is_available() and torch.cuda.device_count() > 1:
43
+ # when we actually have multiple GPUs, relax the requirement to smaller counts.
44
+ NUM_DEVICES = min(NUM_DEVICES, torch.cuda.device_count())
45
+
46
+ T = TypeVar("T")
47
+
48
+
49
+ # simple RMSNorm layer for testing
50
+ class RMSNormPython(torch.nn.Module):
51
+ def __init__(self, dim: int, eps: float = 1e-6):
52
+ super().__init__()
53
+ self.eps = eps
54
+ self.weight = torch.nn.Parameter(torch.ones(dim))
55
+
56
+ def _norm(self, x):
57
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
58
+
59
+ def forward(self, x):
60
+ output = self._norm(x)
61
+ return output * self.weight
62
+
63
+
64
+ class MLPModule(nn.Module):
65
+ def __init__(self, device, bias: bool = True):
66
+ super().__init__()
67
+ torch.manual_seed(5)
68
+ self.net1 = nn.Linear(10, 16, bias=bias, device=device)
69
+ self.relu = nn.ReLU()
70
+ self.net2 = nn.Linear(16, 10, bias=bias, device=device)
71
+
72
+ def forward(self, x):
73
+ return self.net2(self.relu(self.net1(x)))
74
+
75
+ def reset_parameters(self):
76
+ self.net1.reset_parameters()
77
+ self.net2.reset_parameters()
78
+
79
+
80
+ class MLPStacked(nn.Module):
81
+ def __init__(self, device, n_layers: int = 2):
82
+ super().__init__()
83
+ self.layers = nn.ModuleList([MLPModule(device) for i in range(n_layers)])
84
+
85
+ def forward(self, x):
86
+ for layer in self.layers:
87
+ x = layer(x)
88
+ return x
89
+
90
+
91
+ @dataclass
92
+ class ModelArgs:
93
+ n_layers: int = 2
94
+ vocab_size: int = 8
95
+ max_seq_len: int = 16
96
+ dim: int = 16
97
+ n_heads: int = 4
98
+ dropout_p: float = 0.1
99
+ use_attn_mask: bool = True
100
+ weight_tying: bool = True
101
+ checkpoint_activations: bool = False
102
+
103
+
104
+ class Attention(nn.Module):
105
+ def __init__(self, args: ModelArgs):
106
+ super().__init__()
107
+ assert args.dim % args.n_heads == 0
108
+ self.head_dim = args.dim // args.n_heads
109
+ self.n_heads = args.n_heads
110
+ self.dropout_p = args.dropout_p
111
+ self.resid_dropout = nn.Dropout(args.dropout_p)
112
+ self.use_attn_mask = args.use_attn_mask
113
+
114
+ self.wq = nn.Linear(args.dim, args.dim, bias=False)
115
+ self.wk = nn.Linear(args.dim, args.dim, bias=False)
116
+ self.wv = nn.Linear(args.dim, args.dim, bias=False)
117
+ self.wo = nn.Linear(args.dim, args.dim, bias=False)
118
+
119
+ def forward(self, x):
120
+ bsz, seq_len, _ = x.size()
121
+ queries, keys, values = self.wq(x), self.wk(x), self.wv(x)
122
+ queries = queries.view(bsz, seq_len, self.n_heads, self.head_dim)
123
+ keys = keys.view(bsz, seq_len, self.n_heads, self.head_dim)
124
+ values = values.view(bsz, seq_len, self.n_heads, self.head_dim)
125
+
126
+ queries = queries.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim)
127
+ keys = keys.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim)
128
+ values = values.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim)
129
+
130
+ output = F.scaled_dot_product_attention(
131
+ queries,
132
+ keys,
133
+ values,
134
+ None,
135
+ self.dropout_p if self.training else 0,
136
+ self.use_attn_mask,
137
+ )
138
+ output = output.transpose(1, 2).contiguous().view(bsz, seq_len, -1)
139
+ return self.resid_dropout(self.wo(output))
140
+
141
+
142
+ class FeedForward(nn.Module):
143
+ def __init__(self, dim, hidden_dim, dropout_p):
144
+ super().__init__()
145
+ self.w1 = nn.Linear(dim, hidden_dim)
146
+ self.gelu = nn.GELU()
147
+ self.w2 = nn.Linear(hidden_dim, dim)
148
+ self.resid_dropout = nn.Dropout(dropout_p)
149
+
150
+ def forward(self, x):
151
+ return self.resid_dropout(self.w2(self.gelu(self.w1(x))))
152
+
153
+
154
+ class TransformerBlock(nn.Module):
155
+ def __init__(self, args: ModelArgs):
156
+ super().__init__()
157
+ self.attention_norm = nn.LayerNorm(args.dim)
158
+ self.attention = Attention(args)
159
+ self.ffn_norm = nn.LayerNorm(args.dim)
160
+ self.feed_forward = FeedForward(
161
+ args.dim, hidden_dim=4 * args.dim, dropout_p=args.dropout_p
162
+ )
163
+
164
+ def forward(self, x):
165
+ h = x + self.attention(self.attention_norm(x))
166
+ out = h + self.feed_forward(self.ffn_norm(h))
167
+ return out
168
+
169
+
170
+ # A toy transformer model, partly inspired by the nanoGPT model:
171
+ # https://github.com/karpathy/nanoGPT.
172
+ class Transformer(nn.Module):
173
+ def __init__(self, args: ModelArgs):
174
+ super().__init__()
175
+ assert args.vocab_size is not None
176
+ assert args.max_seq_len is not None
177
+ self.model_args = args
178
+ self.max_seq_len = args.max_seq_len
179
+ self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
180
+ self.pos_embeddings = nn.Embedding(args.max_seq_len, args.dim)
181
+ self.dropout = nn.Dropout(args.dropout_p)
182
+ self.layers = nn.ModuleList()
183
+ for _ in range(args.n_layers):
184
+ self.layers.append(TransformerBlock(args))
185
+ self.norm = nn.LayerNorm(args.dim)
186
+ self.output = nn.Linear(args.dim, args.vocab_size, bias=False)
187
+ if args.weight_tying:
188
+ self.output.weight = self.tok_embeddings.weight
189
+ self.checkpoint_activations = args.checkpoint_activations
190
+
191
+ def forward(self, tokens):
192
+ _bsz, seq_len = tokens.size()
193
+ assert seq_len <= self.max_seq_len
194
+ h = self.tok_embeddings(tokens)
195
+ pos = torch.arange(0, seq_len, device=tokens.device)
196
+ p = self.pos_embeddings(pos) # positional embeddings of shape (seq_len, dim)
197
+ h = h + p
198
+ h = self.dropout(h)
199
+ for layer in self.layers:
200
+ if self.checkpoint_activations:
201
+ h = torch.utils.checkpoint.checkpoint(layer, h, use_reentrant=False)
202
+ else:
203
+ h = layer(h)
204
+ h = self.norm(h)
205
+ output = self.output(h).float()
206
+ return output
207
+
208
+ @staticmethod
209
+ def parallelize(
210
+ module: "Transformer", device_mesh: DeviceMesh, use_seq_parallel: bool, local_output_for_attn: bool = False
211
+ ) -> nn.Module:
212
+ assert isinstance(module, Transformer), f"Requires Transformer but got {module}"
213
+ # Parallelize the root submodules.
214
+ if use_seq_parallel:
215
+ root_plan = {
216
+ "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(1)),
217
+ "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(0)),
218
+ "norm": SequenceParallel(),
219
+ }
220
+ else:
221
+ root_plan = {
222
+ "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()),
223
+ "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()),
224
+ }
225
+
226
+ module_tp = parallelize_module(module, device_mesh, root_plan)
227
+ # Parallelize the attention and feed forward submodules.
228
+ for layer in module_tp.layers:
229
+ layer_parallelize_plan = {}
230
+ if use_seq_parallel:
231
+ layer_parallelize_plan["attention"] = PrepareModuleInput(
232
+ input_layouts=Shard(1),
233
+ desired_input_layouts=Replicate(),
234
+ )
235
+ # shard the RMSNorms
236
+ layer_parallelize_plan["attention_norm"] = SequenceParallel()
237
+ layer_parallelize_plan["ffn_norm"] = SequenceParallel()
238
+ layer_parallelize_plan["attention.wq"] = ColwiseParallel(use_local_output=local_output_for_attn)
239
+ layer_parallelize_plan["attention.wk"] = ColwiseParallel(use_local_output=local_output_for_attn)
240
+ layer_parallelize_plan["attention.wv"] = ColwiseParallel(use_local_output=local_output_for_attn)
241
+ layer_parallelize_plan["attention.wo"] = (
242
+ RowwiseParallel(output_layouts=Shard(1))
243
+ if use_seq_parallel
244
+ else RowwiseParallel()
245
+ )
246
+
247
+ layer_parallelize_plan["feed_forward.w1"] = (
248
+ ColwiseParallel(input_layouts=Shard(1))
249
+ if use_seq_parallel
250
+ else ColwiseParallel()
251
+ )
252
+ layer_parallelize_plan["feed_forward.w2"] = (
253
+ RowwiseParallel(output_layouts=Shard(1))
254
+ if use_seq_parallel
255
+ else RowwiseParallel()
256
+ )
257
+
258
+ parallelize_module(layer, device_mesh, layer_parallelize_plan)
259
+
260
+ # Parallelize the output submodule. If weight tying is enabled, we need to
261
+ # make sure output.weight is sharded consistently as tok_embeddings.weight,
262
+ # at the cost of the all_reduce operation using RowwiseParallel.
263
+ output_parallelize_plan = (
264
+ ColwiseParallel(
265
+ input_layouts=Shard(1),
266
+ output_layouts=Replicate(),
267
+ )
268
+ if use_seq_parallel
269
+ else ColwiseParallel(output_layouts=Replicate())
270
+ )
271
+ parallelize_module(module_tp.output, device_mesh, output_parallelize_plan)
272
+
273
+ if local_output_for_attn:
274
+ for layer in module_tp.layers:
275
+ layer.attention.n_heads = module_tp.model_args.n_heads // device_mesh.size()
276
+
277
+ # Manually set output.weight so that parameters and gradients are shared.
278
+ if module_tp.model_args.weight_tying:
279
+ module_tp.output.weight = module_tp.tok_embeddings.weight
280
+
281
+ return module_tp
282
+
283
+
284
+ def skip_unless_torch_gpu(method: T) -> T:
285
+ """
286
+ Test decorator which skips the test unless there's a GPU available to torch.
287
+
288
+ >>> # xdoctest: +SKIP
289
+ >>> @skip_unless_torch_gpu
290
+ >>> def test_some_method(self) -> None:
291
+ >>> ...
292
+ """
293
+ # The builtin @skip_if_no_gpu relies on os.environ['WORLD_SIZE'] being set.
294
+ return cast(T, skip_if_lt_x_gpu(NUM_DEVICES)(method))
295
+
296
+
297
+ class DTensorTestBase(MultiProcessTestCase):
298
+ @property
299
+ def world_size(self) -> int:
300
+ return NUM_DEVICES
301
+
302
+ @property
303
+ def backend(self) -> str:
304
+ backend = "nccl" if self.device_type == "cuda" else "gloo"
305
+ return backend
306
+
307
+ def build_device_mesh(self) -> DeviceMesh:
308
+ return DeviceMesh(self.device_type, list(range(self.world_size)))
309
+
310
+ def init_pg(self) -> None:
311
+ if "nccl" in self.backend and torch.cuda.device_count() < self.world_size:
312
+ sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
313
+
314
+ if self.backend not in ["nccl", "gloo", "mpi", "cpu:gloo,cuda:nccl"]:
315
+ raise RuntimeError(f"Backend {self.backend} not supported!")
316
+
317
+ dist.init_process_group(
318
+ backend=self.backend,
319
+ world_size=self.world_size,
320
+ rank=self.rank, # pyre-ignore[16]
321
+ init_method=f"file://{self.file_name}", # pyre-ignore[16]
322
+ )
323
+
324
+ # set device for nccl pg for collectives
325
+ if "nccl" in self.backend:
326
+ torch.cuda.set_device(self.rank)
327
+
328
+ def destroy_pg(self) -> None:
329
+ # Wait for all ranks to reach here before starting shutdown.
330
+ # FIXME dist.barrier deadlocks with multiple threads and NCCL: https://github.com/pytorch/pytorch/issues/95895
331
+ # dist.all_reduce(torch.zeros((1,), device="cuda" if torch.cuda.is_available() else "cpu"))
332
+ # FIXME can't use the above all_reduce as it causes hangs on bionic and focal. It hangs:
333
+ # test_dtensor.py -- DTensorMeshTest.test_dtensor_device_mesh_device_conversion
334
+ dist.barrier()
335
+ dist.destroy_process_group()
336
+
337
+ def setUp(self) -> None:
338
+ super().setUp()
339
+ self._spawn_processes()
340
+
341
+ # pyre-ignore[2]:
342
+ def _test_op(self, mesh: DeviceMesh, op_call, *args, **kwargs) -> None:
343
+ out = op_call(*args, **kwargs)
344
+ dtc = DTensorConverter(mesh, args, kwargs)
345
+ for d_args, d_kwargs in dtc:
346
+ # pyre can't find assertTrue anymore?
347
+ self.assertEqual(dtc.successful(), True)
348
+ d_out = op_call(*d_args, **d_kwargs)
349
+ self.assertEqual(d_out.full_tensor(), out)
350
+
351
+ def run_subtests(self, *args, **kwargs):
352
+ return run_subtests(self, *args, **kwargs)
353
+
354
+
355
+ TestFunc = Callable[[object], object]
356
+
357
+
358
+ # wrapper to initialize comms (processgroup)
359
+ def with_comms(func: TestFunc) -> TestFunc:
360
+ assert func is not None
361
+
362
+ @wraps(func) # pyre-ignore[6]
363
+ def wrapper(
364
+ self, *args: Tuple[object], **kwargs: Dict[str, Any] # type: ignore[misc]
365
+ ) -> None:
366
+ # if enough GPU we can use GPU, otherwise we fallback to CPU
367
+ if not torch.cuda.is_available() or torch.cuda.device_count() < self.world_size:
368
+ self.device_type = "cpu"
369
+ else:
370
+ self.device_type = DEVICE_TYPE
371
+
372
+ self.init_pg()
373
+
374
+ try:
375
+ func(self, *args, **kwargs) # type: ignore[misc]
376
+ except Exception as e:
377
+ dist.destroy_process_group()
378
+ raise e
379
+
380
+ self.destroy_pg()
381
+
382
+ return wrapper
383
+
384
+
385
+ class DTensorOpTestBase(MultiThreadedTestCase):
386
+ @property
387
+ def world_size(self) -> int:
388
+ return NUM_DEVICES
389
+
390
+ @property
391
+ def device_type(self) -> str:
392
+ return DEVICE_TYPE
393
+
394
+ def build_device_mesh(self):
395
+ return DeviceMesh(self.device_type, list(range(self.world_size)))
396
+
397
+ def setUp(self) -> None:
398
+ super().setUp()
399
+ self._spawn_threads()
400
+
401
+
402
+ # This is a class for converting args/kwargs of an op into distributed args/kwargs
403
+ class DTensorConverter:
404
+ def __init__(
405
+ self,
406
+ mesh: DeviceMesh,
407
+ args: Tuple[object, ...],
408
+ kwargs: Dict[str, object],
409
+ ) -> None:
410
+ self.hit = 0
411
+ self.miss = 0
412
+ self.mesh = mesh
413
+ self.args = args
414
+ self.kwargs = kwargs
415
+ flatten_args, flatten_args_spec = tree_flatten(args)
416
+ flatten_kwargs, flatten_kwargs_spec = tree_flatten(kwargs)
417
+
418
+ self.flatten_args: List[object] = flatten_args
419
+ self.flatten_args_spec: TreeSpec = flatten_args_spec
420
+ self.flatten_kwargs: List[object] = flatten_kwargs
421
+ self.flatten_kwargs_spec: TreeSpec = flatten_kwargs_spec
422
+
423
+ choices_for_args = []
424
+ for arg in self.flatten_args:
425
+ if isinstance(arg, torch.Tensor):
426
+ choices_for_args.append(self.gen_sharding_choices_for_arg(arg))
427
+
428
+ for arg in self.flatten_kwargs:
429
+ if isinstance(arg, torch.Tensor):
430
+ choices_for_args.append(self.gen_sharding_choices_for_arg(arg))
431
+
432
+ self.sharding_combs: Iterator[Sequence[Placement]] = iter(
433
+ itertools.product(*choices_for_args)
434
+ )
435
+
436
+ def successful(self) -> bool:
437
+ return self.hit > 0 and self.miss == 0
438
+
439
+ def is_supported_tensor(self, t: torch.Tensor) -> bool:
440
+ # TODO: dist tensor need to support quantized and sparse
441
+ # tensors, quantized tensor might be relatively easy, but
442
+ # sparse tensor have special layouts that we need to possibly
443
+ # deal with, until we are clear about them, we don't officially
444
+ # support them.
445
+ return not any(
446
+ [
447
+ t.is_sparse_csr,
448
+ t.is_sparse,
449
+ t.is_mkldnn,
450
+ t.is_quantized,
451
+ t.is_nested,
452
+ torch._is_functional_tensor(t),
453
+ t.is_neg(),
454
+ t.is_conj(),
455
+ t.device.type in ("lazy", "meta"),
456
+ # We need a way to test if a tensor is batched but there
457
+ # is no official APi to do it
458
+ # torch._C._is_batched(t),
459
+ ]
460
+ )
461
+
462
+ def gen_sharding_choices_for_arg(self, arg: torch.Tensor) -> Sequence[Placement]:
463
+ mesh_size = self.mesh.size()
464
+ sharding_choices: List[Placement] = [Replicate()]
465
+ # c10d collective does not support bool tensor
466
+ # for bool tensor we treat it as replicated
467
+ if arg.dtype != torch.bool:
468
+ # only generating choices with: replicate, or sharding
469
+ # evenly on a dimension that could be sharded
470
+ sharding_choices = sharding_choices + [
471
+ Shard(i)
472
+ for i, s in enumerate(arg.shape)
473
+ if s > 1 and s % mesh_size == 0
474
+ ]
475
+ # TODO: add multi mesh choices
476
+ # all_choices = itertools.product(
477
+ # *(self.mesh.ndim * [sharding_choices])
478
+ # )
479
+ return sharding_choices
480
+
481
+ def __iter__(self) -> "DTensorConverter":
482
+ return self
483
+
484
+ def __next__(self) -> Tuple[Tuple[object, ...], Dict[str, object]]:
485
+ try:
486
+ next_sharding_choices = next(self.sharding_combs)
487
+ idx = 0
488
+
489
+ new_args: List[object] = []
490
+ for arg in self.flatten_args:
491
+ if isinstance(arg, torch.Tensor):
492
+ new_args.append(
493
+ self.to_dist_tensor(
494
+ arg, self.mesh, [next_sharding_choices[idx]]
495
+ )
496
+ )
497
+ idx += 1
498
+ else:
499
+ new_args.append(arg)
500
+
501
+ new_kwargs: List[object] = []
502
+ for arg in self.flatten_kwargs:
503
+ if isinstance(arg, torch.Tensor):
504
+ new_kwargs.append(
505
+ self.to_dist_tensor(
506
+ arg, self.mesh, [next_sharding_choices[idx]]
507
+ )
508
+ )
509
+ idx += 1
510
+ else:
511
+ new_kwargs.append(arg)
512
+
513
+ return (
514
+ tree_unflatten(new_args, self.flatten_args_spec),
515
+ tree_unflatten(new_kwargs, self.flatten_kwargs_spec),
516
+ )
517
+ except StopIteration as e:
518
+ raise StopIteration from e
519
+
520
+ def to_dist_tensor(
521
+ self, t: torch.Tensor, mesh: DeviceMesh, placements: List[Placement]
522
+ ) -> torch.Tensor:
523
+ if type(t) is torch.Tensor or type(t) is nn.Parameter:
524
+ if self.is_supported_tensor(t):
525
+ self.hit += 1
526
+ if t.ndim == 0:
527
+ # scalar tensor by default will be replicated
528
+ r = distribute_tensor(t, mesh, [Replicate()] * mesh.ndim)
529
+ else:
530
+ # distribute non-scalar tensors
531
+ r = distribute_tensor(t, mesh, placements)
532
+ if type(t) is nn.Parameter:
533
+ r = nn.Parameter( # type: ignore[assignment]
534
+ r, requires_grad=r.requires_grad
535
+ )
536
+ return r
537
+ else:
538
+ self.miss += 1
539
+ return t
540
+ elif torch.overrides.is_tensor_like(t):
541
+ # Blindly converting tensor subclasses to dist tensor can cause
542
+ # unpredictable problems, we explicitly disable this conversion
543
+ # for now (i.e. we don't support DTensor holding tensor subclass
544
+ # until there's a strong reason later).
545
+ self.miss += 1
546
+ return t
547
+ else:
548
+ raise RuntimeError(f"Trying to convert to DTensor, but got {type(t)}")
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ # Owner(s): ["oncall: distributed"]
4
+
5
+ import copy
6
+ from itertools import chain
7
+ from typing import Any, Dict
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ from torch.distributed._sharded_tensor import ShardedTensor
12
+ from torch.distributed._state_dict_utils import _gather_state_dict
13
+ from torch.distributed._tensor import DTensor
14
+ from torch.distributed.checkpoint.state_dict import (
15
+ _PG,
16
+ _STATE,
17
+ set_state_dict,
18
+ StateDictOptions,
19
+ )
20
+
21
+
22
+ class VerifyStateDictMixin:
23
+ def _compare_tensor(self, orig_tensor, dist_tensor, offload_to_cpu=False):
24
+ if isinstance(dist_tensor, (DTensor, ShardedTensor)):
25
+ dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey")
26
+
27
+ if offload_to_cpu:
28
+ orig_tensor = orig_tensor.cpu()
29
+ dist_tensor = dist_tensor.cpu()
30
+ self.assertTrue(isinstance(dist_tensor, torch.Tensor))
31
+ self.assertTrue(torch.allclose(orig_tensor, dist_tensor))
32
+
33
+ def _verify_msd(
34
+ self,
35
+ msd: Dict[str, Any],
36
+ dist_msd: Dict[str, Any],
37
+ options: StateDictOptions = StateDictOptions(),
38
+ offload_to_cpu=False,
39
+ ) -> None:
40
+ if not options.ignore_frozen_params:
41
+ self.assertEqual(len(msd), len(dist_msd))
42
+ for fqn, param in msd.items():
43
+ dist_param = dist_msd.get(fqn, None)
44
+ if not options.ignore_frozen_params:
45
+ self.assertIsNotNone(dist_param, f"{fqn=}")
46
+ try:
47
+ self._compare_tensor(param, dist_param, offload_to_cpu)
48
+ except AssertionError as e:
49
+ raise AssertionError(
50
+ f"{fqn} has mismatched value {param} {dist_param}"
51
+ ) from e
52
+ elif dist_param is None:
53
+ self.assertFalse(param.requires_grad, f"{fqn=}")
54
+
55
+ def _verify_osd(
56
+ self,
57
+ model: nn.Module,
58
+ optim: torch.optim.Optimizer,
59
+ osd: Dict[str, Any],
60
+ dist_osd: Dict[str, Any],
61
+ ) -> None:
62
+ params = list(chain.from_iterable(g["params"] for g in optim.param_groups))
63
+ param_pid_mapping = dict(zip(params, range(len(params))))
64
+ fqn_pid_mapping = {}
65
+ for fqn, param in model.named_parameters():
66
+ pid = param_pid_mapping[param]
67
+ fqn_pid_mapping[fqn] = pid
68
+ fqn_pid_mapping[pid] = fqn
69
+ # Check optimizer_state_dict state
70
+
71
+ self.assertEqual(len(osd[_STATE]), len(dist_osd[_STATE]))
72
+ for pid, states in osd[_STATE].items():
73
+ fqn = fqn_pid_mapping[pid]
74
+ dist_states = dist_osd[_STATE].get(fqn, None)
75
+ self.assertIsNotNone(dist_states, fqn)
76
+ self.assertEqual(len(states), len(dist_states))
77
+ for key, state in states.items():
78
+ dist_state = states.get(key, None)
79
+ self.assertIsNotNone(dist_state)
80
+ self._compare_tensor(state, dist_state)
81
+
82
+ # Check optimizer_state_dict param_group
83
+ old_dist_osd_pg = dist_osd[_PG]
84
+ if len(osd[_PG]) != len(dist_osd[_PG]):
85
+ self.assertTrue(len(dist_osd[_PG]) > len(osd[_PG]))
86
+ new_pg = copy.deepcopy(dist_osd[_PG][0])
87
+ new_pg["params"] = []
88
+ for dist_group in dist_osd[_PG]:
89
+ new_pg["params"].extend(dist_group["params"])
90
+ dist_osd[_PG] = [new_pg]
91
+
92
+ self.assertEqual(len(osd[_PG]), len(dist_osd[_PG]))
93
+ for group, dist_group in zip(osd[_PG], dist_osd[_PG]):
94
+ self.assertEqual(len(group), len(dist_group))
95
+ for key, value in group.items():
96
+ # Below doesn't work because param_groups can have None
97
+ # values.
98
+ # dist_value = dist_group.get(key, None)
99
+ # self.assertIsNotNone(dist_value, (dist_group, group))
100
+ dist_value = dist_group[key]
101
+ if key == "params":
102
+ fqns = [fqn_pid_mapping[pid] for pid in value]
103
+ self.assertEqual(sorted(fqns), sorted(dist_value))
104
+ else:
105
+ self.assertEqual(value, dist_value)
106
+ dist_osd[_PG] = old_dist_osd_pg
107
+
108
+ def _verify_osd_by_load(
109
+ self,
110
+ model: nn.Module,
111
+ optim: torch.optim.Optimizer,
112
+ new_optim: torch.optim.Optimizer,
113
+ dist_osd: Dict[str, Any],
114
+ ) -> None:
115
+ new_dist_osd = _gather_state_dict(dist_osd)
116
+ set_state_dict(
117
+ model,
118
+ optimizers=new_optim,
119
+ model_state_dict={},
120
+ optim_state_dict=new_dist_osd,
121
+ )
122
+ self.assertEqual(optim.state_dict(), new_optim.state_dict())
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc ADDED
Binary file (21.9 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py ADDED
@@ -0,0 +1,734 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import enum
4
+ from typing import Tuple
5
+
6
+ import torch
7
+ import torch.distributed.rpc as rpc
8
+ import torch.testing._internal.dist_utils as dist_utils
9
+ from torch import Tensor, nn
10
+ from torch._jit_internal import Future
11
+ from torch.distributed.nn import RemoteModule
12
+ from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES
13
+ from torch.distributed.nn.api.remote_module import _RemoteModule
14
+ from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
15
+ from torch.testing._internal.common_utils import TemporaryFileName
16
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
17
+ RpcAgentTestFixture,
18
+ )
19
+
20
+
21
+ _PARAM_VAL = torch.nn.Parameter(torch.ones(1))
22
+
23
+
24
+ # RPC handler for querying the device on the destination worker.
25
+ def remote_device(module_rref):
26
+ for param in module_rref.local_value().parameters():
27
+ return param.device
28
+
29
+
30
+ # RPC handler for querying __dict__ on the destination worker.
31
+ def remote_module_attributes(remote_module):
32
+ return remote_module.__dict__
33
+
34
+
35
+ # RPC handler for running forward on the destination worker.
36
+ def remote_forward(remote_module, args):
37
+ return remote_module.forward(*args)
38
+
39
+ # RPC handler for running forward_async on the destination worker.
40
+ def remote_forward_async(remote_module, args):
41
+ # Since future cannot be pickled and sent over the RPC layer,
42
+ # have to wait and behave just like ``forward_sync``.
43
+ return remote_module.forward_async(*args).wait()
44
+
45
+ # RPC handler for getting training mode on the destination worker.
46
+ def get_remote_training_arg(module_rref):
47
+ return module_rref.local_value().training
48
+
49
+ class ModuleCreationMode(enum.Enum):
50
+ MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface"
51
+ MODULE_CTOR = "module_ctor"
52
+
53
+
54
+ @torch.jit.interface
55
+ class MyModuleInterface:
56
+ def forward(
57
+ self, tensor: Tensor, number: int, word: str = "default"
58
+ ) -> Tuple[str, int, Tensor]:
59
+ # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
60
+ pass
61
+
62
+
63
+ @torch.jit.interface
64
+ class RemoteMyModuleInterface:
65
+ def forward(
66
+ self, tensor: Tensor, number: int, word: str = "default"
67
+ ) -> Tuple[str, int, Tensor]:
68
+ # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
69
+ pass
70
+
71
+ def forward_async(
72
+ self, tensor: Tensor, number: int, word: str = "default"
73
+ ) -> Future[Tuple[str, int, Tensor]]:
74
+ pass
75
+
76
+
77
+ class MyModule(nn.Module):
78
+ def __init__(self, first_arg, first_kwarg=-1):
79
+ super().__init__()
80
+ self.param1 = _PARAM_VAL
81
+
82
+ def forward(
83
+ self, tensor: Tensor, number: int, word: str = "default"
84
+ ) -> Tuple[str, int, Tensor]:
85
+ return word, number, tensor
86
+
87
+
88
+ class BadModule:
89
+ def __init__(self, first_arg, first_kwarg=-1):
90
+ pass
91
+
92
+
93
+ def create_scripted_module(first_arg, first_kwarg=-1):
94
+ module = MyModule(first_arg, first_kwarg=first_kwarg)
95
+ scripted_module = torch.jit.script(module)
96
+ return scripted_module
97
+
98
+
99
+ # Common utils for both CPU and CUDA test suites
100
+ class CommonRemoteModuleTest(RpcAgentTestFixture):
101
+ @property
102
+ def world_size(self): # Override setting in RpcAgentTestFixture
103
+ return 2
104
+
105
+ @staticmethod
106
+ def _create_remote_module_iter(remote_device, modes=None):
107
+ if modes is None:
108
+ modes = ModuleCreationMode.__members__.values()
109
+
110
+ args = (1,)
111
+ kwargs = dict(first_kwarg=2)
112
+
113
+ if ModuleCreationMode.MODULE_CTOR in modes:
114
+ remote_module = RemoteModule(remote_device, MyModule, args, kwargs)
115
+ yield remote_module
116
+
117
+ if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes:
118
+ remote_module = _RemoteModule(
119
+ remote_device,
120
+ create_scripted_module,
121
+ args,
122
+ kwargs,
123
+ _module_interface_cls=MyModuleInterface,
124
+ )
125
+ scripted_remote_module = torch.jit.script(remote_module)
126
+ yield scripted_remote_module
127
+
128
+
129
+ class RemoteModuleTest(CommonRemoteModuleTest):
130
+ @dist_utils.dist_init
131
+ def test_bad_module(self):
132
+ if self.rank != 0:
133
+ return
134
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
135
+ remote_device = f"{dst_worker_name}/cpu"
136
+ args = (1,)
137
+ kwargs = dict(first_kwarg=2)
138
+
139
+ with self.assertRaisesRegex(
140
+ ValueError,
141
+ r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
142
+ ):
143
+ RemoteModule(remote_device, BadModule, args, kwargs).forward()
144
+
145
+ with self.assertRaisesRegex(
146
+ ValueError,
147
+ r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
148
+ ):
149
+ RemoteModule(remote_device, BadModule, args, kwargs).forward()
150
+
151
+
152
+ @dist_utils.dist_init
153
+ def test_forward_async(self):
154
+ if self.rank != 0:
155
+ return
156
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
157
+ args = (torch.ones(1), 2, "3")
158
+ for remote_module in self._create_remote_module_iter(dst_worker_name):
159
+ ret_fut = remote_module.forward_async(*args)
160
+ ret = ret_fut.wait()
161
+ self.assertEqual(ret, tuple(reversed(args)))
162
+
163
+ @dist_utils.dist_init
164
+ def test_forward_async_script(self):
165
+ if self.rank != 0:
166
+ return
167
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
168
+
169
+ scripted_remote_module = next(
170
+ self._create_remote_module_iter(
171
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
172
+ )
173
+ )
174
+
175
+ @torch.jit.script
176
+ def run_forward_async(scripted_remote_module: RemoteMyModuleInterface):
177
+ ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3")
178
+ ret = ret_fut.wait()
179
+ return ret
180
+
181
+ ret = run_forward_async(scripted_remote_module)
182
+
183
+ self.assertEqual(ret, ("3", 2, torch.ones(1)))
184
+
185
+ @dist_utils.dist_init
186
+ def test_forward_sync(self):
187
+ if self.rank != 0:
188
+ return
189
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
190
+ args = (torch.ones(1), 2, "3")
191
+ for remote_module in self._create_remote_module_iter(dst_worker_name):
192
+ ret = remote_module.forward(*args)
193
+ self.assertEqual(ret, tuple(reversed(args)))
194
+
195
+ @dist_utils.dist_init
196
+ def test_forward_sync_script(self):
197
+ if self.rank != 0:
198
+ return
199
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
200
+
201
+ scripted_remote_module = next(
202
+ self._create_remote_module_iter(
203
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
204
+ )
205
+ )
206
+
207
+ @torch.jit.script
208
+ def run_forward(scripted_remote_module: MyModuleInterface):
209
+ ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
210
+ return ret
211
+
212
+ ret = run_forward(scripted_remote_module)
213
+
214
+ self.assertEqual(ret, ("3", 2, torch.ones(1)))
215
+
216
+ @dist_utils.dist_init
217
+ def test_forward_with_kwargs(self):
218
+ if self.rank != 0:
219
+ return
220
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
221
+ args = (torch.ones(1), 2)
222
+ kwargs = dict(word="3")
223
+ # Only test Python nn.Module, because script module methods don't support taking kwargs.
224
+ for remote_module in self._create_remote_module_iter(
225
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
226
+ ):
227
+ ret_fut = remote_module.forward_async(*args, **kwargs)
228
+ ret = ret_fut.wait()
229
+ self.assertEqual(ret, tuple(reversed(args + ("3",))))
230
+
231
+ ret = remote_module.forward(*args, **kwargs)
232
+ self.assertEqual(ret, tuple(reversed(args + ("3",))))
233
+
234
+ @dist_utils.dist_init
235
+ def test_remote_parameters(self):
236
+ if self.rank != 0:
237
+ return
238
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
239
+
240
+ # Only test Python nn.Module, because script module methods don't support ``remote_parameters``.
241
+ for remote_module in self._create_remote_module_iter(
242
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
243
+ ):
244
+ param_rrefs = remote_module.remote_parameters()
245
+ self.assertEqual(len(param_rrefs), 1)
246
+ self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL))
247
+
248
+ @dist_utils.dist_init
249
+ def test_get_module_rref(self):
250
+ if self.rank != 0:
251
+ return
252
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
253
+
254
+ # Only test Python nn.Module, because script module methods don't support ``get_module_rref``.
255
+ for remote_module in self._create_remote_module_iter(
256
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
257
+ ):
258
+ rref = remote_module.get_module_rref()
259
+ self.assertEqual(rref, remote_module.module_rref)
260
+ for param in rref.to_here().parameters():
261
+ self.assertTrue(torch.equal(param, _PARAM_VAL))
262
+
263
+ @dist_utils.dist_init
264
+ def test_train_eval(self):
265
+ if self.rank != 0:
266
+ return
267
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
268
+
269
+ for remote_module in self._create_remote_module_iter(
270
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
271
+ ):
272
+ remote_module.train()
273
+ ret1 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
274
+ self.assertEqual(ret1, True)
275
+
276
+ remote_module.eval()
277
+ ret2 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
278
+ self.assertEqual(ret2, False)
279
+
280
+ @dist_utils.dist_init
281
+ def test_unsupported_methods(self):
282
+ if self.rank != 0:
283
+ return
284
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
285
+
286
+ for remote_module in self._create_remote_module_iter(
287
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
288
+ ):
289
+ with self.assertRaisesRegex(
290
+ ValueError, r"Method ``register_buffer`` not supported for RemoteModule"
291
+ ):
292
+ remote_module.register_buffer("buffer", torch.ones(5))
293
+ with self.assertRaisesRegex(
294
+ ValueError,
295
+ r"Method ``register_parameter`` not supported for RemoteModule",
296
+ ):
297
+ remote_module.register_parameter(
298
+ "param", torch.nn.Parameter(torch.ones(1))
299
+ )
300
+ with self.assertRaisesRegex(
301
+ ValueError, r"Method ``add_module`` not supported for RemoteModule"
302
+ ):
303
+ remote_module.add_module("empty", None)
304
+
305
+ with self.assertRaisesRegex(
306
+ ValueError, r"Method ``apply`` not supported for RemoteModule"
307
+ ):
308
+ fn = torch.rand((3, 3), requires_grad=False)
309
+ remote_module.apply(fn)
310
+
311
+ with self.assertRaisesRegex(
312
+ ValueError, r"Method ``cuda`` not supported for RemoteModule"
313
+ ):
314
+ remote_module.cuda()
315
+ with self.assertRaisesRegex(
316
+ ValueError, r"Method ``cpu`` not supported for RemoteModule"
317
+ ):
318
+ remote_module.cpu()
319
+ with self.assertRaisesRegex(
320
+ ValueError, r"Method ``type`` not supported for RemoteModule"
321
+ ):
322
+ remote_module.type(torch.FloatTensor)
323
+ with self.assertRaisesRegex(
324
+ ValueError, r"Method ``float`` not supported for RemoteModule"
325
+ ):
326
+ remote_module.float()
327
+ with self.assertRaisesRegex(
328
+ ValueError, r"Method ``double`` not supported for RemoteModule"
329
+ ):
330
+ remote_module.double()
331
+ with self.assertRaisesRegex(
332
+ ValueError, r"Method ``bfloat16`` not supported for RemoteModule"
333
+ ):
334
+ remote_module.bfloat16()
335
+ with self.assertRaisesRegex(
336
+ ValueError, r"Method ``to`` not supported for RemoteModule"
337
+ ):
338
+ remote_module.to("cpu", dtype=torch.int32)
339
+
340
+ def hook(module, grad_input, grad_output):
341
+ pass
342
+
343
+ with self.assertRaisesRegex(
344
+ ValueError,
345
+ r"Method ``register_backward_hook`` not supported for RemoteModule",
346
+ ):
347
+ remote_module.register_backward_hook(hook)
348
+ with self.assertRaisesRegex(
349
+ ValueError,
350
+ r"Method ``register_forward_pre_hook`` not supported for RemoteModule",
351
+ ):
352
+ remote_module.register_forward_pre_hook(hook)
353
+ with self.assertRaisesRegex(
354
+ ValueError,
355
+ r"Method ``register_forward_hook`` not supported for RemoteModule",
356
+ ):
357
+ remote_module.register_forward_hook(hook)
358
+
359
+ with self.assertRaisesRegex(
360
+ ValueError, r"Method ``state_dict`` not supported for RemoteModule"
361
+ ):
362
+ remote_module.state_dict()
363
+ with self.assertRaisesRegex(
364
+ ValueError, r"Method ``load_state_dict`` not supported for RemoteModule"
365
+ ):
366
+ remote_module.load_state_dict({})
367
+
368
+ with self.assertRaisesRegex(
369
+ ValueError,
370
+ r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.",
371
+ ):
372
+ remote_module.parameters()
373
+ with self.assertRaisesRegex(
374
+ ValueError,
375
+ r"Method ``named_parameters`` not supported for RemoteModule",
376
+ ):
377
+ remote_module.named_parameters()
378
+ with self.assertRaisesRegex(
379
+ ValueError, r"Method ``buffers`` not supported for RemoteModule"
380
+ ):
381
+ remote_module.buffers()
382
+ with self.assertRaisesRegex(
383
+ ValueError, r"Method ``named_buffers`` not supported for RemoteModule"
384
+ ):
385
+ remote_module.named_buffers()
386
+ with self.assertRaisesRegex(
387
+ ValueError, r"Method ``children`` not supported for RemoteModule"
388
+ ):
389
+ remote_module.children()
390
+ with self.assertRaisesRegex(
391
+ ValueError, r"Method ``named_children`` not supported for RemoteModule"
392
+ ):
393
+ remote_module.named_children()
394
+ with self.assertRaisesRegex(
395
+ ValueError, r"Method ``modules`` not supported for RemoteModule"
396
+ ):
397
+ remote_module.modules()
398
+ with self.assertRaisesRegex(
399
+ ValueError, r"Method ``named_modules`` not supported for RemoteModule"
400
+ ):
401
+ remote_module.named_modules()
402
+
403
+ with self.assertRaisesRegex(
404
+ ValueError, r"Method ``requires_grad_`` not supported for RemoteModule"
405
+ ):
406
+ remote_module.requires_grad_()
407
+ with self.assertRaisesRegex(
408
+ ValueError, r"Method ``zero_grad`` not supported for RemoteModule"
409
+ ):
410
+ remote_module.zero_grad()
411
+ with self.assertRaisesRegex(
412
+ ValueError, r"Method ``share_memory`` not supported for RemoteModule"
413
+ ):
414
+ remote_module.share_memory()
415
+ with self.assertRaisesRegex(
416
+ ValueError, r"Method ``extra_repr`` not supported for RemoteModule"
417
+ ):
418
+ remote_module.extra_repr()
419
+
420
+ @dist_utils.dist_init
421
+ def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self):
422
+ if self.rank != 0:
423
+ return
424
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
425
+
426
+ # If a new attribute is added to this RemoteModule after the initialization,
427
+ # and it will be sent over the wire by RPC,
428
+ # this new field will not be pickled, because it's not specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES.
429
+ # Note that adding a new attribute out of constructor should rarely happen.
430
+ # If a new attribute is added to RemoteModule constructor,
431
+ # there is a sanity check to enforce developers to add this attribute to either
432
+ # _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
433
+ for remote_module in self._create_remote_module_iter(
434
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
435
+ ):
436
+ new_attr_name = "new_attr"
437
+ setattr(remote_module, new_attr_name, 1)
438
+
439
+ attrs = rpc.rpc_sync(
440
+ dst_worker_name, remote_module_attributes, (remote_module,)
441
+ )
442
+ self.assertNotIn(new_attr_name, attrs)
443
+
444
+ @dist_utils.dist_init
445
+ def test_remote_module_py_pickle_not_supported(self):
446
+ if self.rank != 0:
447
+ return
448
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
449
+
450
+ for remote_module in self._create_remote_module_iter(
451
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
452
+ ):
453
+ with TemporaryFileName() as fname:
454
+ with self.assertRaisesRegex(
455
+ RuntimeError,
456
+ "Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC",
457
+ ):
458
+ torch.save(remote_module, fname)
459
+
460
+ @dist_utils.dist_init
461
+ def test_remote_module_py_pickle_not_supported_script(self):
462
+ if self.rank != 0:
463
+ return
464
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
465
+
466
+ for remote_module in self._create_remote_module_iter(
467
+ dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
468
+ ):
469
+ with TemporaryFileName() as fname:
470
+ with self.assertRaisesRegex(torch.jit.Error, "can only be pickled when using RPC"):
471
+ torch.save(remote_module, fname)
472
+
473
+
474
+ class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
475
+ @property
476
+ def world_size(self): # Override setting in CommonRemoteModuleTest
477
+ return 3
478
+
479
+ @dist_utils.dist_init
480
+ def test_send_remote_module_over_the_wire(self):
481
+ if self.rank != 0:
482
+ return
483
+ dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
484
+ dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
485
+
486
+ # Unpickled attributes include both the inherent attributes of RemoteModule
487
+ # (not inherited from the superclass) and two installed methods.
488
+ expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
489
+ expected_unpickled_attrs.append("forward_async")
490
+ expected_unpickled_attrs.append("forward")
491
+
492
+ # Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
493
+ for remote_module in self._create_remote_module_iter(
494
+ dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
495
+ ):
496
+ # Test querying some simple attributes from worker2.
497
+ attrs = rpc.rpc_sync(
498
+ dst_worker2_name, remote_module_attributes, (remote_module,)
499
+ )
500
+ self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs)
501
+ self.assertEqual(attrs["on"], "worker1")
502
+ self.assertEqual(attrs["device"], "cpu")
503
+ self.assertFalse(attrs["is_device_map_set"])
504
+ self.assertFalse(attrs["is_scriptable"])
505
+
506
+ # Test the installed methods on worker1's can be initiated by worker2 over RPC layer.
507
+ # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward``` or ``forward_async``,
508
+ # not have another worker to initiate forward over the RPC layer.
509
+ args = (torch.ones(1), 2, "3")
510
+ ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
511
+ self.assertEqual(ret1, tuple(reversed(args)))
512
+ ret2 = rpc.rpc_sync(
513
+ dst_worker2_name, remote_forward_async, (remote_module, args)
514
+ )
515
+ self.assertEqual(ret2, tuple(reversed(args)))
516
+
517
+ @dist_utils.dist_init
518
+ def test_send_remote_module_over_the_wire_script_not_supported(self):
519
+ if self.rank != 0:
520
+ return
521
+ dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
522
+ dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
523
+
524
+ # Unpickled attributes include both the inherent attributes of RemoteModule
525
+ # (not inherited from the superclass) and two installed methods.
526
+ expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
527
+ expected_unpickled_attrs.append("forward_async")
528
+ expected_unpickled_attrs.append("forward")
529
+
530
+ with self.assertRaisesRegex(
531
+ RuntimeError, "Passing a script RemoteModule over RPC is not supported."
532
+ ):
533
+ # Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
534
+ for remote_module in self._create_remote_module_iter(
535
+ dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
536
+ ):
537
+ # Test querying some simple attributes from worker2.
538
+ attrs = rpc.rpc_sync(
539
+ dst_worker2_name, remote_module_attributes, (remote_module,)
540
+ )
541
+
542
+ @dist_utils.dist_init
543
+ def test_create_remote_module_from_module_rref(self):
544
+ if self.rank != 0:
545
+ return
546
+ dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
547
+ dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
548
+
549
+ # Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer.
550
+ for remote_module in self._create_remote_module_iter(
551
+ dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
552
+ ):
553
+ remote_module2 = rpc.rpc_sync(
554
+ dst_worker2_name,
555
+ RemoteModule.init_from_module_rref,
556
+ (dst_worker2_name, remote_module.get_module_rref()),
557
+ )
558
+
559
+ args = (torch.ones(1), 2, "3")
560
+ ret1 = rpc.rpc_sync(
561
+ dst_worker1_name, remote_forward, (remote_module, args)
562
+ )
563
+ ret2 = rpc.rpc_sync(
564
+ dst_worker2_name, remote_forward, (remote_module2, args)
565
+ )
566
+ self.assertEqual(ret2, ret2)
567
+
568
+
569
+ class CudaRemoteModuleTest(CommonRemoteModuleTest):
570
+ @skip_if_lt_x_gpu(1)
571
+ @dist_utils.dist_init
572
+ def test_valid_device(self):
573
+ if self.rank != 0:
574
+ return
575
+ dst_rank = (self.rank + 1) % self.world_size
576
+ dst_worker_name = dist_utils.worker_name(dst_rank)
577
+
578
+ for remote_module in self._create_remote_module_iter(
579
+ f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
580
+ ):
581
+ device = rpc.rpc_sync(
582
+ dst_worker_name, remote_device, (remote_module.module_rref,)
583
+ )
584
+ self.assertEqual(device.type, "cuda")
585
+ self.assertEqual(device.index, 0)
586
+
587
+ # Test rank works as well.
588
+ for remote_module in self._create_remote_module_iter(
589
+ f"rank:{dst_rank}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
590
+ ):
591
+ device = rpc.rpc_sync(
592
+ dst_worker_name, remote_device, (remote_module.module_rref,)
593
+ )
594
+ self.assertEqual(device.type, "cuda")
595
+ self.assertEqual(device.index, 0)
596
+
597
+ @skip_if_lt_x_gpu(1)
598
+ @dist_utils.dist_init
599
+ def test_invalid_devices(self):
600
+ if self.rank != 0:
601
+ return
602
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
603
+
604
+ with self.assertRaisesRegex(
605
+ RuntimeError,
606
+ r"Expected one of .+ device type at start of device string",
607
+ ):
608
+ [
609
+ m.forward()
610
+ for m in self._create_remote_module_iter(
611
+ f"{dst_worker_name}/foo",
612
+ modes=[ModuleCreationMode.MODULE_CTOR],
613
+ )
614
+ ]
615
+
616
+ with self.assertRaisesRegex(
617
+ RuntimeError, r"CUDA error: invalid device ordinal"
618
+ ):
619
+ [
620
+ m.forward()
621
+ for m in self._create_remote_module_iter(
622
+ f"{dst_worker_name}/cuda:100",
623
+ modes=[ModuleCreationMode.MODULE_CTOR],
624
+ )
625
+ ]
626
+
627
+ with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"):
628
+ [
629
+ m.forward()
630
+ for m in self._create_remote_module_iter(
631
+ f"{dst_worker_name}/cpu2",
632
+ modes=[ModuleCreationMode.MODULE_CTOR],
633
+ )
634
+ ]
635
+
636
+ with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"):
637
+ [
638
+ m.forward()
639
+ for m in self._create_remote_module_iter(
640
+ f"{dst_worker_name}/",
641
+ modes=[ModuleCreationMode.MODULE_CTOR],
642
+ )
643
+ ]
644
+
645
+ with self.assertRaisesRegex(
646
+ ValueError,
647
+ r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '<workername>/<device>'",
648
+ ):
649
+ [
650
+ m.forward()
651
+ for m in self._create_remote_module_iter(
652
+ f"{dst_worker_name}/cuda:0/cuda:1",
653
+ modes=[ModuleCreationMode.MODULE_CTOR],
654
+ )
655
+ ]
656
+
657
+ with self.assertRaisesRegex(
658
+ ValueError,
659
+ r"Could not parse remote_device: /. The valid format is '<workername>/<device>'",
660
+ ):
661
+ [
662
+ m.forward()
663
+ for m in self._create_remote_module_iter(
664
+ "/",
665
+ modes=[ModuleCreationMode.MODULE_CTOR],
666
+ )
667
+ ]
668
+
669
+ with self.assertRaisesRegex(
670
+ ValueError,
671
+ r"Could not parse remote_device: /cuda:0. The valid format is '<workername>/<device>'",
672
+ ):
673
+ [
674
+ m.forward()
675
+ for m in self._create_remote_module_iter(
676
+ "/cuda:0",
677
+ modes=[ModuleCreationMode.MODULE_CTOR],
678
+ )
679
+ ]
680
+
681
+ @skip_if_lt_x_gpu(1)
682
+ @dist_utils.dist_init
683
+ def test_input_moved_to_cuda_device(self):
684
+ if self.rank != 0:
685
+ return
686
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
687
+
688
+ # These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device.
689
+ t1 = torch.ones(1)
690
+ args = (t1, 2)
691
+ t2 = t1 * 2
692
+ kwargs = dict(word=t2)
693
+
694
+ # Only test Python nn.Module, because script module methods don't support taking kwargs.
695
+ for remote_module in self._create_remote_module_iter(
696
+ f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
697
+ ):
698
+ ret_fut = remote_module.forward_async(*args, **kwargs)
699
+ ret = ret_fut.wait()
700
+ self.assertEqual(ret, tuple(reversed(args + (t2,))))
701
+ # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
702
+ self.assertEqual(ret[0].device.type, "cpu")
703
+ self.assertEqual(ret[2].device.type, "cpu")
704
+
705
+ ret = remote_module.forward(*args, **kwargs)
706
+ self.assertEqual(ret, tuple(reversed(args + (t2,))))
707
+ # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
708
+ self.assertEqual(ret[0].device.type, "cpu")
709
+ self.assertEqual(ret[2].device.type, "cpu")
710
+
711
+ @skip_if_lt_x_gpu(1)
712
+ @dist_utils.dist_init
713
+ def test_input_moved_to_cuda_device_script(self):
714
+ if self.rank != 0:
715
+ return
716
+ dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
717
+
718
+ scripted_remote_module = next(
719
+ self._create_remote_module_iter(
720
+ f"{dst_worker_name}/cuda:0",
721
+ modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE],
722
+ )
723
+ )
724
+
725
+ @torch.jit.script
726
+ def run_forward(scripted_remote_module: MyModuleInterface):
727
+ ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
728
+ return ret
729
+
730
+ ret = run_forward(scripted_remote_module)
731
+
732
+ self.assertEqual(ret, ("3", 2, torch.ones(1)))
733
+ # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
734
+ self.assertEqual(ret[2].device.type, "cpu")