cyd0806 committed on
Commit
4ed3182
·
verified ·
1 Parent(s): 71eb649

Upload apex-master/tests/L0/run_transformer/test_pipeline_parallel_fwd_bwd.py with huggingface_hub

Browse files
apex-master/tests/L0/run_transformer/test_pipeline_parallel_fwd_bwd.py ADDED
@@ -0,0 +1,725 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import logging
3
+ import itertools
4
+ import os
5
+ from datetime import datetime
6
+ from packaging.version import parse, Version
7
+ import re
8
+ from typing import Optional, Tuple, List
9
+ import unittest
10
+
11
+ import torch
12
+ from torch.testing._internal import common_utils
13
+
14
+ from apex._autocast_utils import _get_autocast_dtypes
15
+ from apex.transformer import parallel_state
16
+ from apex.transformer.enums import ModelType
17
+ from apex.transformer.pipeline_parallel import utils as pp_utils
18
+ from apex.transformer.pipeline_parallel.schedules.common import (
19
+ FwdStepFunc,
20
+ build_model,
21
+ _get_params_for_weight_decay_optimization,
22
+ )
23
+ from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import (
24
+ forward_backward_no_pipelining,
25
+ )
26
+ from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
27
+ _forward_backward_pipelining_with_interleaving,
28
+ )
29
+ from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
30
+ forward_backward_pipelining_without_interleaving,
31
+ )
32
+ from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
33
+ from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
34
+ from apex.transformer.testing.distributed_test_base import HAS_TORCH_UCC_COMPAT_NVIDIA_DRIVER
35
+ from apex.transformer.testing import commons as testing_utils
36
+ from apex.transformer._ucc_util import HAS_UCC
37
+
38
+ logging.getLogger("torch").setLevel(logging.WARNING)
39
+ logging.getLogger("apex").setLevel(logging.WARNING)
40
+
41
+ weight_coeff = 1024
42
+
43
+ # Guard for https://github.com/pytorch/pytorch/pull/82450
44
def get_nvidia_pytorch_version():
    """Return the NGC PyTorch container version as a ``YY.MM`` style string.

    Reads the ``NVIDIA_PYTORCH_VERSION`` environment variable (defaulting to
    ``"22.08"`` when unset). Two special forms are normalized:

    * values containing ``"master"`` map to the current year/month, and
    * an ``"update_for_"`` prefix is stripped.
    """
    version = os.getenv("NVIDIA_PYTORCH_VERSION", "22.08")
    if "master" in version:
        # Nightly/master containers carry no release tag; approximate with today's YY.MM.
        return datetime.today().strftime("%y.%m")
    if "update_for_" in version:
        return version.replace("update_for_", "")
    return version
51
+
52
# Whether the sync after `batch_isend_irecv` may be skipped: true on
# PyTorch >= 1.13 or on NGC containers >= 22.09 (see the guard comment above
# referencing https://github.com/pytorch/pytorch/pull/82450).
ngc_container_2209, pytorch_113 = Version("22.09"), Version("1.13")
CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV = bool(
    parse(torch.__version__) >= pytorch_113
    or parse(get_nvidia_pytorch_version()) >= ngc_container_2209
)
60
+
61
+
62
def get_init_weights_func(offset: int = 0):
    """Build a ``Module.apply``-compatible hook that deterministically fills
    ``torch.nn.Linear`` weights.

    The weight constant is ``(pipeline_rank + offset + 1) / weight_coeff`` and
    the bias is filled with 1.0, so every pipeline stage gets a distinct,
    reproducible initialization that the reference model can mirror.
    """
    @torch.no_grad()
    def init_weights(module):
        stage_rank = parallel_state.get_pipeline_model_parallel_rank()
        if isinstance(module, torch.nn.Linear):
            module.weight.fill_((stage_rank + offset + 1.0) / weight_coeff)
            module.bias.fill_(1.0)

    return init_weights
70
+
71
+
72
def get_dtype_for_comparison():
    """Pick the dtype used for numeric comparisons against the reference model.

    Uses float64 on devices with compute capability >= 8.0 (Ampere or newer),
    otherwise float32. Requires a visible CUDA device.
    """
    capability = torch.cuda.get_device_capability()
    return torch.float64 if capability >= (8, 0) else torch.float32
76
+
77
+
78
def get_target_loss_and_model(global_batch_shape: tuple, hidden_size: int, total_layers: int) -> Tuple[torch.Tensor, List[torch.Tensor]]:
    """Run a CPU reference forward/backward pass for the toy linear stack.

    Builds ``total_layers`` pairs of (weight, bias) whose values match
    `get_init_weights_func` (layer ``i`` weight is ``(i + 1) / weight_coeff``,
    bias is 1.0), feeds an all-ones batch through them, and backpropagates the
    mean-per-sample loss so each parameter carries a reference ``.grad``.

    Returns the scalar loss and a list of ``[weight, bias]`` pairs per layer.
    """
    dtype = get_dtype_for_comparison()
    reference_model = []
    activations = torch.ones(global_batch_shape, dtype=dtype)
    for layer_idx in range(total_layers):
        weight = torch.full(
            (hidden_size, hidden_size), (layer_idx + 1.0) / weight_coeff, dtype=dtype
        ).requires_grad_()
        bias = torch.ones(hidden_size, dtype=dtype).requires_grad_()

        # Transpose semantics are irrelevant here: every entry of `weight`
        # (and of the activations) is identical, so W @ x == W.T @ x.
        activations = torch.matmul(weight, activations) + bias
        reference_model.append([weight, bias])

    loss = activations.sum() / global_batch_shape[0]
    loss.backward()

    return loss, reference_model
97
+
98
+
99
+ def _get_default_world_sizes_model_parallel_world_size(pipeline_model_parallel_world_size: Optional[int] = None
100
+ ) -> Tuple[int, int, int]:
101
+ # TODO: revisit if we can fold this into the class for skip logic / avoid duplication
102
+ # of world size computation
103
+ world_size = torch.cuda.device_count()
104
+ tensor_model_parallel_world_size = 1
105
+ data_parallel_size = 1 + (world_size >= 8 and world_size % 2 == 0)
106
+
107
+ if pipeline_model_parallel_world_size is None:
108
+ pipeline_model_parallel_world_size = world_size // (tensor_model_parallel_world_size * data_parallel_size)
109
+ else:
110
+ data_parallel_size = world_size // (tensor_model_parallel_world_size * pipeline_model_parallel_world_size)
111
+
112
+ return tensor_model_parallel_world_size, data_parallel_size, pipeline_model_parallel_world_size
113
+
114
+
115
class PipelineParallelForwardBackwardTestBase:
    """Shared test logic for the pipeline-parallel fwd/bwd schedules.

    Subclasses mix this in with a distributed test base (NCCL or UCC) and may
    narrow `deallocate_options` / `dtypes` to limit the sweep below.
    """

    GLOBAL_BATCH_SIZE = 16
    MICRO_BATCH_SIZE = 2
    HIDDEN_SIZE = 32

    deallocate_options = (True, False)
    # If :obj:`None`, (torch.float32, torch.float16, torch.bfloat16) are dtype options on Ampere.
    # You can limit the options by overriding the following `dtypes`.
    dtypes = None

    def _forward_backward_test_impl(
        self,
        forward_only: bool,
        fwd_bwd_func: FwdStepFunc,
        pipeline_model_parallel_world_size: Optional[int],
        virtual_pipeline_model_parallel_size: Optional[int],
        async_comm: bool = False,
        *,
        default_backend: Optional[str] = None,
        p2p_backend: Optional[str] = None,
        sync_batch_comm: bool = True,
    ) -> None:
        """Run `fwd_bwd_func` over a sweep of dtypes and deallocation options.

        For each (dtype, deallocate) combination this initializes model
        parallelism, builds the toy linear-stack model, runs one pipeline
        fwd/bwd step, and — when `dtype` matches the comparison dtype — checks
        the losses and per-stage parameters/grads against the CPU reference
        from `get_target_loss_and_model`. Model parallel state is destroyed at
        the end of every iteration.
        """
        # The interleaved schedule only makes sense with >1 virtual stages.
        if fwd_bwd_func == _forward_backward_pipelining_with_interleaving:
            self.assertIsNotNone(virtual_pipeline_model_parallel_size)
            self.assertGreater(virtual_pipeline_model_parallel_size, 1)
        dtype_options = self.dtypes or [torch.float32, torch.double] + _get_autocast_dtypes()

        for dtype, deallocate_pipeline_outputs in itertools.product(
            dtype_options, self.deallocate_options,
        ):
            # Loss scaling is only needed for fp16; bf16/fp32/fp64 run unscaled.
            grad_scaler = (
                torch.amp.GradScaler('cuda', init_scale=4.0)
                if dtype == torch.half
                else None
            )

            (tensor_model_parallel_world_size,
             data_parallel_size,
             pipeline_model_parallel_world_size) = _get_default_world_sizes_model_parallel_world_size(pipeline_model_parallel_world_size)

            parallel_state.initialize_model_parallel(
                tensor_model_parallel_size_=tensor_model_parallel_world_size,
                pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
                virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_size,
                default_backend=default_backend,
                p2p_backend=p2p_backend,
            )
            pp_utils._reconfigure_microbatch_calculator(
                rank=parallel_state.get_tensor_model_parallel_rank(),
                rampup_batch_size=None,
                global_batch_size=self.GLOBAL_BATCH_SIZE,
                micro_batch_size=self.MICRO_BATCH_SIZE,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )

            # Per-data-parallel-rank share of the global batch.
            global_batch_shape = (
                self.GLOBAL_BATCH_SIZE
                // parallel_state.get_data_parallel_world_size(),
                self.HIDDEN_SIZE,
                self.HIDDEN_SIZE,
            )

            # Only the first pipeline stage feeds real input; later stages
            # receive activations over p2p and pass `None` as batch.
            batch = None
            if parallel_state.is_pipeline_first_stage():
                batch = (torch.ones(global_batch_shape, dtype=dtype).cuda(), )

            model = build_model(
                testing_utils.model_provider_func,
                # Use DDP only when it's better to have
                wrap_with_ddp=data_parallel_size > 1,
                virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
                hidden_size=self.HIDDEN_SIZE,
            )

            # With interleaving, chunk `idx` on this rank holds layer
            # `rank + idx * pipeline_world_size`, hence the per-chunk offset.
            offset = pipeline_model_parallel_world_size if virtual_pipeline_model_parallel_size is not None else 0
            for idx, model_module in enumerate(model):
                model_module = model_module.to(dtype)
                model_module.apply(get_init_weights_func(idx*offset))

            _param_groups = _get_params_for_weight_decay_optimization(model)
            optimizer = torch.optim.Adam(_param_groups, lr=1e-3)

            pp_utils.update_num_microbatches(0)

            loss = fwd_bwd_func(
                testing_utils.fwd_step_func,
                batch,
                model,
                forward_only=forward_only,
                # `tensor_shape` is the shape of micro batch.
                tensor_shape=(
                    self.MICRO_BATCH_SIZE,
                    self.HIDDEN_SIZE,
                    self.HIDDEN_SIZE,
                ),
                dtype=dtype,
                async_comm=async_comm,
                grad_scaler=grad_scaler,
                deallocate_pipeline_output=deallocate_pipeline_outputs,
                sync_batch_comm=sync_batch_comm,
            )

            # Numeric comparison is only meaningful at the high-precision
            # comparison dtype (fp64 on Ampere+, else fp32).
            if dtype == get_dtype_for_comparison():
                torch.cuda.synchronize()
                hidden_size = self.HIDDEN_SIZE
                microbatch_size = self.MICRO_BATCH_SIZE
                total_layers = pipeline_model_parallel_world_size
                if virtual_pipeline_model_parallel_size is not None:
                    total_layers *= virtual_pipeline_model_parallel_size
                target_loss, target_model = get_target_loss_and_model(global_batch_shape, hidden_size, total_layers)

                # Each entry of `loss` is a per-microbatch dict; 'avg' is
                # normalized by microbatch size before comparing.
                for loss_item in loss:
                    x = loss_item['avg']
                    self.assertEqual(x.item() / microbatch_size, target_loss.item())

                if not forward_only:
                    for vm_id, model_module in enumerate(model):
                        params = list(model_module.parameters())
                        # Map this (device rank, virtual chunk) back to the
                        # matching layer index in the CPU reference model.
                        rank = params[0].get_device()
                        offset = pipeline_model_parallel_world_size
                        param_id = rank // data_parallel_size + vm_id * offset
                        target_params = target_model[param_id]

                        self.assertEqual(params[0].cpu(), target_params[0])
                        self.assertEqual(params[1].cpu(), target_params[1])
                        self.assertEqual(params[0].grad.cpu() / microbatch_size, target_params[0].grad)
                        self.assertEqual(params[1].grad.cpu() / microbatch_size, target_params[1].grad)

            # Training mode: every parameter must have a grad, and the
            # optimizer step must run cleanly.
            if not forward_only:
                for m in model:
                    for p in m.parameters():
                        self.assertIsNotNone(p.grad)
                optimizer.step()
                optimizer.zero_grad(set_to_none=True)

            parallel_state.destroy_model_parallel()

    def test_learning_no_pipelining(self):
        self._forward_backward_test_impl(False, forward_backward_no_pipelining, 1, None)

    def test_inference_no_pipelining(self):
        self._forward_backward_test_impl(True, forward_backward_no_pipelining, 1, None)

    def test_learning_pipelining_without_interleaving(self, sync_batch_comm: bool = True):
        self._forward_backward_test_impl(
            False, forward_backward_pipelining_without_interleaving, None, None, sync_batch_comm=sync_batch_comm,
        )

    def test_inference_pipelining_without_interleaving(self, sync_batch_comm: bool = True):
        self._forward_backward_test_impl(
            True, forward_backward_pipelining_without_interleaving, None, None, sync_batch_comm=sync_batch_comm,
        )

    def test_learning_async_pipelining_without_interleaving(self, sync_batch_comm: bool = True):
        self._forward_backward_test_impl(
            False, forward_backward_pipelining_without_interleaving, None, None, async_comm=True,
            sync_batch_comm=sync_batch_comm,
        )

    def test_inference_async_pipelining_without_interleaving(self, sync_batch_comm: bool = True):
        self._forward_backward_test_impl(
            True, forward_backward_pipelining_without_interleaving, None, None, async_comm=True,
            sync_batch_comm=sync_batch_comm,
        )

    # fails on native ucc: times out
    @unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
    def test_learning_pipelining_with_interleaving(self, sync_batch_comm: bool = True):
        self._forward_backward_test_impl(
            False, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2,
            sync_batch_comm=sync_batch_comm,
        )

    # fails on native ucc: times out
    @unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
    def test_inference_pipelining_with_interleaving(self, sync_batch_comm: bool = True):
        self._forward_backward_test_impl(
            True, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2,
            sync_batch_comm=sync_batch_comm,
        )

    # fails on native ucc: times out
    @unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
    def test_learning_async_pipelining_with_interleaving(self, sync_batch_comm: bool = True):
        self._forward_backward_test_impl(
            False, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2, async_comm=True,
            sync_batch_comm=sync_batch_comm,
        )

    # fails on native ucc: times out
    @unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
    def test_inference_async_pipelining_with_interleaving(self, sync_batch_comm: bool = True):
        self._forward_backward_test_impl(
            True, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2, async_comm=True,
            sync_batch_comm=sync_batch_comm,
        )
312
+
313
+
314
class NcclPipelineParallelForwardBackwardTest(NcclDistributedTestBase, PipelineParallelForwardBackwardTestBase):
    """NCCL-backed variant of the pipeline fwd/bwd tests.

    Adds hybrid-backend cases (NCCL default + UCC for p2p) and re-runs the
    base-class tests with `sync_batch_comm=False` where the installed PyTorch
    allows skipping the sync after `batch_isend_irecv`.
    """

    @property
    def world_size(self) -> int:
        # Cap at 8 ranks regardless of how many GPUs are visible.
        return min(torch.cuda.device_count(), 8)

    def _run_hybrid_distributed_backend(self, forward_only: bool) -> None:
        """Run pipelining without interleaving using NCCL overall but UCC for p2p."""
        self._forward_backward_test_impl(
            forward_only, forward_backward_pipelining_without_interleaving, None, None,
            default_backend="nccl", p2p_backend="ucc",
        )

    @unittest.skipUnless(HAS_TORCH_UCC_COMPAT_NVIDIA_DRIVER, "Needs driver >= 470.42.01")
    def _test_hybrid_backends(self, forward_only: bool) -> None:
        """Expect success when torch-UCC is available, else a specific ImportError."""
        if HAS_UCC:
            self._run_hybrid_distributed_backend(forward_only)
        else:
            with self.assertRaisesRegex(
                ImportError,
                re.escape("UCC backend requires pytorch source build with UCC installed and enabled"),
            ):
                self._run_hybrid_distributed_backend(forward_only)

    def test_learning_pipelining_without_interleaving_ucc_for_p2p(self):
        self._test_hybrid_backends(False)

    def test_inference_pipelining_without_interleaving_ucc_for_p2p(self):
        self._test_hybrid_backends(True)

    # NOTE(review): "skyp" in this test name looks like a typo for "skip";
    # left as-is since renaming would change the test id.
    @unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_learning_pipelining_without_interleaving_skyp_sync_after_batch_isend_irecv(self):
        self.test_learning_pipelining_without_interleaving(sync_batch_comm=False)

    @unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_inference_pipelining_without_interleaving_skip_sync_after_batch_isend_irecv(self):
        self.test_inference_pipelining_without_interleaving(sync_batch_comm=False)

    @unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_learning_async_pipelining_without_interleaving_skip_sync_after_batch_isend_irecv(self):
        self.test_learning_async_pipelining_without_interleaving(sync_batch_comm=False)

    @unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_inference_async_pipelining_without_interleaving_skip_sync_after_batch_isend_irecv(self):
        self.test_inference_async_pipelining_without_interleaving(sync_batch_comm=False)

    @unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_learning_pipelining_with_interleaving_skip_sync_after_batch_isend_irecv(self):
        self.test_learning_pipelining_with_interleaving(sync_batch_comm=False)

    @unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_inference_pipelining_with_interleaving_skip_sync_after_batch_isend_irecv(self):
        self.test_inference_pipelining_with_interleaving(sync_batch_comm=False)

    @unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_learning_async_pipelining_with_interleaving_skip_sync_after_batch_isend_irecv(self):
        self.test_learning_async_pipelining_with_interleaving(sync_batch_comm=False)

    @unittest.skipUnless(CAN_SKIP_SYNC_AFTER_BATCH_ISEND_IRECV, "Requires https://github.com/pytorch/pytorch/pull/82450")
    def test_inference_async_pipelining_with_interleaving_skip_sync_after_batch_isend_irecv(self):
        self.test_inference_async_pipelining_with_interleaving(sync_batch_comm=False)
374
+
375
+
376
# n.b.(mkozuki): pipeline parallel w/o interleaving with UCX_TLS=tcp,sm fails.
class UccPipelineParallelForwardBackwardTest(UccDistributedTestBase, PipelineParallelForwardBackwardTestBase):
    """UCC-backed variant of the pipeline fwd/bwd tests.

    Narrows the base-class sweep: no pipeline-output deallocation and
    float32 only.
    """

    deallocate_options = (False,)
    dtypes = (torch.float32,)

    @property
    def world_size(self) -> int:
        # Cap at 8 ranks regardless of how many GPUs are visible.
        return min(torch.cuda.device_count(), 8)
385
+
386
+
387
# Sanity checking the functionality of `forward_backward_pipelining_without_interleaving` with
# `model_type=ModelType.encoder_and_decoder` which is used for pipeline training of transformer
# models such as T5.
@unittest.skipIf(torch.cuda.device_count() < 4, "Requires >= 4 GPUs")
class NcclPipelineParallelWithToyParallelMLP(NcclDistributedTestBase):
    """Smoke tests for the non-interleaved schedule with a toy parallel MLP,
    covering both `encoder_and_decoder` (split-rank) and `encoder_or_decoder`
    model types, with and without sequence parallelism.
    """

    GLOBAL_BATCH_SIZE: int = 16
    MICRO_BATCH_SIZE: int = 2
    HIDDEN_SIZE: int = 64
    # TODO(mkozuki): Change `DECODER_SEQUENCE_LENGTH` to a value different from `ENCODER_SEQUENCE_LENGTH`.
    # To test forward_backward_pipelining_without_interleaving with `model_type=ModelType.encoder_and_decoder`,
    # `decoder_seq_length` is necessary and ideally should be different from `encoder_sequence_length`
    # but my laziness let me use the same value.
    # Note that you may have to either update `MyModel` def or define another `MyModel`.
    # to support different `DECODER_SEQUENCE_LENGTH`.
    ENCODER_SEQUENCE_LENGTH: int = 32
    DECODER_SEQUENCE_LENGTH: int = 32

    @property
    def world_size(self) -> int:
        # Cap at 8 ranks regardless of how many GPUs are visible.
        return min(torch.cuda.device_count(), 8)

    # TODO(mkozuki): Set `tensor_model_parallel>1` for encoder_and_decoder as well if there's enough GPUs
    # in order to let `sequence_parallel_enabled` have an effect on tensor shape logic.
    def _forward_backward_test_impl(
        self,
        *,
        forward_only: bool,
        sequence_parallel_enabled: bool,
        model_type: ModelType,
        dtype: torch.dtype = torch.float32,
    ) -> None:
        """Run one fwd(/bwd) pass of the non-interleaved schedule.

        For `encoder_and_decoder` the pipeline is split in half via
        `pipeline_model_parallel_split_rank`; otherwise no split rank is used.
        This is a smoke test only — no numeric comparison is performed.
        """
        # N.B.(mkozuki): It might be better to set `tensor_model_parallel_size` to >1
        # if `self.world_size > 5`. Otherwise, `pipeline_model_parallel_split_rank`
        # can be 1, which can be too far real usecase.
        tensor_model_parallel_size = 1 + int(self.world_size >= 4)
        pipeline_model_parallel_world_size = self.world_size // tensor_model_parallel_size
        if model_type == ModelType.encoder_and_decoder:
            pipeline_model_parallel_split_rank = pipeline_model_parallel_world_size // 2
        else:
            pipeline_model_parallel_split_rank = None

        parallel_state.initialize_model_parallel(
            tensor_model_parallel_size_=tensor_model_parallel_size,
            pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
            virtual_pipeline_model_parallel_size_=None,
            pipeline_model_parallel_split_rank_=pipeline_model_parallel_split_rank,
        )
        # Fixed seed for reproducible model init / dropout across ranks.
        testing_utils.set_random_seed(567)
        pp_utils._reconfigure_microbatch_calculator(
            rank=parallel_state.get_tensor_model_parallel_rank(),
            rampup_batch_size=None,
            global_batch_size=self.GLOBAL_BATCH_SIZE,
            micro_batch_size=self.MICRO_BATCH_SIZE,
            data_parallel_size=parallel_state.get_data_parallel_world_size(),
        )
        # TODO(mkozuki): Call `build_model` with `model_type`.
        model = build_model(
            testing_utils.mlp_provider_func,
            wrap_with_ddp=False,
            virtual_pipeline_model_parallel_size=None,
            hidden_size=self.HIDDEN_SIZE,
            sequence_parallel_enabled=sequence_parallel_enabled,
        )
        model = [m.to(dtype=dtype) for m in model]

        # Only the first pipeline stage gets real input; other stages receive
        # activations over p2p and pass `None`.
        if parallel_state.is_pipeline_first_stage():
            batch: Tuple[torch.Tensor] = (
                torch.ones(
                    (self.GLOBAL_BATCH_SIZE, self.ENCODER_SEQUENCE_LENGTH, self.HIDDEN_SIZE),
                    dtype=dtype,
                    device="cuda",
                ),
            )
        else:
            batch = None

        forward_backward_pipelining_without_interleaving(
            forward_step_func=testing_utils.ToyParallelMLPFwdBwdStepFunc(
                sequence_parallel_enabled=sequence_parallel_enabled,
            ),
            batch=batch,
            model=model,
            forward_only=forward_only,
            # Note: sequence-major (seq, microbatch, hidden) unlike the
            # batch-major shape used by the input batch above.
            tensor_shape=(
                self.ENCODER_SEQUENCE_LENGTH,
                self.MICRO_BATCH_SIZE,
                self.HIDDEN_SIZE,
            ),
            model_type=model_type,
            decoder_sequence_length=self.DECODER_SEQUENCE_LENGTH,
            async_comm=False,
            grad_scaler=None,
            deallocate_pipeline_outputs=False,
            dtype=dtype,
            sequence_parallel_enabled=sequence_parallel_enabled,
        )

    def test_pipelining_without_interleaving_encoder_and_decoder(self) -> None:
        self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=False, model_type=ModelType.encoder_and_decoder)

    def test_pipelining_without_interleaving_inferenc_encoder_and_decoder(self) -> None:
        self._forward_backward_test_impl(forward_only=True, sequence_parallel_enabled=False, model_type=ModelType.encoder_and_decoder)

    def test_pipelining_without_interleaving_sequence_paralle_encoder_and_decoder(self) -> None:
        self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=True, model_type=ModelType.encoder_and_decoder)

    def test_pipelining_without_interleaving_inference_sequence_paralle_encoder_and_decoder(self) -> None:
        self._forward_backward_test_impl(forward_only=True, sequence_parallel_enabled=True, model_type=ModelType.encoder_and_decoder)

    def test_pipelining_without_interleaving_encoder_or_decoder(self) -> None:
        self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=False, model_type=ModelType.encoder_or_decoder)

    def test_pipelining_without_interleaving_sequence_parallel_encoder_or_decoder(self) -> None:
        self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=True, model_type=ModelType.encoder_or_decoder)

    def test_pipelining_without_interleaving_sequence_parallel_encoder_or_decoder_half(self) -> None:
        self._forward_backward_test_impl(forward_only=False, sequence_parallel_enabled=True, model_type=ModelType.encoder_or_decoder, dtype=torch.half)
505
+
506
+
507
class NcclPipelineParallelWithCustomSyncContextHandler(NcclDistributedTestBase):
    """Tests that `custom_sync_context_handler` / `custom_grad_sync_func`
    hooks of the pipeline schedules are entered, exited, and invoked as
    expected, using contexts that record their own usage.
    """

    GLOBAL_BATCH_SIZE = 32
    MICRO_BATCH_SIZE = 1
    HIDDEN_SIZE = 1

    @property
    def world_size(self) -> int:
        # Cap at 8 ranks regardless of how many GPUs are visible.
        return min(torch.cuda.device_count(), 8)

    @unittest.skipIf(torch.cuda.device_count() < 2 or torch.cuda.device_count() % 2 != 0, "Requires >= 2 GPUs")
    def test_pipelining_without_interleaving_with_custom_sync_context_handler(self) -> None:
        """Non-interleaved schedule: the custom sync context must be entered and
        exited, and — since the exit hook clears every grad — only the first
        pipeline stage (whose grads are produced after the last context exit,
        per the assertions below) should end up with initialized grads.
        """

        # Parallel configuration
        world_size = torch.cuda.device_count()
        tensor_model_parallel_world_size = 1
        data_parallel_size = 2 if world_size > 2 else 1
        pipeline_model_parallel_world_size = world_size // data_parallel_size

        # Initialize pipeline parallelism
        parallel_state.initialize_model_parallel(
            tensor_model_parallel_size_=tensor_model_parallel_world_size,
            pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
        )
        pp_utils._reconfigure_microbatch_calculator(
            rank=parallel_state.get_tensor_model_parallel_rank(),
            rampup_batch_size=None,
            global_batch_size=self.GLOBAL_BATCH_SIZE,
            micro_batch_size=self.MICRO_BATCH_SIZE,
            data_parallel_size=parallel_state.get_data_parallel_world_size(),
        )
        pp_utils.update_num_microbatches(0)

        # Construct synthetic data
        dtype = get_dtype_for_comparison()
        hidden_size = self.HIDDEN_SIZE
        microbatch_size = self.MICRO_BATCH_SIZE
        global_batch_shape = (
            self.GLOBAL_BATCH_SIZE
            // parallel_state.get_data_parallel_world_size(),
            hidden_size,
            hidden_size,
        )
        batch = None
        if parallel_state.is_pipeline_first_stage():
            batch = (torch.ones(global_batch_shape, dtype=dtype).cuda(), )

        # Construct model (single chunk; DDP-wrapped so `.module` is the net)
        model = build_model(
            testing_utils.model_provider_func,
            wrap_with_ddp=True,
            hidden_size=hidden_size,
        )[0]
        model = model.to(dtype)
        model.module.apply(get_init_weights_func(0))

        # Construct context that destroys all grads on exit
        has_entered_grad_sync_context = False
        has_exited_grad_sync_context = False
        has_called_grad_sync_func = False
        @contextlib.contextmanager
        def custom_grad_sync_context():
            try:
                nonlocal has_entered_grad_sync_context
                has_entered_grad_sync_context = True
                yield
            finally:
                # On exit: record the exit and wipe every grad, so any grads
                # observed afterwards must have been produced outside the context.
                nonlocal has_exited_grad_sync_context
                has_exited_grad_sync_context = True
                for param in model.parameters():
                    param.grad = None
        def custom_grad_sync_func():
            nonlocal has_called_grad_sync_func
            has_called_grad_sync_func = True

        # Training step with pipeline parallelism
        loss = forward_backward_pipelining_without_interleaving(
            testing_utils.fwd_step_func,
            batch,
            model,
            forward_only=False,
            tensor_shape=(microbatch_size, hidden_size, hidden_size),
            dtype=dtype,
            async_comm=False,
            grad_scaler=None,
            deallocate_pipeline_outputs=False,
            sequence_parallel_enabled=False,
            custom_sync_context_handler=custom_grad_sync_context,
            custom_grad_sync_func=custom_grad_sync_func,
        )
        torch.cuda.synchronize()

        # Check if model has initialized gradients
        has_any_grads = any(param.grad is not None for param in model.parameters())
        has_all_grads = all(param.grad is not None for param in model.parameters())

        # Check context behavior
        self.assertTrue(has_entered_grad_sync_context, 'Has not entered custom sync context')
        self.assertTrue(has_exited_grad_sync_context, 'Has not exited custom sync context')
        self.assertEqual(
            has_any_grads,
            has_all_grads,
            'Expected gradients to all be uninitialized or all be initialized',
        )
        self.assertEqual(
            has_all_grads,
            parallel_state.is_pipeline_first_stage(),
            'Expected gradients to be initialized only in first pipeline stage',
        )

        # Clean up
        parallel_state.destroy_model_parallel()

    @unittest.skipIf(torch.cuda.device_count() < 4 or torch.cuda.device_count() % 2 != 0, "Requires >= 4 GPUs")
    def test_pipelining_with_interleaving_with_custom_sync_context_handler(self) -> None:
        """Interleaved schedule: the custom sync context must be entered/exited
        in matched pairs, exactly `virtual_pipeline_model_parallel_size + 1`
        times — once per model chunk plus once at the end of the schedule.
        """

        # Parallel configuration
        world_size = torch.cuda.device_count()
        tensor_model_parallel_world_size = 1
        data_parallel_size = 2 if world_size > 4 else 1
        pipeline_model_parallel_world_size = world_size // data_parallel_size
        virtual_pipeline_model_parallel_size = 2

        # Initialize pipeline parallelism
        parallel_state.initialize_model_parallel(
            tensor_model_parallel_size_=tensor_model_parallel_world_size,
            pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
            virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_size,
        )
        pp_utils._reconfigure_microbatch_calculator(
            rank=parallel_state.get_tensor_model_parallel_rank(),
            rampup_batch_size=None,
            global_batch_size=self.GLOBAL_BATCH_SIZE,
            micro_batch_size=self.MICRO_BATCH_SIZE,
            data_parallel_size=parallel_state.get_data_parallel_world_size(),
        )
        pp_utils.update_num_microbatches(0)

        # Construct synthetic data
        dtype = get_dtype_for_comparison()
        hidden_size = self.HIDDEN_SIZE
        microbatch_size = self.MICRO_BATCH_SIZE
        global_batch_shape = (
            self.GLOBAL_BATCH_SIZE
            // parallel_state.get_data_parallel_world_size(),
            hidden_size,
            hidden_size,
        )
        batch = None
        if parallel_state.is_pipeline_first_stage():
            batch = (torch.ones(global_batch_shape, dtype=dtype).cuda(), )

        # Construct model (one module per virtual chunk)
        model = build_model(
            testing_utils.model_provider_func,
            wrap_with_ddp=True,
            virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
            hidden_size=hidden_size,
        )
        for module in model:
            module.to(dtype)
            module.module.apply(get_init_weights_func(0))

        # Construct context that keeps track whenever entered/exited
        grad_sync_context_enter_count = 0
        grad_sync_context_exit_count = 0
        @contextlib.contextmanager
        def custom_grad_sync_context():
            try:
                nonlocal grad_sync_context_enter_count
                grad_sync_context_enter_count += 1
                yield
            finally:
                nonlocal grad_sync_context_exit_count
                grad_sync_context_exit_count += 1
                for module in model:
                    for param in module.parameters():
                        param.grad = None

        # Training step with pipeline parallelism
        loss = _forward_backward_pipelining_with_interleaving(
            testing_utils.fwd_step_func,
            batch,
            model,
            forward_only=False,
            tensor_shape=(microbatch_size, hidden_size, hidden_size),
            dtype=dtype,
            async_comm=False,
            grad_scaler=None,
            deallocate_pipeline_outputs=False,
            sequence_parallel_enabled=False,
            custom_sync_context_handler=custom_grad_sync_context,
        )
        torch.cuda.synchronize()

        # Check context behavior
        self.assertTrue(
            grad_sync_context_enter_count > 0,
            'Has not entered custom sync context',
        )
        self.assertEqual(
            grad_sync_context_enter_count,
            grad_sync_context_exit_count,
            'Has not entered and exited custom sync context '
            'the same number of times',
        )
        self.assertEqual(
            grad_sync_context_exit_count,
            virtual_pipeline_model_parallel_size + 1,
            'Expected to exit custom sync context once per model chunk '
            'and once at the function end',
        )

        # Clean up
        parallel_state.destroy_model_parallel()
722
+
723
+
724
if __name__ == "__main__":
    # Delegate test discovery and execution to PyTorch's internal test runner.
    common_utils.run_tests()