cyd0806 committed on
Commit
6e77ee9
·
verified ·
1 Parent(s): 1f878eb

Upload apex-master/tests/L1/transformer/pipeline_parallel_fwd_bwd_ucc_async.py with huggingface_hub

Browse files
apex-master/tests/L1/transformer/pipeline_parallel_fwd_bwd_ucc_async.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ import itertools
4
+ from typing import Optional, Tuple, List
5
+ import unittest
6
+
7
+ import torch
8
+ from torch.testing._internal import common_utils
9
+ from torch.testing._internal import common_cuda
10
+ from torch.testing._internal import common_distributed
11
+
12
+ from apex._autocast_utils import _get_autocast_dtypes
13
+ from apex.transformer import parallel_state
14
+ from apex.transformer.pipeline_parallel import utils as pp_utils
15
+ from apex.transformer.pipeline_parallel.schedules.common import (
16
+ FwdStepFunc,
17
+ build_model,
18
+ _get_params_for_weight_decay_optimization,
19
+ )
20
+ from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import (
21
+ forward_backward_no_pipelining,
22
+ )
23
+ from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
24
+ _forward_backward_pipelining_with_interleaving,
25
+ )
26
+ from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
27
+ forward_backward_pipelining_without_interleaving,
28
+ )
29
+ from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
30
+ from apex.transformer.testing import commons as testing_utils
31
+
32
+
33
+ logging.getLogger("torch").setLevel(logging.WARNING)
34
+ logging.getLogger("apex").setLevel(logging.WARNING)
35
+
36
+
37
+ def _get_default_world_sizes_model_parallel_world_size(pipeline_model_parallel_world_size: Optional[int] = None
38
+ ) -> Tuple[int, int, int]:
39
+ # TODO: revisit if we can fold this into the class for skip logic / avoid duplication
40
+ # of world size computation
41
+ world_size = torch.cuda.device_count()
42
+ tensor_model_parallel_world_size = 1
43
+ data_parallel_size = 1 + (world_size >= 8 and world_size % 2 == 0)
44
+
45
+ if pipeline_model_parallel_world_size is None:
46
+ pipeline_model_parallel_world_size = world_size // (tensor_model_parallel_world_size * data_parallel_size)
47
+ else:
48
+ data_parallel_size = world_size // (tensor_model_parallel_world_size * pipeline_model_parallel_world_size)
49
+
50
+ return tensor_model_parallel_world_size, data_parallel_size, pipeline_model_parallel_world_size
51
+
52
+
53
class UccPipelineParallelForwardBackwardProf(UccDistributedTestBase):
    """Pipeline-parallel forward/backward schedules exercised over the UCC backend.

    The purpose of this class is to test and confirm asynchronous communication
    via profiling; with that in mind it is safe to skip all the numerical
    checks.  For unit testing with numerical checks please refer to
    `tests/L0/run_transformer/test_pipeline_parallel_fwd_bwd.py`.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Batch/model geometry shared by every test in the class.
        self.GLOBAL_BATCH_SIZE = 1024
        self.MICRO_BATCH_SIZE = 64
        self.HIDDEN_SIZE = 256
        self.NUM_FWD_BWD_ITERATIONS = 4
        # One option per axis: numerics are not checked here, so a single
        # configuration keeps the profiled runs short.
        self.deallocate_options = (False,)
        self.dtypes = (torch.float32,)

    @property
    def world_size(self) -> int:
        """Number of ranks to launch, capped at 8 regardless of visible GPUs."""
        return min(torch.cuda.device_count(), 8)

    def _forward_backward_test_impl(
        self,
        forward_only: bool,
        fwd_bwd_func: FwdStepFunc,
        pipeline_model_parallel_world_size: Optional[int],
        virtual_pipeline_model_parallel_size: Optional[int],
        async_comm: bool = False,
        *,
        default_backend: Optional[str] = None,
        p2p_backend: Optional[str] = None,
    ) -> None:
        """Run ``fwd_bwd_func`` for every (dtype, deallocate-option) combination.

        Args:
            forward_only: Skip the backward pass when True.
            fwd_bwd_func: One of the apex pipeline schedule entry points.
            pipeline_model_parallel_world_size: Pipeline depth; ``None`` derives
                it from the visible world size.
            virtual_pipeline_model_parallel_size: Interleaving factor; must be
                > 1 for the interleaved schedule, otherwise ``None``.
            async_comm: Forwarded to the schedule to enable async p2p comm.
            default_backend: Process-group backend override forwarded to
                ``parallel_state.initialize_model_parallel``.
            p2p_backend: Backend override for the p2p process groups.
        """
        if fwd_bwd_func == _forward_backward_pipelining_with_interleaving:
            # The interleaved schedule is only meaningful with >1 virtual stages.
            self.assertIsNotNone(virtual_pipeline_model_parallel_size)
            self.assertGreater(virtual_pipeline_model_parallel_size, 1)
        dtype_options = self.dtypes or [torch.float32, torch.double] + _get_autocast_dtypes()

        for dtype, deallocate_pipeline_outputs in itertools.product(
            dtype_options, self.deallocate_options,
        ):
            # Gradient scaling is only required for fp16.
            grad_scaler = (
                torch.amp.GradScaler('cuda', init_scale=4.0)
                if dtype == torch.half
                else None
            )

            (tensor_model_parallel_world_size,
             data_parallel_size,
             pipeline_model_parallel_world_size) = _get_default_world_sizes_model_parallel_world_size(pipeline_model_parallel_world_size)

            parallel_state.initialize_model_parallel(
                tensor_model_parallel_size_=tensor_model_parallel_world_size,
                pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
                virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_size,
                default_backend=default_backend,
                p2p_backend=p2p_backend,
            )
            pp_utils._reconfigure_microbatch_calculator(
                rank=parallel_state.get_tensor_model_parallel_rank(),
                rampup_batch_size=None,
                global_batch_size=self.GLOBAL_BATCH_SIZE,
                micro_batch_size=self.MICRO_BATCH_SIZE,
                data_parallel_size=parallel_state.get_data_parallel_world_size(),
            )

            # Per-data-parallel-rank slice of the global batch.
            global_batch_shape = (
                self.GLOBAL_BATCH_SIZE
                // parallel_state.get_data_parallel_world_size(),
                self.HIDDEN_SIZE,
                self.HIDDEN_SIZE,
            )

            # Only the first pipeline stage feeds real input; later stages
            # receive activations from their predecessor.
            batch = None
            if parallel_state.is_pipeline_first_stage():
                batch = (torch.ones(global_batch_shape, dtype=dtype).cuda(), )

            model = build_model(
                testing_utils.model_provider_func,
                # Use DDP only when it's better to have
                wrap_with_ddp=data_parallel_size > 1,
                virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
                hidden_size=self.HIDDEN_SIZE,
            )

            # `nn.Module.to` converts parameters in place, so no reassignment
            # (or index) is needed.
            for model_module in model:
                model_module.to(dtype)

            # NOTE(review): the optimizer is constructed but never stepped —
            # only communication traffic matters for this profiling test.
            _param_groups = _get_params_for_weight_decay_optimization(model)
            optimizer = torch.optim.Adam(_param_groups, lr=1e-3)

            pp_utils.update_num_microbatches(0)

            # The returned losses are intentionally ignored; these iterations
            # exist purely to generate communication for the profiler.
            for _ in range(self.NUM_FWD_BWD_ITERATIONS):
                fwd_bwd_func(
                    testing_utils.fwd_step_func,
                    batch,
                    model,
                    forward_only=forward_only,
                    # `tensor_shape` is the shape of micro batch.
                    tensor_shape=(
                        self.MICRO_BATCH_SIZE,
                        self.HIDDEN_SIZE,
                        self.HIDDEN_SIZE,
                    ),
                    dtype=dtype,
                    async_comm=async_comm,
                    grad_scaler=grad_scaler,
                    deallocate_pipeline_output=deallocate_pipeline_outputs,
                )

            parallel_state.destroy_model_parallel()

    def test_learning_no_pipelining(self):
        self._forward_backward_test_impl(False, forward_backward_no_pipelining, 1, None)

    def test_inference_no_pipelining(self):
        self._forward_backward_test_impl(True, forward_backward_no_pipelining, 1, None)

    def test_learning_pipelining_without_interleaving(self):
        self._forward_backward_test_impl(
            False, forward_backward_pipelining_without_interleaving, None, None
        )

    def test_inference_pipelining_without_interleaving(self):
        self._forward_backward_test_impl(
            True, forward_backward_pipelining_without_interleaving, None, None
        )

    def test_learning_async_pipelining_without_interleaving(self):
        self._forward_backward_test_impl(
            False, forward_backward_pipelining_without_interleaving, None, None, async_comm=True
        )

    def test_inference_async_pipelining_without_interleaving(self):
        self._forward_backward_test_impl(
            True, forward_backward_pipelining_without_interleaving, None, None, async_comm=True
        )

    @unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
    def test_learning_pipelining_with_interleaving(self):
        self._forward_backward_test_impl(
            False, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2
        )

    @unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
    def test_inference_pipelining_with_interleaving(self):
        self._forward_backward_test_impl(
            True, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2
        )

    @unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
    def test_learning_async_pipelining_with_interleaving(self):
        self._forward_backward_test_impl(
            False, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2, async_comm=True
        )

    @unittest.skipUnless(_get_default_world_sizes_model_parallel_world_size()[-1] > 2, "Interleaved schedule requires pipeline_model_parallel_world_size > 2")
    def test_inference_async_pipelining_with_interleaving(self):
        self._forward_backward_test_impl(
            True, _forward_backward_pipelining_with_interleaving, None, virtual_pipeline_model_parallel_size=2, async_comm=True
        )
214
+
215
+
216
if __name__ == "__main__":
    # Pin UCC to the UCP + CUDA transports so the async communication path
    # under test is the one actually exercised.
    os.environ.update({"UCC_TLS": "ucp,cuda"})
    # Distributed pipeline tests start slowly; raise the per-test timeout.
    setattr(common_distributed, "TIMEOUT_DEFAULT", 500)
    common_utils.run_tests()