cyd0806 committed on
Commit
66587a1
·
verified ·
1 Parent(s): c3750e0

Upload apex-master/tests/L0/run_transformer/test_dynamic_batchsize.py with huggingface_hub

Browse files
apex-master/tests/L0/run_transformer/test_dynamic_batchsize.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple, List
2
+
3
+ import torch
4
+ import unittest
5
+
6
+ from apex.transformer import parallel_state
7
+ from apex.transformer.pipeline_parallel.utils import get_num_microbatches
8
+ from apex.transformer.pipeline_parallel.schedules.common import (
9
+ _get_params_for_weight_decay_optimization, build_model
10
+ )
11
+ from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
12
+ _forward_backward_pipelining_with_interleaving,
13
+ )
14
+ from apex.transformer.pipeline_parallel.utils import (
15
+ setup_microbatch_calculator, _reconfigure_microbatch_calculator, update_num_microbatches
16
+ )
17
+ from apex.transformer.testing import global_vars
18
+ from apex.transformer.testing.commons import (
19
+ print_separator, fwd_step_func, model_provider_func
20
+ )
21
+ from apex.transformer.log_util import get_transformer_logger
22
+ from apex.transformer._data import MegatronPretrainingRandomSampler, MegatronPretrainingSampler
23
+ from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
24
+
25
+ from torch.testing._internal import common_utils
26
+
27
# note(mkozuki): To see warmup, steady, cooldown iterations, uncomment the line below
# set_logging_level("INFO")
_logger = get_transformer_logger("pipeline_parallel_test")
# note(mkozuki): To see if local batch size increases, uncomment the line below
# _logger.setLevel("INFO")


# Number of train steps each test case drives through the pipeline schedule.
NUM_ITERATIONS = 20
# Total sample count in the synthetic dataset (16384 halved to keep runtime down).
NUM_SAMPLES = 16384 // 2
# Feature dimension of the toy model / synthetic tensors.
HIDDEN_SIZE = 16
37
+
38
+
39
def Dataset(num_samples: int) -> List[Tuple[torch.Tensor, torch.Tensor]]:
    """Build a synthetic dataset of ``num_samples`` (input, target) pairs.

    Each input is a random ``HIDDEN_SIZE x HIDDEN_SIZE`` tensor and each
    target a random ``HIDDEN_SIZE//2 x HIDDEN_SIZE//2`` tensor.
    """
    samples = []
    for _ in range(num_samples):
        pair = (
            torch.randn(HIDDEN_SIZE, HIDDEN_SIZE),
            torch.randn(HIDDEN_SIZE // 2, HIDDEN_SIZE // 2),
        )
        samples.append(pair)
    return samples
47
+
48
+
49
# Run forward & backward with dynamic batch size.
def run_interleaved_with_dynamic_batch_size(
    pipeline_model_parallel_size: int, forward_only: bool, BatchSamplerCls,
) -> None:
    """Drive the interleaved pipeline schedule while the global batch size ramps up.

    Reconfigures the microbatch calculator from the global args, initializes
    model parallel state with a virtual pipeline size of 2, builds a DDP-wrapped
    model list, then runs ``NUM_ITERATIONS`` steps, resizing the sampler's local
    minibatch each step from the current number of microbatches.

    Args:
        pipeline_model_parallel_size: requested pipeline-parallel world size.
        forward_only: skip the backward pass when True.
        BatchSamplerCls: sampler class (Megatron pretraining [random] sampler)
            constructed as (total_samples, consumed_samples, local_minibatch_size,
            data_parallel_rank, data_parallel_size) — inferred from the call site;
            confirm against the sampler's signature.
    """
    args = global_vars.get_args()
    _reconfigure_microbatch_calculator(
        args.rank,
        args.rampup_batch_size,
        args.global_batch_size,
        args.micro_batch_size,
        1,  # args.data_parallel_size,
    )
    virtual_pipeline_model_parallel_size = 2
    # NOTE (mkozuki): `virtual_pipeline_model_parallel_size` is a requisite for the interleaving scheduling
    # In megatron, `args.virtual_pipeline_model_parallel_size` is computed in megatron/arguments.py and
    # used ubiquitously but this test uses custom model so it's safe to abuse.
    parallel_state.initialize_model_parallel(
        1, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size
    )
    # Re-read the actual world size chosen by parallel_state (may differ from the request).
    pipeline_model_parallel_size = (
        parallel_state.get_pipeline_model_parallel_world_size()
    )

    print_separator(
        f"BatchSamplerCls: {BatchSamplerCls.__name__}, forward_only: {forward_only}"
    )

    # One model chunk per virtual pipeline stage.
    model = build_model(
        model_provider_func,
        wrap_with_ddp=True,
        virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
        hidden_size=HIDDEN_SIZE,
    )
    assert isinstance(model, list)
    assert len(model) == virtual_pipeline_model_parallel_size
    optimizer = torch.optim.Adam(
        _get_params_for_weight_decay_optimization(model))

    # Local minibatch before any ramp-up step is applied.
    initial_local_minibatch_size = get_num_microbatches() * args.micro_batch_size
    dataset = Dataset(NUM_SAMPLES)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_sampler=BatchSamplerCls(
            NUM_SAMPLES,
            0,
            initial_local_minibatch_size,
            parallel_state.get_data_parallel_rank(),
            parallel_state.get_data_parallel_world_size(),
        ),
    )
    data_iter = iter(data_loader)

    def get_num_samples(batch):
        # Recursively report batch sizes: a tensor yields its length,
        # a list/tuple yields a nested list of lengths.
        if isinstance(batch, torch.Tensor):
            return len(batch)
        assert isinstance(batch, (list, tuple))
        return [get_num_samples(b) for b in batch]

    tensor_shape = [args.micro_batch_size, HIDDEN_SIZE, HIDDEN_SIZE]
    consumed_samples = 0
    for i in range(NUM_ITERATIONS):
        # Advance the ramp-up schedule, then resize the sampler's local
        # minibatch in place (reaches into the DataLoader's private
        # `_index_sampler`) so the next batch reflects the new size.
        update_num_microbatches(consumed_samples, consistency_check=False)
        local_batch_size = get_num_microbatches() * args.micro_batch_size
        data_iter._index_sampler.local_minibatch_size = local_batch_size
        local_mini_batch = next(data_iter)

        _logger.info(
            f"iter: {i} / {NUM_ITERATIONS} "
            f"local batchsize: {get_num_samples(local_mini_batch)} "
            f"consumed_samples: {consumed_samples} / {NUM_SAMPLES}"
        )
        _forward_backward_pipelining_with_interleaving(
            fwd_step_func,
            local_mini_batch,
            model,
            forward_only=forward_only,
            tensor_shape=tensor_shape,
        )

        consumed_samples += (
            parallel_state.get_data_parallel_world_size()
            * get_num_microbatches()
            * args.micro_batch_size
        )

    if not forward_only:
        # Every parameter must have received a gradient from the backward pass.
        for m in model:
            for p in m.parameters():
                if p.grad is None:
                    raise RuntimeError("grad not found")
        else:
            # for-else: runs after the loop completes without `break`,
            # i.e. once all grads are verified, reset them.
            optimizer.zero_grad(set_to_none=True)

    torch.cuda.synchronize()
143
+
144
+
145
class DynamicBatchsizeTestBase:
    """Mixin exercising interleaved pipelining with a ramping batch size.

    Concrete subclasses combine this with a distributed test base that
    provides ``self.world_size`` and ``self.rank``.
    """

    @unittest.skipUnless(torch.cuda.device_count() > 2, "requires at least 3 gpus")
    def test_dynamic_batchsize(self):
        """Run every (sampler class, forward_only) combination; aggregate failures.

        Raises:
            RuntimeError: at the end, if any combination failed — the message
                lists how many of the cases failed and each failure's detail.
        """
        n_tests = 0
        failures = []

        # Small model/config so the test stays fast; world_size/rank come
        # from the distributed test base this mixin is combined with.
        override_args = {
            "micro_batch_size": 2,
            "num_layers": 16,
            "hidden_size": 256,
            "num_attention_heads": 8,
            "max_position_embeddings": 512,
            "seq_length": 512,
            "global_batch_size": 128,
            "use_cpu_initialization": True,
            "world_size": self.world_size,
            "rank": self.rank,
        }

        # rampup_batch_size = [start, increment, samples]: batch size ramps
        # from 64 toward the global batch size — NOTE(review): semantics taken
        # from the argument name; confirm against the microbatch calculator.
        global_vars.set_global_variables(
            args_defaults={"global_batch_size": 512,
                           "rampup_batch_size": [64, 64, 1000], },
            ignore_unknown_args=True,
            override_args=override_args,
        )

        args = global_vars.get_args()

        setup_microbatch_calculator(
            args.rank,
            args.rampup_batch_size,
            args.global_batch_size,
            args.micro_batch_size,
            1,  # args.data_parallel_size,
        )
        for BatchSamplerCls in (
            MegatronPretrainingSampler,
            MegatronPretrainingRandomSampler,
        ):
            for forward_only in (False, True):
                n_tests += 1
                pipeline_model_parallel_size = self.world_size
                try:
                    run_interleaved_with_dynamic_batch_size(
                        pipeline_model_parallel_size, forward_only, BatchSamplerCls,
                    )
                except Exception as e:
                    msg = (
                        f"\tBatchSamplerCls: {BatchSamplerCls.__name__}, "
                        f"forward_only: {forward_only}\n"
                        f"pipeline rank: {parallel_state.get_pipeline_model_parallel_rank()}, "
                        f"virtual pipeline rank: {parallel_state.get_virtual_pipeline_model_parallel_rank()}\n"
                        f"{str(e)}"
                    )
                    # Collect the failure instead of raising immediately:
                    # previously the first failure aborted the loop, so the
                    # `failures` aggregation below was unreachable dead code
                    # and later combinations never ran.
                    failures.append(msg)
                finally:
                    # Model-parallel groups must be torn down between cases.
                    parallel_state.destroy_model_parallel()
        if failures:
            print_separator("TEST FAILED:")
            print("\n".join(failures))
            msg = f"{len(failures)} / {n_tests} cases failed"
            raise RuntimeError(msg)
        else:
            if torch.distributed.get_rank() == 0:
                print_separator("TEST RESULT: ### PASS!")
210
+
211
+
212
class NcclDynamicBatchsizeTest(DynamicBatchsizeTestBase, NcclDistributedTestBase):
    """Run the dynamic batch size test over the NCCL distributed backend."""
    pass
214
+
215
+ # TODO: (Fuzzkatt) UCC still doesn't work with fwd_bwd_pipelining_with_interleaving
216
+
217
+
218
if __name__ == "__main__":
    # Disable TF32 matmuls — presumably to keep numerics deterministic /
    # full-precision for the pipeline comparisons; NOTE(review): confirm.
    torch.backends.cuda.matmul.allow_tf32 = False
    common_utils.run_tests()