cyd0806 committed on
Commit
8ad53b1
·
verified ·
1 Parent(s): 5169fd4

Upload apex-master/tests/L0/run_transformer/test_gpt_minimal.py with huggingface_hub

Browse files
apex-master/tests/L0/run_transformer/test_gpt_minimal.py ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import List
3
+ import time
4
+
5
+ import torch
6
+
7
+ import unittest
8
+
9
+ from apex.transformer._ucc_util import HAS_UCC
10
+ from apex.transformer import parallel_state
11
+ from apex.transformer.enums import ModelType
12
+ from apex.transformer.tensor_parallel import model_parallel_cuda_manual_seed
13
+ from apex.transformer.pipeline_parallel.utils import (
14
+ average_losses_across_data_parallel_group, unwrap_model, setup_microbatch_calculator,
15
+ get_ltor_masks_and_position_ids
16
+ )
17
+ from apex.transformer.pipeline_parallel.schedules.common import (
18
+ _get_params_for_weight_decay_optimization, build_model
19
+ )
20
+ from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
21
+ forward_backward_pipelining_without_interleaving,
22
+ )
23
+ from apex.transformer.testing.standalone_gpt import gpt_model_provider
24
+ from apex.transformer.testing import global_vars
25
+
26
+ from apex.transformer.testing.distributed_test_base import UccDistributedTestBase, NcclDistributedTestBase
27
+
28
+ from torch.testing._internal import common_utils
29
+ from torch.testing._internal.common_device_type import instantiate_device_type_tests
30
+
31
+
32
+ class GptTestBase:
33
+
34
+ def _download_fancy_data(self):
35
+ text = """
36
+ An original sentence not subject to any license restrictions, copyright, or royalty payments. Nothing to see here. Commercial or non-commercial use. Research or non-research purposes. The quick brown fox jumps over the lazy dog. Lorem ipsum.
37
+ """
38
+ text = text * 1024
39
+ encoded = text.encode("ascii", "replace")
40
+ ints = [int(encoded[i]) for i in range(len(encoded))]
41
+ return torch.tensor(ints)
42
+
43
    # build a batch given sequence_len and batch size
    def _generate_fancy_data_labels(self, sequence_len, batch_size):
        """Sample ``batch_size`` windows of ``sequence_len + 1`` tokens each
        from the module-global ``fancy_data`` tensor.

        Window start offsets come from a shuffled index list (``self.inds``)
        that is re-generated with an incremented manual seed whenever it is
        exhausted, so every rank draws the same offset sequence.

        NOTE(review): relies on module globals ``fancy_data`` and
        ``effective_length`` being initialized by ``test_gpt`` before the
        first call — confirm when reusing this helper elsewhere.
        """
        temps = list()
        for i in range(batch_size):
            if self.inds is None or self.data_idx >= len(self.inds):
                # hack as use of RNG will fall out of sync due to pipelines being different
                # (re-seeding keeps the permutation identical on all ranks)
                model_parallel_cuda_manual_seed(self.MANUAL_SEED)
                self.inds = torch.randperm(effective_length, device="cuda")
                self.MANUAL_SEED += 1
                self.data_idx = 0
            data_idx_ = self.data_idx
            offset = self.inds[data_idx_]
            self.data_idx += 1
            # sequence_len + 1 tokens: inputs plus the shifted-by-one labels.
            curr = fancy_data[offset: offset +
                              sequence_len + 1].clone().detach()
            temps.append(curr)
        temp = torch.stack(temps, dim=0).cuda()
        return temp
62
+ def _get_batch(self, int_tensors: List[torch.Tensor]):
63
+ data = int_tensors[0]
64
+ # Unpack.
65
+ tokens_ = data.long()
66
+ labels = tokens_[:, 1:].contiguous()
67
+ tokens = tokens_[:, :-1].contiguous()
68
+ # Get the masks and position ids.
69
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
70
+ tokens,
71
+ self.N_VOCAB, # tokenizer.eod,
72
+ False, # args.reset_position_ids,
73
+ False, # args.reset_attention_mask,
74
+ False, # args.eod_mask_loss,
75
+ )
76
+ return tokens, labels, loss_mask, attention_mask, position_ids
77
+
78
+ # Ref: https://github.com/NVIDIA/Megatron-LM/blob/b31e1296354e979722627a6c4dedafe19b51fa97/pretrain_gpt.py#L75
79
+ def _loss_func(self, loss_mask, output_tensor):
80
+ losses = output_tensor.float()
81
+ loss_mask = loss_mask.view(-1).float()
82
+ loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
83
+
84
+ # Reduce loss for logging.
85
+ averaged_loss = average_losses_across_data_parallel_group([loss])
86
+
87
+ return loss, {"lm loss": averaged_loss[0]}
88
+
89
+ # Ref: https://github.com/NVIDIA/Megatron-LM/blob/b31e1296354e979722627a6c4dedafe19b51fa97/pretrain_gpt.py#L86
90
+ def _fwd_step_func(self, batch, model):
91
+ """Forward step."""
92
+ tokens, labels, loss_mask, attention_mask, position_ids = self._get_batch(
93
+ batch)
94
+ output_tensor = model(tokens, position_ids,
95
+ attention_mask, labels=labels)
96
+ return output_tensor, partial(self._loss_func, loss_mask)
97
+
98
    def _train(self, model, optim, pipeline_model_parallel_size, async_comm):
        """Run three pipelined fwd/bwd training iterations and return the
        average wall-clock seconds per iteration.

        Args:
            model: list of pipeline-stage modules (possibly DDP-wrapped).
            optim: optimizer over all model parameters.
            pipeline_model_parallel_size: number of pipeline stages; one
                batch is generated per stage below.
            async_comm: forwarded to the pipeline schedule.
        """
        args = global_vars.get_args()
        fwd_bwd_func = forward_backward_pipelining_without_interleaving

        # Inter-stage activation tensor shape: (seq_length, micro_batch_size,
        # hidden_size), as required by the schedule's tensor_shape argument.
        tensor_shape = (args.seq_length, args.micro_batch_size,
                        args.hidden_size)
        runtime = 0
        # training loop
        for i in range(3):
            since = time.time()
            if torch.distributed.get_rank() == 0:
                print("begin iter", i)
            # One batch per pipeline stage; generation stays in lock-step
            # across ranks via the manual-seed hack in
            # _generate_fancy_data_labels.
            batch = [
                self._generate_fancy_data_labels(
                    args.seq_length, args.global_batch_size)
                for _ in range(pipeline_model_parallel_size)
            ]
            if torch.distributed.get_rank() == 0:
                print("finished making batch...")
            optim.zero_grad()
            # Runs forward AND backward across all microbatches.
            fwd_bwd_func(
                self._fwd_step_func,
                batch,
                model,
                forward_only=False,
                tensor_shape=tensor_shape,
                async_comm=async_comm,
                sequence_parallel_enabled=args.sequence_parallel,
            )
            if torch.distributed.get_rank() == 0:
                print("finished forward step")
            # All-reduce layernorm parameters across model parallel nodes
            # when sequence parallelism is used
            if parallel_state.get_tensor_model_parallel_world_size() > 1 and global_vars.get_args().sequence_parallel:
                for model_module in model:
                    unwrapped_model = unwrap_model(model_module)
                    for param in unwrapped_model.parameters():
                        # Params flagged sequence_parallel_enabled hold grads
                        # that each TP rank computed on its sequence shard.
                        if getattr(param, 'sequence_parallel_enabled', False):
                            grad = param.grad
                            torch.distributed.all_reduce(
                                grad, group=parallel_state.get_tensor_model_parallel_group())
            optim.step()
            if torch.distributed.get_rank() == 0:
                print("finished iter", i)
            runtime += time.time() - since
        # Divisor matches the fixed 3-iteration loop above.
        return runtime / 3.0
145
+ @unittest.skipUnless(torch.cuda.device_count() > 2, "requires at least 3 gpus")
146
+ def test_gpt(self):
147
+ self.MANUAL_SEED = 42
148
+ self.inds = None
149
+ self.data_idx = 0
150
+ self.N_VOCAB = 128
151
+ init = True
152
+
153
+ tensor_model_parallel_size = 2 if self.world_size % 2 == 0 and self.world_size >= 4 else 1
154
+ pipeline_model_parallel_size = self.world_size // tensor_model_parallel_size
155
+
156
+ override_args = {
157
+ "micro_batch_size": 2,
158
+ "num_layers": 16,
159
+ "hidden_size": 256,
160
+ "num_attention_heads": 8,
161
+ "max_position_embeddings": 512,
162
+ "seq_length": 512,
163
+ "global_batch_size": 128,
164
+ "pipeline_model_parallel_size": pipeline_model_parallel_size,
165
+ "tensor_model_parallel_size": tensor_model_parallel_size,
166
+ "world_size": self.world_size,
167
+ "rank": self.rank,
168
+ }
169
+
170
+ global_vars.set_global_variables(override_args=override_args, ignore_unknown_args=True)
171
+ args = global_vars.get_args()
172
+
173
+ for async_comm in (False,) if args.sequence_parallel else (False, True):
174
+ global fancy_data
175
+ global effective_length
176
+
177
+ if init:
178
+ init = False
179
+
180
+ fancy_data = self._download_fancy_data()
181
+ args = global_vars.get_args()
182
+ args.model_type = ModelType.encoder_or_decoder
183
+ effective_length = fancy_data.size(0) // args.seq_length
184
+ effective_length = fancy_data.size(0) - args.seq_length
185
+
186
+ args.padded_vocab_size = 128
187
+ setup_microbatch_calculator(
188
+ args.rank,
189
+ args.rampup_batch_size,
190
+ args.global_batch_size,
191
+ args.micro_batch_size,
192
+ args.data_parallel_size,
193
+ )
194
+
195
+ print(args.tensor_model_parallel_size, "MODEL PARALLEL SIZE")
196
+
197
+ parallel_state.initialize_model_parallel(
198
+ tensor_model_parallel_size_=args.tensor_model_parallel_size,
199
+ pipeline_model_parallel_size_=args.pipeline_model_parallel_size,
200
+ default_backend="nccl",
201
+ p2p_backend=self.DISTRIBUTED_BACKEND,
202
+ )
203
+
204
+ model_parallel_cuda_manual_seed(0)
205
+ model = build_model(
206
+ gpt_model_provider,
207
+ wrap_with_ddp=parallel_state.get_data_parallel_world_size() > 1,
208
+ virtual_pipeline_model_parallel_size=None,
209
+ cpu_offload=args.cpu_offload,
210
+ )
211
+ assert isinstance(model, list), model
212
+ _param_groups = _get_params_for_weight_decay_optimization(model)
213
+ optim = torch.optim.Adam(_param_groups)
214
+ runtime = self._train(
215
+ model, optim, args.pipeline_model_parallel_size, async_comm)
216
+
217
+ parallel_state.destroy_model_parallel()
218
+ torch.cuda.synchronize()
219
+
220
+
221
class NcclGptTest(GptTestBase, NcclDistributedTestBase):
    @property
    def world_size(self) -> int:
        """Use every visible GPU, capped at 8 ranks."""
        device_count = torch.cuda.device_count()
        return device_count if device_count < 8 else 8
226
+
227
@unittest.skipUnless(HAS_UCC, "requires pytorch to be built with native ucc")
class UccGptTest(GptTestBase, UccDistributedTestBase):
    @property
    def world_size(self) -> int:
        """Use every visible GPU, capped at 8 ranks."""
        return min(8, torch.cuda.device_count())
232
+
233
+
234
if __name__ == "__main__":
    # Dispatch through PyTorch's common_utils so the standard test-runner
    # CLI flags (filtering, retries, etc.) are honored.
    common_utils.run_tests()