cyd0806 committed on
Commit
0e80ec6
·
verified ·
1 Parent(s): 64fa5b0

Upload apex-master/tests/L0/run_transformer/test_batch_sampler.py with huggingface_hub

Browse files
apex-master/tests/L0/run_transformer/test_batch_sampler.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.testing._internal import common_utils
3
+ from torch.utils.data import Dataset
4
+ from torch.utils.data import DataLoader
5
+
6
+ from apex.transformer.pipeline_parallel.utils import _split_batch_into_microbatch as split_batch_into_microbatch
7
+
8
+
9
class MyIterableDataset(Dataset):
    """Map-style dataset yielding the integers ``start`` .. ``end - 1``.

    Despite its name this is a map-style ``Dataset`` (``__getitem__``-based),
    which is what ``DataLoader`` requires when driven by a ``batch_sampler``;
    ``__iter__`` is provided only for direct iteration.
    """

    def __init__(self, start, end):
        super().__init__()
        # Fixed: the message previously said "end >= start" while the check
        # is strictly greater-than.
        assert end > start, "this example code only works with end > start"
        self.start = start
        self.end = end
        self.samples = list(range(self.start, self.end))

    def __len__(self):
        # Added for consistency with the inner dataset defined in
        # TestBatchSamplerBehavior.test_split_batch.
        return self.end - self.start

    def __iter__(self):
        return iter(range(self.start, self.end))

    def __getitem__(self, index):
        return self.samples[index]
22
+
23
+
24
class MegatronPretrainingRandomSampler:
    """Batch sampler yielding shuffled micro-batches of dataset indices for
    one data-parallel rank (adapted from Megatron-LM pretraining).

    ``__iter__`` re-derives the epoch from ``consumed_samples`` so shuffling
    is reproducible across checkpoint restarts; an incomplete trailing batch
    is dropped each epoch.
    """

    def __init__(self, total_samples, consumed_samples, micro_batch_size,
                 data_parallel_rank, data_parallel_size):
        # Keep a copy of input params for later use.
        self.total_samples = total_samples
        self.consumed_samples = consumed_samples
        self.micro_batch_size = micro_batch_size
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_size = data_parallel_size
        self.micro_batch_times_data_parallel_size = \
            self.micro_batch_size * data_parallel_size
        # Trailing samples that cannot fill one micro-batch across all
        # data-parallel ranks; these are excluded from every epoch.
        self.last_batch_size = \
            self.total_samples % self.micro_batch_times_data_parallel_size

        # Sanity checks.
        assert self.total_samples > 0, \
            'no sample to consume: {}'.format(self.total_samples)
        assert self.micro_batch_size > 0
        assert data_parallel_size > 0
        assert self.data_parallel_rank < data_parallel_size, \
            'data_parallel_rank should be smaller than data size: {}, ' \
            '{}'.format(self.data_parallel_rank, data_parallel_size)

    def __len__(self):
        # NOTE(review): returns the total number of samples, not the number
        # of batches yielded — matches the upstream Megatron convention.
        return self.total_samples

    def __iter__(self):
        # Resume mid-epoch: figure out which epoch we are in and how many
        # samples of it were already consumed before this call.
        active_total_samples = self.total_samples - self.last_batch_size
        self.epoch = self.consumed_samples // active_total_samples
        current_epoch_samples = self.consumed_samples % active_total_samples
        assert current_epoch_samples % self.micro_batch_times_data_parallel_size == 0

        # data sharding and random sampling
        # Each rank owns a contiguous bucket of the (truncated) index space;
        # bucket_offset skips this rank's share of already-consumed samples.
        bucket_size = (self.total_samples // self.micro_batch_times_data_parallel_size) * self.micro_batch_size
        bucket_offset = current_epoch_samples // self.data_parallel_size
        start_idx = self.data_parallel_rank * bucket_size

        # Seed the permutation by epoch so every restart of the same epoch
        # reproduces the same shuffle.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        random_idx = torch.randperm(bucket_size, generator=g).tolist()
        idx_range = [start_idx + x for x in random_idx[bucket_offset:]]

        batch = []
        # Last batch if not complete will be dropped.
        for idx in idx_range:
            batch.append(idx)
            if len(batch) == self.micro_batch_size:
                # Advance the global counter by the whole data-parallel
                # width, since every rank yields one micro-batch in lockstep.
                self.consumed_samples += self.micro_batch_times_data_parallel_size
                yield batch
                batch = []
75
+
76
+
77
class TestBatchSamplerBehavior(common_utils.TestCase):
    """Sampler behavior tests.

    Draws 8 samples in total two ways — 2 batches of 4, then 4 batches of 2 —
    and checks the underlying sample stream is identical; also checks that a
    global batch splits cleanly into micro-batches.
    """

    def tearDown(self) -> None:
        # Release any cached GPU memory between tests.
        torch.cuda.empty_cache()
        super().tearDown()

    @staticmethod
    def _take_batches(loader, count):
        """Collect the first *count* batches from *loader*."""
        return [b for _, b in zip(range(count), loader)]

    def test_batch_sampler_behavior(self):
        dataset = MyIterableDataset(0, 100)

        for num_workers in (1, 2, 4):
            # Same seed, micro-batch size 4, take 2 batches (8 samples)...
            torch.manual_seed(42)
            coarse = self._take_batches(
                DataLoader(
                    dataset,
                    batch_sampler=MegatronPretrainingRandomSampler(100, 0, 4, 0, 1),
                    num_workers=num_workers,
                ),
                2,
            )

            # ...must produce the same stream as size 2 taken 4 times.
            torch.manual_seed(42)
            fine = self._take_batches(
                DataLoader(
                    dataset,
                    batch_sampler=MegatronPretrainingRandomSampler(100, 0, 2, 0, 1),
                    num_workers=num_workers,
                ),
                4,
            )
            self.assertEqual(torch.cat(coarse), torch.cat(fine), msg=f"num_workers={num_workers}")

    def test_split_batch(self):

        class MyIterableDataset(Dataset):
            def __init__(self, start, end):
                super().__init__()
                assert end > start, "this example code only works with end >= start"
                self.start = start
                self.end = end
                self.samples = list(range(self.start, self.end))

            def __len__(self):
                return self.end - self.start

            def __iter__(self):
                return iter(range(self.start, self.end))

            def __getitem__(self, index):
                # Two tensors per sample so splitting is checked on tuples.
                half = index // 2
                return (torch.tensor([index, index]), torch.tensor([half, half]))

        dataset = MyIterableDataset(0, 100)
        torch.manual_seed(42)
        global_batch_size = 16
        loader = DataLoader(
            dataset,
            batch_sampler=MegatronPretrainingRandomSampler(100, 0, global_batch_size, 0, 1),
            num_workers=2,
        )
        batch = next(iter(loader))

        for micro_size in (1, 2, 4, 8):
            pieces = list(
                split_batch_into_microbatch(
                    batch,
                    _micro_batch_size=micro_size,
                    _global_batch_size=global_batch_size,
                )
            )
            # Right number of micro-batches, each of the right width.
            self.assertEqual(len(pieces), global_batch_size // micro_size)
            self.assertEqual(len(pieces[0][0]), micro_size)
138
+
139
+
140
# Run through PyTorch's test harness so common_utils' CLI flags and
# device-aware test discovery are honored.
if __name__ == "__main__":
    common_utils.run_tests()