cyd0806 committed on
Commit
b59d8f7
·
verified ·
1 Parent(s): 66587a1

Upload apex-master/tests/L0/run_transformer/test_data.py with huggingface_hub

Browse files
apex-master/tests/L0/run_transformer/test_data.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging

import torch.testing
from torch.testing._internal import common_utils

# Quiet torch's info-level logging before importing apex, which pulls in torch
# machinery that may log at import time. (The original file repeated this call
# after the apex imports; the second call was a redundant duplicate and has
# been removed.)
logging.getLogger("torch").setLevel(logging.WARNING)

from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import data as data_utils
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
14
+
15
+
16
class BroadcastDataTestBase:
    """Shared body for testing ``tensor_parallel.data.broadcast_data``.

    Mixed into the NCCL/UCC distributed test bases below; those bases are
    expected to provide ``self.world_size`` and the process-group plumbing
    (assumption based on usage here — confirm against
    ``apex.transformer.testing.distributed_test_base``).
    """

    def test_broadcast_data(self) -> None:
        # Use a 2-way tensor-model-parallel split when more than one rank is
        # available, otherwise the full (single-rank) world.
        #
        # FIX(precedence): the original expression
        #     self.world_size // (1 + self.world_size > 1)
        # parsed as `self.world_size // ((1 + self.world_size) > 1)`, i.e. the
        # divisor was `True == 1` for any positive world size, so the split
        # never happened. `int(...)` makes the intended grouping explicit.
        tensor_model_parallel_world_size: int = self.world_size // (
            1 + int(self.world_size > 1)
        )
        parallel_state.initialize_model_parallel(
            tensor_model_parallel_size_=tensor_model_parallel_world_size
        )

        # Expected shapes of the int64 tensors to be broadcast.
        target_key_size = {
            "key1": [7, 11],
            "key2": [8, 2, 1],
            "key3": [13],
            "key4": [5, 1, 2],
            "key5": [5, 12],
        }
        keys = list(target_key_size)

        data = {}
        data_t = {}  # reference copy kept on every rank for comparison
        with torch.no_grad():
            for key in target_key_size:
                data[key] = torch.randint(0, 1000, size=target_key_size[key])
                data_t[key] = data[key].clone()
            # "key_x" is supposed to be ignored.
            data["key_x"] = torch.rand(5)
            data_t["key_x"] = data["key_x"].clone()
        # Only tensor-parallel rank 0 supplies data; other ranks receive it
        # via the broadcast under test.
        if parallel_state.get_tensor_model_parallel_rank() != 0:
            data = None

        data_utils._check_data_types(keys, data_t, torch.int64)
        key_size, _, _ = data_utils._build_key_size_numel_dictionaries(keys, data)

        # Shape bookkeeping must match the shapes rank 0 provided.
        for key in keys:
            self.assertEqual(target_key_size[key], key_size[key])

        broadcasted_data = data_utils.broadcast_data(keys, data, torch.int64)
        for key in keys:
            self.assertEqual(broadcasted_data[key], data_t[key].cuda())

        parallel_state.destroy_model_parallel()
57
+
58
+
59
class NcclBroadcastDataTest(BroadcastDataTestBase, NcclDistributedTestBase):
    """Runs the shared broadcast-data test over the NCCL backend."""
60
class UccBroadcastDataTest(BroadcastDataTestBase, UccDistributedTestBase):
    """Runs the shared broadcast-data test over the UCC backend."""
61
+
62
+
63
# Delegate to PyTorch's internal test harness, which discovers and runs the
# unittest-style test classes defined above.
if __name__ == "__main__":
    common_utils.run_tests()