cyd0806 committed
Commit bc3ded4 · verified · 1 Parent(s): 8382845

Upload apex-master/tests/L0/run_transformer/test_mapping.py with huggingface_hub

apex-master/tests/L0/run_transformer/test_mapping.py ADDED
@@ -0,0 +1,87 @@
+ import logging
+
+ import torch
+ from torch.testing._internal import common_utils
+
+ from apex.transformer import parallel_state
+ from apex.transformer.tensor_parallel import mappings
+ from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
+ from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
+
+
+ logging.getLogger("torch").setLevel(logging.WARNING)
+ logging.getLogger("apex").setLevel(logging.WARNING)
+
+
+ class MappingTestBase:
+     def test_reduce(self):
+         for tensor_model_parallel_world_size in range(1, self.world_size + 1):
+             if self.world_size % tensor_model_parallel_world_size > 0:
+                 continue
+             parallel_state.initialize_model_parallel(
+                 tensor_model_parallel_size_=tensor_model_parallel_world_size
+             )
+             t = torch.full((10, 10, 10, 10), 50, device=f"cuda:{self.rank}")
+             expected = torch.full(
+                 (10, 10, 10, 10),
+                 50 * tensor_model_parallel_world_size,
+                 device=f"cuda:{self.rank}",
+             )
+             self.assertTrue(
+                 torch.equal(mappings._reduce(t), expected),
+                 msg=f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}",
+             )
+             parallel_state.destroy_model_parallel()
+
+     def test_split(self):
+         for tensor_model_parallel_world_size in range(1, self.world_size + 1):
+             if self.world_size % tensor_model_parallel_world_size > 0:
+                 continue
+             parallel_state.initialize_model_parallel(
+                 tensor_model_parallel_size_=tensor_model_parallel_world_size
+             )
+
+             tensors = [
+                 torch.randn(10, 1)
+                 for _ in range(tensor_model_parallel_world_size)
+             ]
+             x = torch.cat(tensors, 1)
+             out = mappings._split_along_last_dim(x)
+             self.assertTrue(
+                 torch.equal(
+                     out, tensors[parallel_state.get_tensor_model_parallel_rank()]
+                 ),
+                 msg=f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}",
+             )
+             parallel_state.destroy_model_parallel()
+
+     def test_gather(self):
+         for tensor_model_parallel_world_size in range(1, self.world_size + 1):
+             if self.world_size % tensor_model_parallel_world_size > 0:
+                 continue
+             parallel_state.initialize_model_parallel(
+                 tensor_model_parallel_size_=tensor_model_parallel_world_size
+             )
+             device = f"cuda:{self.rank}"
+             gathered = mappings._gather_along_last_dim(
+                 torch.tensor(
+                     [parallel_state.get_tensor_model_parallel_rank()], device=device
+                 )
+             )
+             expected = torch.tensor(
+                 list(range(tensor_model_parallel_world_size)),
+                 device=device,
+             )
+             self.assertTrue(
+                 torch.equal(gathered, expected),
+                 msg=f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}",
+             )
+             parallel_state.destroy_model_parallel()
+
+
+ class NcclMappingTest(MappingTestBase, NcclDistributedTestBase): pass
+ class UccMappingTest(MappingTestBase, UccDistributedTestBase): pass
+
+
+ if __name__ == "__main__":
+     common_utils.run_tests()
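
For context on what the diff above verifies: _reduce, _split_along_last_dim, and _gather_along_last_dim are apex's tensor-parallel communication primitives, and each test checks one collective's arithmetic. The sketch below restates those semantics with plain torch.distributed; the helper names are hypothetical, and apex's real implementations route communication through its tensor-model-parallel process group rather than the default group.

# Illustrative sketch only (not apex's code): the semantics each test checks,
# written against torch.distributed's default process group.
import torch
import torch.distributed as dist


def reduce_sketch(t: torch.Tensor) -> torch.Tensor:
    # test_reduce: an all-reduce sum turns full(..., 50) into
    # full(..., 50 * world_size) on every rank.
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    return t


def split_along_last_dim_sketch(x: torch.Tensor) -> torch.Tensor:
    # test_split: each rank keeps only its own chunk of the last dimension.
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    return torch.chunk(x, world_size, dim=-1)[rank].contiguous()


def gather_along_last_dim_sketch(t: torch.Tensor) -> torch.Tensor:
    # test_gather: all-gather the per-rank tensors, then concatenate them
    # along the last dimension, so rank-local [rank] becomes [0, 1, ..., N - 1].
    parts = [torch.empty_like(t) for _ in range(dist.get_world_size())]
    dist.all_gather(parts, t)
    return torch.cat(parts, dim=-1)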
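As for running the file: the Nccl/Ucc test bases build on torch's multiprocess test harness, so a plain interpreter invocation on a multi-GPU node should suffice (an assumption about apex's testing utilities; launcher requirements may differ across apex versions):

python apex-master/tests/L0/run_transformer/test_mapping.py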