cyd0806 committed
Commit c9b77c1 · verified · 1 Parent(s): bacbb4b

Upload apex-master/tests/L0/run_transformer/test_p2p_comm.py with huggingface_hub

apex-master/tests/L0/run_transformer/test_p2p_comm.py ADDED
@@ -0,0 +1,123 @@
+ import logging
+ import unittest
+
+ import torch
+ from torch.testing._internal import common_utils
+
+ logging.getLogger("torch").setLevel(logging.WARNING)
+
+ from apex.transformer import parallel_state
+ from apex.transformer.pipeline_parallel import p2p_communication
+ from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
+ from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
+
+ logging.getLogger("apex").setLevel(logging.DEBUG)
+
+
+ # [P2P Ops Involved in Pipeline Model Parallel forward/backward]
+ # **forward_backward_pipelining_without_interleaving**
+ # - send_forward / recv_forward
+ # - send_backward / recv_backward
+ # - send_forward_recv_backward
+ # - send_backward_recv_forward
+ # **forward_backward_pipelining_with_interleaving**
+ # - send_backward_recv_backward
+ # - recv_backward
+ # - recv_forward
+ # - send_forward_backward_recv_forward_backward
+ # - send_forward_recv_forward
+ class P2PCommTestBase:
+
+     numel = 4
+     shape = (2, 2)
+     dtype = torch.float32
+
+     @property
+     def world_size(self):
+         return min(2, torch.cuda.device_count())
+
+     def _init_model_parallel(self):
+         parallel_state.initialize_model_parallel(
+             tensor_model_parallel_size_=1,
+             pipeline_model_parallel_size_=self.world_size,
+             virtual_pipeline_model_parallel_size_=None,
+         )
+
+     def create_tensor(self, value: int = None):
+         return torch.tensor(
+             [value] * self.numel).view(self.shape).to(device="cuda", dtype=self.dtype)
+
+     # Brief: Simulate warm-up.
+     # Brief: test `recv_forward` & `send_forward`.
+     def test_no_interleaving_warmup(self):
+         self.assertEqual(self.world_size, 2)
+         self._init_model_parallel()
+         input_tensor = None
+         if parallel_state.is_pipeline_first_stage():
+             tensor = self.create_tensor(self.rank)
+             print(tensor)
+             p2p_communication.send_forward(output_tensor=tensor, tensor_shape=self.shape, dtype=self.dtype)
+         else:
+             input_tensor = p2p_communication.recv_forward(tensor_shape=self.shape, dtype=self.dtype)
+
+         if parallel_state.is_pipeline_first_stage():
+             self.assertIsNone(input_tensor)
+         else:
+             expected_input_tensor = self.create_tensor(self.rank - 1)
+             self.assertEqual(input_tensor, expected_input_tensor)
+
+     # Brief: test `send_forward`, `send_forward_recv_forward`, and `recv_forward`.
+     def test_send_forward_recv_forward(self):
+         self._init_model_parallel()
+         prev_tensor = None
+         tensor = self.create_tensor(self.rank)
+         if parallel_state.is_pipeline_first_stage():
+             p2p_communication.send_forward(output_tensor=tensor, tensor_shape=self.shape, dtype=self.dtype)
+         elif parallel_state.is_pipeline_last_stage():
+             prev_tensor = p2p_communication.recv_forward(tensor_shape=self.shape, dtype=self.dtype)
+         else:
+             prev_tensor = p2p_communication.send_forward_recv_forward(
+                 output_tensor=tensor,
+                 recv_prev=True,
+                 tensor_shape=self.shape,
+                 dtype=self.dtype,
+             )
+
+         if parallel_state.is_pipeline_first_stage():
+             self.assertIsNone(prev_tensor)
+         else:
+             expected_prev_tensor = self.create_tensor(self.rank - 1)
+             self.assertEqual(prev_tensor, expected_prev_tensor)
+
+     # Brief: test `send_backward`, `send_backward_recv_backward`, and `recv_backward`.
+     def test_send_backward_recv_backward(self):
+         self._init_model_parallel()
+         tensor = self.create_tensor(self.rank)
+
+         next_tensor = None
+         if parallel_state.is_pipeline_first_stage():
+             next_tensor = p2p_communication.recv_backward(tensor_shape=self.shape, dtype=self.dtype)
+         elif parallel_state.is_pipeline_last_stage():
+             p2p_communication.send_backward(input_tensor_grad=tensor, tensor_shape=self.shape, dtype=self.dtype)
+         else:
+             next_tensor = p2p_communication.send_backward_recv_backward(
+                 input_tensor_grad=tensor,
+                 recv_next=True,
+                 tensor_shape=self.shape,
+                 dtype=self.dtype,
+             )
+
+         if parallel_state.is_pipeline_last_stage():
+             self.assertIsNone(next_tensor)
+         else:
+             expected_next_tensor = self.create_tensor(self.rank + 1)
+             self.assertEqual(next_tensor, expected_next_tensor)
+
+
+ # n.b.(mkozuki): Intentionally skip NCCL backend tests as I trust pytorch/pytorch repo.
+ @unittest.skipIf(torch.cuda.device_count() < 2, "Requires >= 2 GPUs")
+ class UccP2PCommTest(P2PCommTestBase, UccDistributedTestBase): pass
+
+
+ if __name__ == "__main__":
+     common_utils.run_tests()
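
The ops exercised above pair up in a regular pattern in the non-interleaved (1F1B) schedule. The sketch below is a structural illustration only, not apex's actual forward_backward_pipelining_without_interleaving: forward_step and backward_step are hypothetical stand-ins, num_warmup is passed in rather than derived from the stage rank, num_microbatches > num_warmup is assumed, and the keyword signatures of the fused send_forward_recv_backward / send_backward_recv_forward calls are assumed by analogy with the calls in the tests.

    import torch

    from apex.transformer.pipeline_parallel import p2p_communication

    SHAPE, DTYPE = (2, 2), torch.float32  # mirrors the test constants


    def forward_step(input_tensor):
        # Hypothetical stand-in: a real schedule runs this stage's module.
        # recv_forward returns None on the first stage, which would instead
        # read a fresh microbatch here.
        if input_tensor is None:
            input_tensor = torch.zeros(SHAPE, dtype=DTYPE, device="cuda")
        return input_tensor + 1


    def backward_step(output_tensor_grad):
        # Hypothetical stand-in: a real schedule backprops through the stage
        # using activations saved during the forward passes. The last stage
        # receives None and would start from the loss gradient instead.
        if output_tensor_grad is None:
            output_tensor_grad = torch.ones(SHAPE, dtype=DTYPE, device="cuda")
        return output_tensor_grad


    def one_f_one_b_sketch(num_microbatches, num_warmup):
        # Warm-up: fill the pipeline with forward-only microbatches.
        for _ in range(num_warmup):
            input_tensor = p2p_communication.recv_forward(tensor_shape=SHAPE, dtype=DTYPE)
            p2p_communication.send_forward(
                output_tensor=forward_step(input_tensor), tensor_shape=SHAPE, dtype=DTYPE)

        # Steady state (1F1B): the fused ops move activations forward and
        # gradients backward in a single call each.
        input_tensor = p2p_communication.recv_forward(tensor_shape=SHAPE, dtype=DTYPE)
        for i in range(num_microbatches - num_warmup):
            output_tensor = forward_step(input_tensor)
            output_tensor_grad = p2p_communication.send_forward_recv_backward(
                output_tensor=output_tensor, tensor_shape=SHAPE, dtype=DTYPE)
            input_tensor_grad = backward_step(output_tensor_grad)
            if i == num_microbatches - num_warmup - 1:
                p2p_communication.send_backward(
                    input_tensor_grad=input_tensor_grad, tensor_shape=SHAPE, dtype=DTYPE)
            else:
                input_tensor = p2p_communication.send_backward_recv_forward(
                    input_tensor_grad=input_tensor_grad, tensor_shape=SHAPE, dtype=DTYPE)

        # Cool-down: drain the backward passes matching the warm-up forwards.
        for _ in range(num_warmup):
            output_tensor_grad = p2p_communication.recv_backward(tensor_shape=SHAPE, dtype=DTYPE)
            p2p_communication.send_backward(
                input_tensor_grad=backward_step(output_tensor_grad),
                tensor_shape=SHAPE, dtype=DTYPE)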
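For anyone trying the uploaded file: these suites build on PyTorch's MultiProcessTestCase machinery, so each test spawns its own worker processes and no torchrun launcher is needed. A minimal sketch for running a single test programmatically, assuming a machine with at least two GPUs, a working `ucc` process-group backend, and the uploaded module on sys.path:

    import unittest

    # Load only the warm-up test from the uploaded module and run it;
    # the test case itself spawns one process per pipeline rank.
    suite = unittest.defaultTestLoader.loadTestsFromName(
        "test_p2p_comm.UccP2PCommTest.test_no_interleaving_warmup"
    )
    unittest.TextTestRunner(verbosity=2).run(suite)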