cyd0806 committed on
Commit
c3750e0
·
verified ·
1 Parent(s): a952c62

Upload apex-master/tests/L0/run_transformer/test_fused_rope.py with huggingface_hub

Browse files
apex-master/tests/L0/run_transformer/test_fused_rope.py ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test for fused RoPE functions.
2
+
3
+ Ref: https://github.com/NVIDIA/Megatron-LM/blob/40becfc96c4144985458ac0e0fae45dbb111fbd2/megatron/fused_kernels/tests/test_fused_kernels.py
4
+ """ # NOQA
5
+
6
+ import itertools
7
+
8
+ import torch
9
+ from torch.testing._internal import common_utils
10
+ from apex.transformer.functional import (
11
+ fused_apply_rotary_pos_emb,
12
+ fused_apply_rotary_pos_emb_cached,
13
+ fused_apply_rotary_pos_emb_thd,
14
+ fused_apply_rotary_pos_emb_2d,
15
+ )
16
+
17
+
18
+ def _rotate_half(x: torch.Tensor) -> torch.Tensor:
19
+ """Change sign so the last dimension becomes [-odd, +even]
20
+
21
+ Args:
22
+ x (Tensor): Input tensor
23
+
24
+ Returns:
25
+ Tensor: Tensor rotated half
26
+ """
27
+
28
+ x1, x2 = torch.chunk(x, 2, dim=-1)
29
+ return torch.cat((-x2, x1), dim=-1)
30
+
31
+
32
# Copied from Megatron-Core for testing.
# https://github.com/NVIDIA/Megatron-LM/blob/5f2877d85cb26e47ce6dcdae4b80adf376abf4e8/megatron/core/models/common/embeddings/rotary_pos_embedding.py#L139
def apply_rotary_pos_emb(t: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
    """Reference (unfused) application of rotary positional embedding.

    See https://kexue.fm/archives/8265 for the detailed formulas.

    Args:
        t (Tensor): Input tensor of shape [seq_length, ..., dim].
        freqs (Tensor): Rotary positional embedding of shape
            [seq_length, ..., rot_dim] with rot_dim <= dim.

    Returns:
        Tensor: Same shape as ``t``; the first ``rot_dim`` channels are
        rotated, the remaining channels pass through untouched.
    """
    rot_dim = freqs.shape[-1]

    # Ideally t_pass is empty so RoPE is applied to the whole tensor.
    t_rot = t[..., :rot_dim]
    t_pass = t[..., rot_dim:]

    # Cosine term, plus the sign-flipped ("rotate half") sine term.
    cos_ = freqs.cos().to(t_rot.dtype)
    sin_ = freqs.sin().to(t_rot.dtype)

    # Inline rotate-half: last dim [x1, x2] -> [-x2, x1].
    first, second = torch.chunk(t_rot, 2, dim=-1)
    rotated = torch.cat((-second, first), dim=-1)

    return torch.cat((t_rot * cos_ + rotated * sin_, t_pass), dim=-1)
58
+
59
+
60
def apply_rotary_pos_emb_thd(
    t: torch.Tensor, cu_seqlens: torch.Tensor, freqs: torch.Tensor
) -> torch.Tensor:
    """Baseline RoPE for packed (``thd``-format) variable-length sequences.

    Args:
        t (Tensor): Packed input of shape [t, h, d].
        cu_seqlens (Tensor): Cumulative sequence lengths for ``t``, shape
            [b + 1], dtype torch.int32.
        freqs (Tensor): Rotary positional embedding of shape [max_s, 1, 1, d].

    Returns:
        Tensor: Shape [t, h, d]; RoPE applied independently per sequence.
    """
    lengths = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
    pieces = []
    for seq in torch.split(t, lengths):
        # Each packed sequence restarts at position 0, so only the first
        # seq_len rows of the frequency table apply to it.
        pieces.append(apply_rotary_pos_emb(seq.unsqueeze(1), freqs[: seq.size(0)]))
    return torch.cat(pieces).squeeze(1)
81
+
82
+
83
def apply_rotary_pos_emb_2d(q, img_h, img_w, cos_h, sin_h, cos_w, sin_w):
    """Baseline 2D RoPE: rotate the first half of the hidden channels by
    row (height) position and the second half by column (width) position.

    Args:
        q: Tensor of shape [b, img_h * img_w, head_num, hidden].
        img_h: Height of the image token grid.
        img_w: Width of the image token grid.
        cos_h, sin_h: Height-axis tables of shape [1, >=img_h, 1, hidden // 2].
        cos_w, sin_w: Width-axis tables of shape [1, >=img_w, 1, hidden // 2].

    Returns:
        Tensor of shape [b, img_h * img_w, head_num, hidden].
    """

    def _half_rot(x):
        # Rotate-half on the channel dim: [a, b] -> [-b, a].
        a, b = torch.chunk(x, 2, dim=-1)
        return torch.cat((-b, a), dim=-1)

    batch = q.shape[0]
    heads, hidden = q.shape[2], q.shape[3]
    grid = q.view(batch, img_h, img_w, heads, hidden)
    row_part, col_part = grid.chunk(2, dim=-1)

    # Height tables broadcast over the width axis: [1, H, 1, 1, D//2].
    ch = cos_h[:, :img_h].unsqueeze(2)
    sh = sin_h[:, :img_h].unsqueeze(2)
    row_part = row_part * ch + _half_rot(row_part) * sh

    # Width tables broadcast over the height axis: [1, 1, W, 1, D//2].
    cw = cos_w[:, :img_w].unsqueeze(1)
    sw = sin_w[:, :img_w].unsqueeze(1)
    col_part = col_part * cw + _half_rot(col_part) * sw

    return torch.cat([row_part, col_part], dim=-1).view(batch, -1, heads, hidden)
93
+
94
+
95
class TestFusedRoPE(common_utils.TestCase):
    """Compare apex's fused RoPE kernels against the unfused references.

    Each test sweeps a cartesian product of dtypes, shapes, memory layouts,
    and loss functions; runs forward + backward through both the reference
    implementation and the fused kernel; and asserts that outputs and input
    gradients match. Requires a CUDA device — all tensors and the fused
    kernels live on the current GPU.
    """

    def setUp(self):
        """Define the parameter grids swept by the tests."""
        super().setUp()
        self.batch_size = 2
        self.head_num = 64
        self.seq_length = [2048, 4096]
        self.hidden_size = [128, 256]
        # Fraction of the hidden dimension that receives rotary embedding.
        self.rotary_percent = [0.5, 1.0]
        self.dtype = [torch.float32, torch.bfloat16, torch.float16]
        # Input layouts: None keeps the natural contiguous layout; the
        # tuples produce non-contiguous inputs via transpose/contiguous/transpose.
        self.transpose = [None, (0, 1), (2, 3)]
        self.transpose_output_memory = [False, True]
        self.loss_func = [self._overlapping_grad, self._non_overlapping_grad]
        self.cached = [False, True]
        self.device = torch.cuda.current_device()
        # for 2D RoPE
        self.img_h = [32, 64]
        self.img_w = [32, 64]

    def tearDown(self) -> None:
        """Release cached GPU memory between the large parameter sweeps."""
        torch.cuda.empty_cache()
        super().tearDown()

    def _overlapping_grad(self, output) -> torch.Tensor:
        # Scalar loss whose gradient w.r.t. every output element is 2.
        return output.sum() * 2

    def _non_overlapping_grad(self, output) -> torch.Tensor:
        # Scalar loss built via an elementwise product; gradient is all ones.
        t = torch.ones_like(output)
        return torch.sum(output * t)

    def test_forward_backward(self):
        """Fused (plain and cached cos/sin) vs. unfused RoPE on sbhd input."""
        for (
            dtype,
            seq_length,
            hidden_size,
            rotary_percent,
            transpose,
            transpose_output_memory,
            loss_func,
            cached,
        ) in itertools.product(
            self.dtype,
            self.seq_length,
            self.hidden_size,
            self.rotary_percent,
            self.transpose,
            self.transpose_output_memory,
            self.loss_func,
            self.cached,
        ):
            # Input is [seq, batch, heads, hidden] (sbhd layout).
            t = torch.rand(
                (seq_length, self.batch_size, self.head_num, hidden_size),
                dtype=dtype,
                device=self.device,
            )
            if transpose:
                # Make the input non-contiguous without changing its values.
                t = t.transpose(*transpose).contiguous().transpose(*transpose)
            t.requires_grad = True

            # Frequencies kept in fp32 regardless of the activation dtype.
            emb = torch.rand(
                (seq_length, 1, 1, int(hidden_size * rotary_percent)),
                dtype=torch.float32,
                device=self.device,
            )

            # unfused
            output_unfused = apply_rotary_pos_emb(t, emb)
            loss_unfused = loss_func(output_unfused)
            loss_unfused.backward()
            grad_unfused = t.grad.detach().clone()
            t.grad = None

            # fused
            if cached:
                # Cached variant consumes precomputed cos/sin tables.
                cos, sin = emb.cos(), emb.sin()
                output_fused = fused_apply_rotary_pos_emb_cached(
                    t, cos, sin, transpose_output_memory=transpose_output_memory
                )
            else:
                output_fused = fused_apply_rotary_pos_emb(
                    t, emb, transpose_output_memory=transpose_output_memory
                )
            loss_fused = loss_func(output_fused)
            loss_fused.backward()
            grad_fused = t.grad.detach().clone()
            t.grad = None

            self.assertEqual(
                output_unfused,
                output_fused,
                msg=f"{dtype=}, {seq_length=}, {hidden_size=}, {rotary_percent=}, "
                f"{transpose=}, {transpose_output_memory=}, loss_func={loss_func.__name__}",
            )
            self.assertEqual(
                grad_unfused,
                grad_fused,
                msg=f"{dtype=}, {seq_length=}, {hidden_size=}, {rotary_percent=}, "
                f"{transpose=}, {transpose_output_memory=}, loss_func={loss_func.__name__}",
            )
            # transpose_output_memory=True must yield output whose (0, 1)
            # transpose is contiguous; False must not.
            assert (
                output_fused.transpose(0, 1).is_contiguous() is transpose_output_memory
            )

    def test_thd_forward_backward(self):
        """Fused vs. unfused RoPE for packed variable-length (thd) input."""
        # Cumulative sequence lengths for 11 packed sequences totalling 2048.
        cu_seqlens = torch.tensor(
            [0, 400, 542, 711, 727, 752, 1270, 1426, 1450, 1954, 2044, 2048],
            dtype=torch.int32,
            device=self.device,
        )
        for (
            dtype,
            hidden_size,
            rotary_percent,
            transpose,
            loss_func,
        ) in itertools.product(
            self.dtype,
            self.hidden_size,
            self.rotary_percent,
            [None, [1, 2]],
            self.loss_func,
        ):
            # Packed input is [total_tokens, heads, hidden].
            t = torch.rand(
                (cu_seqlens[-1], self.head_num, hidden_size),
                dtype=dtype,
                device=self.device,
            )
            if transpose:
                # Make the input non-contiguous without changing its values.
                t = t.transpose(*transpose).contiguous().transpose(*transpose)
            t.requires_grad = True

            emb = torch.rand(
                (cu_seqlens[-1], 1, 1, int(hidden_size * rotary_percent)),
                dtype=torch.float32,
                device=self.device,
            )

            # unfused
            output_unfused = apply_rotary_pos_emb_thd(t, cu_seqlens, emb)
            loss_unfused = loss_func(output_unfused)
            loss_unfused.backward()
            grad_unfused = t.grad.detach().clone()
            t.grad = None

            # fused
            output_fused = fused_apply_rotary_pos_emb_thd(
                t,
                cu_seqlens,
                emb,
            )
            loss_fused = loss_func(output_fused)
            loss_fused.backward()
            grad_fused = t.grad.detach().clone()
            t.grad = None

            self.assertEqual(
                output_unfused,
                output_fused,
                msg=f"{dtype=}, {cu_seqlens=}, {hidden_size=}, {rotary_percent=}, "
                f"{transpose=}, loss_func={loss_func.__name__}",
            )
            self.assertEqual(
                grad_unfused,
                grad_fused,
                msg=f"{dtype=}, {cu_seqlens=}, {hidden_size=}, {rotary_percent=}, "
                f"{transpose=}, loss_func={loss_func.__name__}",
            )

    def test_2d_forward_backward(self):
        """Fused vs. unfused 2D RoPE on image-grid input.

        ``margin`` pads the cos/sin tables beyond img_h/img_w to check that
        the implementations only read the first img_h/img_w rows.
        """
        for (
            dtype,
            img_h,
            img_w,
            hidden_size,
            transpose,
            loss_func,
            margin,
        ) in itertools.product(
            self.dtype,
            self.img_h,
            self.img_w,
            self.hidden_size,
            self.transpose,
            self.loss_func,
            [0, 3],
        ):
            # Input is [batch, img_h * img_w tokens, heads, hidden].
            t = torch.rand(
                (self.batch_size, img_h * img_w, self.head_num, hidden_size),
                dtype=dtype,
                device=self.device,
            )
            if transpose:
                # Make the input non-contiguous without changing its values.
                t = t.transpose(*transpose).contiguous().transpose(*transpose)
            t.requires_grad = True

            emb_h = torch.rand(
                (1, img_h + margin, 1, hidden_size // 2),
                dtype=torch.float32,
                device=self.device,
            )
            cos_h, sin_h = emb_h.cos().to(dtype), emb_h.sin().to(dtype)

            emb_w = torch.rand(
                (1, img_w + margin, 1, hidden_size // 2),
                dtype=torch.float32,
                device=self.device,
            )
            cos_w, sin_w = emb_w.cos().to(dtype), emb_w.sin().to(dtype)

            # unfused
            output_unfused = apply_rotary_pos_emb_2d(
                t, img_h, img_w, cos_h, sin_h, cos_w, sin_w
            )
            loss_unfused = loss_func(output_unfused)
            loss_unfused.backward()
            grad_unfused = t.grad.detach().clone()
            t.grad = None

            # fused
            output_fused = fused_apply_rotary_pos_emb_2d(
                t, img_h, img_w, cos_h, sin_h, cos_w, sin_w
            )
            loss_fused = loss_func(output_fused)
            loss_fused.backward()
            grad_fused = t.grad.detach().clone()
            t.grad = None

            self.assertEqual(
                output_unfused,
                output_fused,
                msg=f"{dtype=}, {img_h=}, {img_w=}, {hidden_size=}, "
                f"{transpose=}, loss_func={loss_func.__name__}",
            )
            self.assertEqual(
                grad_unfused,
                grad_fused,
                msg=f"{dtype=}, {img_h=}, {img_w=}, {hidden_size=}, "
                f"{transpose=}, loss_func={loss_func.__name__}",
            )
333
+
334
+
335
# Allow running this file directly: dispatch to torch's internal test runner.
if __name__ == "__main__":
    common_utils.run_tests()