cyd0806 committed · verified
Commit d50117d · Parent(s): 500c911

Upload apex-master/tests/L0/run_optimizers/test_adam.py with huggingface_hub

apex-master/tests/L0/run_optimizers/test_adam.py ADDED
@@ -0,0 +1,255 @@
+ import copy
+ import math
+ import random
+ import unittest
+
+ import torch
+ import torch.nn.functional as F
+ from torch import nn
+ from torch.testing._internal.common_device_type import largeTensorTest
+
+ try:
+     import apex
+ except ImportError as e:
+     HAS_APEX = False
+ else:
+     HAS_APEX = True
+
+
+ class Model(torch.nn.Module):
+     def __init__(self):
+         super(Model, self).__init__()
+         self.conv1 = nn.Conv2d(1, 6, 5)
+         self.relu1 = nn.ReLU()
+         self.pool1 = nn.MaxPool2d(2)
+         self.conv2 = nn.Conv2d(6, 16, 5)
+         self.relu2 = nn.ReLU()
+         self.pool2 = nn.MaxPool2d(2)
+         self.fc1 = nn.Linear(256, 120)
+         self.relu3 = nn.ReLU()
+         self.fc2 = nn.Linear(120, 84)
+         self.relu4 = nn.ReLU()
+         self.fc3 = nn.Linear(84, 10)
+         self.relu5 = nn.ReLU()
+
+     def forward(self, x):
+         y = self.conv1(x)
+         y = self.relu1(y)
+         y = self.pool1(y)
+         y = self.conv2(y)
+         y = self.relu2(y)
+         y = self.pool2(y)
+         y = y.reshape(y.shape[0], -1)
+         y = self.fc1(y)
+         y = self.relu3(y)
+         y = self.fc2(y)
+         y = self.relu4(y)
+         y = self.fc3(y)
+         y = self.relu5(y)
+         return y
+
+
+ @unittest.skipIf(not HAS_APEX, "`apex` is not found.")
+ class AdamTest(unittest.TestCase):
+     def setUp(self, seed=0):
+         super().setUp()
+         torch.manual_seed(seed)
+
+         self.model = Model().cuda()
+         self.model_ = Model().cuda()
+         self.model_.load_state_dict(copy.deepcopy(self.model.state_dict()))
+
+         self.lr = 0.00001
+         params = [p for p in self.model.parameters() if p.requires_grad]
+         self.optimizer = torch.optim.Adam(params, lr=self.lr)
+
+     def testGradScaler(self):
+         params_ = [p for p in self.model_.parameters() if p.requires_grad]
+         optimizer_ = apex.optimizers.FusedAdam(params_, lr=self.lr, capturable=False)
+         scaler = torch.amp.GradScaler('cuda', enabled=True)
+         scaler_ = torch.amp.GradScaler('cuda', enabled=True)
+
+         for i in range(100):
+             x = torch.rand([32, 1, 28, 28]).cuda().to(memory_format=torch.channels_last)
+             x_ = x.clone()
+             gt = torch.rand([32, 10]).cuda()
+             gt_ = gt.clone()
+
+             # Reference
+             with torch.amp.autocast('cuda', enabled=True):
+                 y = self.model(x)
+                 loss = ((gt - y) ** 2).mean()
+
+             scaler.scale(loss).backward()
+             scaler.step(self.optimizer)
+             scaler.update()
+
+             # DUT
+             with torch.amp.autocast('cuda', enabled=True):
+                 y = self.model_(x)
+                 loss_ = ((gt_ - y) ** 2).mean()
+
+             scaler_.scale(loss_).backward()
+             scaler_.step(optimizer_)
+             scaler_.update()
+
+             for module in zip(self.model.modules(), self.model_.modules()):
+                 m = module[0]
+                 m_ = module[1]
+                 if isinstance(m, nn.Conv2d) or isinstance(m_, nn.Linear):
+                     torch.testing.assert_close(m.weight, m_.weight, atol=1e-3, rtol=1e-3, equal_nan=True)
+                     torch.testing.assert_close(m.weight.grad, m_.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
+
+             # Init for next iteration
+             self.optimizer.zero_grad()
+             optimizer_.zero_grad()
+
+             self.model_.load_state_dict(copy.deepcopy(self.model.state_dict()))
+
+     def testGradScalerCapturable(self):
+         params_ = [p for p in self.model_.parameters() if p.requires_grad]
+         optimizer_ = apex.optimizers.FusedAdam(params_, lr=self.lr, capturable=True)
+         scaler = torch.amp.GradScaler('cuda', enabled=True)
+         scaler_ = torch.amp.GradScaler('cuda', enabled=True)
+
+         for i in range(100):
+             x = torch.rand([32, 1, 28, 28]).cuda().to(memory_format=torch.channels_last)
+             x_ = x.clone()
+             gt = torch.rand([32, 10]).cuda()
+             gt_ = gt.clone()
+
+             # Reference
+             with torch.amp.autocast('cuda', enabled=True):
+                 y = self.model(x)
+                 loss = ((gt - y) ** 2).mean()
+
+             scaler.scale(loss).backward()
+             scaler.step(self.optimizer)
+             scaler.update()
+
+             # DUT
+             with torch.amp.autocast('cuda', enabled=True):
+                 y = self.model_(x)
+                 loss_ = ((gt_ - y) ** 2).mean()
+
+             scaler_.scale(loss_).backward()
+             scaler_.step(optimizer_)
+             scaler_.update()
+
+             for module in zip(self.model.modules(), self.model_.modules()):
+                 m = module[0]
+                 m_ = module[1]
+                 if isinstance(m, nn.Conv2d) or isinstance(m_, nn.Linear):
+                     torch.testing.assert_close(m.weight, m_.weight, atol=1e-3, rtol=1e-3, equal_nan=True)
+                     torch.testing.assert_close(m.weight.grad, m_.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
+
+             # Init for next iteration
+             self.optimizer.zero_grad()
+             optimizer_.zero_grad()
+
+             self.model_.load_state_dict(copy.deepcopy(self.model.state_dict()))
+
+     def testGradScalerCapturableMaster(self):
+         # Cast conv layers to FP16
+         for m in self.model_.modules():
+             if m.__class__ in [torch.nn.Conv2d]:
+                 m.half()
+         params_ = [p for p in self.model_.parameters() if p.requires_grad]
+         optimizer_ = apex.optimizers.FusedAdam(params_, lr=self.lr, capturable=True, master_weights=True)
+         scaler = torch.amp.GradScaler('cuda', enabled=True)
+         scaler_ = torch.amp.GradScaler('cuda', enabled=True)
+
+         for i in range(100):
+             x = torch.rand([32, 1, 28, 28]).cuda().to(memory_format=torch.channels_last)
+             x_ = x.clone()
+             gt = torch.rand([32, 10]).cuda()
+             gt_ = gt.clone()
+
+             # Reference
+             with torch.amp.autocast('cuda', enabled=True):
+                 y = self.model(x)
+                 loss = ((gt - y) ** 2).mean()
+
+             scaler.scale(loss).backward()
+             scaler.step(self.optimizer)
+             scaler.update()
+
+             # DUT
+             with torch.amp.autocast('cuda', enabled=True):
+                 y = self.model_(x)
+                 loss_ = ((gt_ - y) ** 2).mean()
+
+             scaler_.scale(loss_).backward()
+             scaler_.step(optimizer_)
+             scaler_.update()
+
+             for module in zip(self.model.modules(), self.model_.modules()):
+                 m = module[0]
+                 m_ = module[1]
+                 if isinstance(m, nn.Conv2d) or isinstance(m_, nn.Linear):
+                     torch.testing.assert_close(m.weight, m_.weight.float(), atol=1e-3, rtol=1e-3, equal_nan=True)
+                     torch.testing.assert_close(m.weight.grad, m_.weight.grad.float(), atol=1e-3, rtol=1e-3, equal_nan=True)
+
+             # Init for next iteration
+             self.optimizer.zero_grad()
+             optimizer_.zero_grad()
+
+             self.model_.load_state_dict(copy.deepcopy(self.model.state_dict()))
+
+     def testNative(self):
+         params_ = [p for p in self.model_.parameters() if p.requires_grad]
+         optimizer_ = apex.optimizers.FusedAdam(params_, lr=self.lr, capturable=False)
+
+         for i in range(100):
+             x = torch.rand([32, 1, 28, 28]).cuda().to(memory_format=torch.channels_last)
+             x_ = x.clone()
+             gt = torch.rand([32, 10]).cuda()
+             gt_ = gt.clone()
+
+             # Reference
+             y = self.model(x)
+             loss = ((gt - y) ** 2).mean()
+
+             loss.backward()
+             self.optimizer.step()
+
+             # DUT
+             y = self.model_(x)
+             loss_ = ((gt_ - y) ** 2).mean()
+
+             loss_.backward()
+             optimizer_.step()
+
+             for module in zip(self.model.modules(), self.model_.modules()):
+                 m = module[0]
+                 m_ = module[1]
+                 if isinstance(m, nn.Conv2d) or isinstance(m_, nn.Linear):
+                     torch.testing.assert_close(m.weight, m_.weight, atol=1e-3, rtol=1e-3, equal_nan=True)
+                     torch.testing.assert_close(m.weight.grad, m_.weight.grad, atol=1e-3, rtol=1e-3, equal_nan=True)
+
+             # Init for next iteration
+             self.optimizer.zero_grad()
+             optimizer_.zero_grad()
+
+             self.model_.load_state_dict(copy.deepcopy(self.model.state_dict()))
+
+     @largeTensorTest('60GB', 'cuda')
+     def testLargeTensor(self):
+         t = torch.zeros(2359332864, dtype=torch.half, device='cuda')
+         t2 = torch.zeros(2359332864, dtype=torch.half, device='cuda')
+         grad = torch.randn_like(t)
+         t.grad = grad
+         t2.grad = grad
+         params = [t]
+         params2 = [t2]
+         optimizer = apex.optimizers.FusedAdam(params, lr=self.lr)
+         optimizer.step()
+         optimizer2 = torch.optim.Adam(params2, lr=self.lr)
+         optimizer2.step()  # step the reference optimizer too; otherwise t2 stays at zero and the comparison fails
+         torch.testing.assert_close(t, t2)
+         torch.cuda.synchronize()
+
+
+ if __name__ == '__main__':
+     unittest.main()
+
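A note on the `capturable=True` paths exercised above: capturable mode keeps the optimizer's step count and hyperparameter state on the GPU so that `optimizer.step()` involves no host-side control flow and can be recorded into a CUDA graph. The sketch below is illustrative only, not part of this commit; it assumes a CUDA-enabled apex build and follows the standard PyTorch warmup-then-capture recipe.

    import torch
    import apex

    p = torch.nn.Parameter(torch.randn(1024, device='cuda'))
    opt = apex.optimizers.FusedAdam([p], lr=1e-5, capturable=True)
    p.grad = torch.randn_like(p)  # static grad buffer reused across replays

    # Warm up on a side stream first, as CUDA graph capture requires.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        for _ in range(3):
            opt.step()
    torch.cuda.current_stream().wait_stream(s)

    # Capture a single optimizer step, then replay it; each replay re-reads
    # whatever currently sits in the static p.grad buffer.
    g = torch.cuda.CUDAGraph()
    with torch.cuda.graph(g):
        opt.step()
    p.grad.copy_(torch.randn_like(p))
    g.replay()

Relatedly, `master_weights=True` (exercised in testGradScalerCapturableMaster) keeps FP32 master copies for the FP16-cast conv parameters, which is why that test compares the reference weights against `m_.weight.float()`.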