cyd0806 commited on
Commit
8fb6e04
·
verified ·
1 Parent(s): 960e7c9

Upload apex-master/tests/distributed/synced_batchnorm/two_gpu_test_different_batch_size.py with huggingface_hub

Browse files
apex-master/tests/distributed/synced_batchnorm/two_gpu_test_different_batch_size.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Standard library
import argparse
import os

# Third-party
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from apex.parallel import SyncBatchNorm as ApexSyncBatchNorm

# Batch size used on rank 0; the other rank(s) use a fixed batch of 8, so
# the per-GPU batch sizes are deliberately unbalanced in this test.
var_batch = 16
def compare(desc, inp1, inp2, error=1e-5):
    """Element-wise compare two tensors within *error* (used as both the
    relative and absolute tolerance of np.allclose).

    On mismatch, print *desc* and the offending entries of both inputs.
    Returns the boolean result of np.allclose.
    """
    lhs = inp1.clone().detach().cpu().numpy()
    rhs = inp2.clone().detach().cpu().numpy()
    close = np.allclose(lhs, rhs, error, error)
    if not close:
        print(desc, close)
        delta = lhs - rhs
        # Same criterion np.allclose uses: |a - b| < atol + rtol * |b|
        index = (np.abs(delta) >= error + error * np.abs(rhs)).nonzero()
        print("dif : ", delta[index])
        print("inp1 : ", lhs[index])
        print("inp2 : ", rhs[index])
    return close
# Command-line options: --local_rank is injected by the distributed launcher,
# --apex selects apex's SyncBatchNorm instead of torch's native one.
parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--apex', action='store_true')
args = parser.parse_args()


torch.manual_seed(2809)
# Setup DDP
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda:{}'.format(args.local_rank))

# world_size comes from the environment (env:// init), so only the rank is
# passed explicitly.
torch.distributed.init_process_group(
    'nccl',
    init_method='env://',
    rank=args.local_rank,
)

# Setup model: conv followed by a synchronized batchnorm (apex or native).
if args.apex:
    model = nn.Sequential(
        nn.Conv2d(3, 6, 3, 1, 1),
        ApexSyncBatchNorm(6)
    )
else:
    model = nn.Sequential(
        nn.Conv2d(3, 6, 3, 1, 1),
        nn.SyncBatchNorm(6)
    )

# Setup reference model: plain BatchNorm2d, later run on rank 0 over the
# concatenated global batch, to check the synced stats/gradients against.
model_reference = nn.Sequential(
    nn.Conv2d(3, 6, 3, 1, 1),
    nn.BatchNorm2d(6)
)

# Start both conv layers from identical weights so outputs are comparable.
with torch.no_grad():
    model_reference[0].weight.copy_(model[0].weight)
    model_reference[0].bias.copy_(model[0].bias)
model_reference.to(device)

model = model.to(device)
model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank)

global_batch_size = var_batch + 8
# Create random data: rank 0 uses var_batch samples, every other rank uses 8,
# so the ranks intentionally run with different batch sizes.
if args.local_rank == 0:
    data = torch.randn(var_batch, 3, 8, 8, device=device, dtype=torch.float) * 50.0
    grad = torch.randint(0, 10, (var_batch, 6, 8, 8), device=device, dtype=torch.float) / 10.0
else:
    data = torch.randn(8, 3, 8, 8, device=device)
    grad = torch.randint(0, 10, (8, 6, 8, 8), device=device, dtype=torch.float) / 10.0

data.requires_grad_()
# Bug fix: retain_grad is a method. The original wrote
# `data.retain_grad = True`, which clobbers the bound method with a bool
# instead of calling it. (data is a leaf tensor so .grad is populated either
# way, but call it as intended.)
data.retain_grad()

weighted_gradient = True

# DDP forward/backward
output = model(data)

# With weighted_gradient, each rank scales by 2/global_batch; after DDP's
# gradient averaging across the two GPUs this matches the reference's
# 1/global_batch scaling on rank 0.
if weighted_gradient:
    output.backward(grad * 2 / global_batch_size)
else:
    output.backward(grad / output.size(0))
# Every rank must contribute an identically-shaped tensor to all_gather, but
# rank 0's real batch is larger (var_batch vs 8), so rank 0 sends same-shaped
# random placeholders and later swaps its real local tensors back in at
# index 0 of each gathered list.
world_size = int(os.environ['WORLD_SIZE'])
d_list = [torch.randn(8, 3, 8, 8, device=device) for _ in range(world_size)]
y_list = [torch.randn(8, 6, 8, 8, device=device) for _ in range(world_size)]
dgrad_list = [torch.randn(8, 3, 8, 8, device=device) for _ in range(world_size)]
grad_list = [torch.randn(8, 6, 8, 8, device=device) for _ in range(world_size)]
if args.local_rank == 0:
    # placeholder, these random data will later be discarded.
    torch.distributed.all_gather(d_list, torch.randn(8, 3, 8, 8, device=device))
    torch.distributed.all_gather(y_list, torch.randn(8, 6, 8, 8, device=device))
    torch.distributed.all_gather(dgrad_list, torch.randn(8, 3, 8, 8, device=device))
    torch.distributed.all_gather(grad_list, torch.randn(8, 6, 8, 8, device=device))
else:
    torch.distributed.all_gather(d_list, data)
    torch.distributed.all_gather(y_list, output)
    torch.distributed.all_gather(dgrad_list, data.grad)
    torch.distributed.all_gather(grad_list, grad)

torch.distributed.barrier()

# Verification only happens on rank 0, which reconstructs the global batch.
if args.local_rank == 0:
    # Drop rank 0's placeholder slot and substitute its real local batch.
    ref_tensor = d_list[1:]
    ref_tensor.insert(0, data)
    assert(ref_tensor[0].equal(data))
    ref_tensor = torch.cat(ref_tensor, 0)
    ref_tensor = ref_tensor.detach()
    ref_tensor.requires_grad_()
    ref_tensor.retain_grad()

    # Reference forward/backward over the whole global batch at once.
    output_reference = model_reference(ref_tensor)
    grad_tensor = grad_list[1:]
    grad_tensor.insert(0, grad)
    assert(grad_tensor[0].equal(grad))
    grad_tensor = torch.cat(grad_tensor, 0)
    # NOTE(review): both branches are identical; for the weighted case this
    # 1/global_batch scaling is what the DDP side's 2/global_batch reduces to
    # after gradient averaging — confirm the else branch is intentional.
    if weighted_gradient:
        output_reference.backward(grad_tensor / output_reference.size(0))
    else:
        output_reference.backward(grad_tensor / output_reference.size(0))

    dgrad_tensor = dgrad_list[1:]
    dgrad_tensor.insert(0, data.grad)
    dgrad_tensor = torch.cat(dgrad_tensor, 0)
    # check output
    output_tensor = y_list[1:]
    output_tensor.insert(0, output)
    output_tensor = torch.cat(output_tensor, 0)
    passed = True
    passed = passed and compare("check output",
                                output_tensor,
                                output_reference)
    # check stats
    passed = passed and compare("check running mean failed",
                                model_reference[1].running_mean,
                                model.module[1].running_mean)
    passed = passed and compare("check running var failed",
                                model_reference[1].running_var,
                                model.module[1].running_var)
    passed = passed and compare("bn wgrad check failed!",
                                model_reference[1].weight.grad,
                                model.module[1].weight.grad, 1e-6)
    passed = passed and compare("conv wgrad check failed!",
                                model_reference[0].weight.grad,
                                model.module[0].weight.grad)
    # can't really compare dgrad directly, as we need to scale it to account for
    # DDP
    # passed = passed and compare("dgrad check failed!", ref_tensor.grad, dgrad_tensor)
    if passed:
        print("====SBN two gpu with different batches test passed")
    else:
        # Bug fix: the original `assert("*failed ...*")` asserted a non-empty
        # string literal, which is always truthy — a failing run still exited
        # with status 0. Raise so failures are actually reported.
        raise AssertionError("*failed two gpu with different batches tests*")