cyd0806 committed on
Commit
f6aefde
·
verified ·
1 Parent(s): 0e80ec6

Upload apex-master/tests/L0/run_transformer/gpt_scaling_test.py with huggingface_hub

Browse files
apex-master/tests/L0/run_transformer/gpt_scaling_test.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import subprocess
2
+ import os
3
+
4
+ from apex.transformer.testing.commons import TEST_SUCCESS_MESSAGE
5
+
6
+
7
def run_gpt(cmd):
    """Run *cmd* as a subprocess and scrape its stdout for test metrics.

    Parameters
    ----------
    cmd : str
        Space-separated command line (no shell quoting is supported).

    Returns
    -------
    tuple
        ``(runtime, billions_of_params, success, errs)`` where *runtime* is
        the average iteration time in seconds, *billions_of_params* is the
        parameter count divided by 1e9 (rounded to 3 places), *success* is
        whether the child printed ``TEST_SUCCESS_MESSAGE``, and *errs* is the
        raw stderr bytes.
    """
    args = cmd.split(" ")
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    outs, errs = p.communicate()
    success = False
    runtime = 0
    num_params = 0
    for out in outs.decode("utf-8").splitlines():
        # Metric lines look like "Average Iteration Time: 1.234";
        # take everything after ": ".
        if "Average Iteration Time:" in out:
            slicey = out[out.find(":") + 2 :]
            try:
                runtime = float(slicey)
            except ValueError:
                # Narrowed from a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt. Abort the sweep on bad output.
                print(slicey)
                raise SystemExit
        if "Number of Parameters:" in out:
            slicey = out[out.find(":") + 2 :]
            try:
                num_params = int(slicey)
            except ValueError:
                print(slicey)
                raise SystemExit
        if out == str(TEST_SUCCESS_MESSAGE):
            success = True
    # Report parameter count in billions, rounded to 3 decimal places.
    return runtime, round(num_params / 10.0 ** 9, 3), success, errs
34
+
35
+
36
def plot(runtimes):
    """Scatter-plot training-iteration time versus model size.

    One series per distributed setting. The figure is written to
    ``offload_gpt_scaling.png`` and every ``*.png`` in the working directory
    is copied into ``/my_workspace/`` (created if absent).

    Parameters
    ----------
    runtimes : dict
        Maps a distributed-setting label to a ``{billions_of_params: seconds}``
        dict, as built by ``main``.
    """
    # Local imports match the file's existing style for this function and
    # keep matplotlib optional for callers that never plot.
    import glob
    import shutil

    import matplotlib.pyplot as plt

    for distributed_setting in runtimes.keys():
        plt.scatter(
            runtimes[distributed_setting].keys(),
            runtimes[distributed_setting].values(),
            label=distributed_setting,
        )
    plt.legend()
    plt.xlabel("Parameters (Billions)")
    plt.ylabel("Training Iteration time (s)")
    plt.title("GPT Scaling w/ Offloading")
    plt.savefig("offload_gpt_scaling.png")
    plt.close()
    # Use stdlib calls instead of shelling out via os.system("mkdir ..."),
    # os.system("cp ..."): no shell process, and no error spam when the
    # directory already exists.
    os.makedirs("/my_workspace/", exist_ok=True)
    for png in glob.glob("*.png"):
        shutil.copy(png, "/my_workspace/")
54
+
55
+
56
def main():
    """Sweep GPT-2 depth across several distributed settings and plot timings.

    For each (data, tensor, pipeline) parallel configuration, with and without
    CPU offload, launches ``run_gpt_minimal_test.py`` over a range of layer
    counts, records iteration time per model size, and plots the results.
    """
    runtimes = {}
    # Layer counts: fine-grained at the small end, coarser as models grow.
    nlist = (
        list(range(2000, 10000, 2000))
        + list(range(10000, 50000, 5000))
        + list(range(50000, 100000, 10000))
    )
    print("N-List:", nlist)
    parallel_settings = [(8, 1, 1), (4, 2, 1), (2, 1, 4), (1, 2, 4)]
    for data_parr, tens_parr, pipe_parr in parallel_settings:
        for offload in (True, False):
            dist_setting = (
                f"ddp={data_parr}, tensor_parr={tens_parr}"
                f", pipe_parr={pipe_parr}, offload={offload}"
            )
            runtimes[dist_setting] = {}
            print("Beginning Testing for", dist_setting)
            for n in nlist:
                cmd = (
                    "python3 -m torch.distributed.launch --nproc_per_node=8"
                    " run_gpt_minimal_test.py"
                    f" --micro-batch-size 1 --num-layers {n}"
                    " --hidden-size 128 --num-attention-heads 16"
                    " --max-position-embeddings 128 --seq-length 128"
                    f" --tensor-model-parallel-size {tens_parr}"
                    f" --pipeline-model-parallel-size {pipe_parr}"
                )
                if offload:
                    cmd += " --cpu-offload"
                print(cmd)
                runtime, bill_params, success, errs = run_gpt(cmd)
                if success:
                    runtimes[dist_setting][bill_params] = runtime
                    print(
                        str(runtime) + "s per training iter for",
                        str(bill_params) + "B parameter GPT-2",
                    )
                    # Checkpoint the plot once models are large enough to
                    # be slow, so partial results survive a crash.
                    if n >= 10000:
                        plot(runtimes)
                else:
                    # First failing size ends this setting: larger models
                    # would only fail harder (OOM etc.).
                    print("GPT-2 w/", n, "layers failed using", dist_setting)
                    print("Moving on to the next distributed setting...")
                    print("#" * 25)
                    print()
                    plot(runtimes)
                    break
    print(runtimes)
    plot(runtimes)
113
+
114
+
115
+ if __name__ == "__main__":
116
+ main()