BriHug committed
Commit ad7e59a (verified)
1 Parent(s): cd472e8

Upload folder using huggingface_hub
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "bigcode/starcoderbase-1b",
+   "activation_function": "gelu_pytorch_tanh",
+   "architectures": [
+     "GPTBigCodeForCausalLM"
+   ],
+   "attention_softmax_in_fp32": true,
+   "attn_pdrop": 0.1,
+   "bos_token_id": 0,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 0,
+   "inference_runner": 0,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "max_batch_size": null,
+   "max_sequence_length": null,
+   "model_type": "gpt_bigcode",
+   "multi_query": true,
+   "n_embd": 2048,
+   "n_head": 16,
+   "n_inner": 8192,
+   "n_layer": 24,
+   "n_positions": 8192,
+   "pad_key_length": true,
+   "pre_allocate_kv_cache": false,
+   "resid_pdrop": 0.1,
+   "scale_attention_softmax_in_fp32": true,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.43.3",
+   "use_cache": true,
+   "validate_runner_input": true,
+   "vocab_size": 49152
+ }
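
A config like the one above is what the transformers auto classes read at load time. A minimal loading sketch, not part of the commit: the local path "./checkpoint-2531" is hypothetical, and since this upload contains no tokenizer files, the tokenizer is assumed to come from the bigcode/starcoderbase-1b base model named in "_name_or_path".

# Illustrative only: load the uploaded checkpoint folder with transformers.
# Assumes transformers >= 4.43 (the version recorded in config.json);
# "./checkpoint-2531" is a hypothetical local download of this folder.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("./checkpoint-2531")  # reads config.json + model.safetensors
tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoderbase-1b")  # tokenizer is not in this upload

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
# bos/eos token ids (both 0) come from generation_config.json
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0]))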
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "transformers_version": "4.43.3"
+ }
latest ADDED
@@ -0,0 +1 @@
+ global_step2531
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d40dbe43b802748e541db585d72d1ba0a809a771f40b1f1b09ef432c99e22f21
+ size 4951513104
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c85b06c544eceba5a58f86c48a8c19eb57d5eba3c63bfc5640918259cf40de28
+ size 14512
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22ebc1f46b583424e579078140c6d47fb7778af697cedecf9973c40e040aa779
+ size 14512
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:557d22107fb2d5c1d6dcedb2d5886f7e1e6270e624b3364e56c64a86ff92cc9c
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,1804 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9996544063194273,
+   "eval_steps": 500,
+   "global_step": 2531,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.003949642063687978, "grad_norm": 0.7681758999824524, "learning_rate": 3e-05, "loss": 3.3209, "step": 10},
+     {"epoch": 0.007899284127375956, "grad_norm": 0.7526655793190002, "learning_rate": 2.9999953706169412e-05, "loss": 0.1659, "step": 20},
+     {"epoch": 0.011848926191063935, "grad_norm": 0.573715090751648, "learning_rate": 2.999981482496339e-05, "loss": 0.1512, "step": 30},
+     {"epoch": 0.015798568254751913, "grad_norm": 0.602210521697998, "learning_rate": 2.9999583357239188e-05, "loss": 0.1438, "step": 40},
+     {"epoch": 0.01974821031843989, "grad_norm": 0.6056187748908997, "learning_rate": 2.9999259304425536e-05, "loss": 0.1456, "step": 50},
+     {"epoch": 0.02369785238212787, "grad_norm": 0.6448855400085449, "learning_rate": 2.9998842668522657e-05, "loss": 0.1573, "step": 60},
+     {"epoch": 0.027647494445815848, "grad_norm": 0.5019297003746033, "learning_rate": 2.9998333452102237e-05, "loss": 0.1432, "step": 70},
+     {"epoch": 0.031597136509503826, "grad_norm": 0.5146121382713318, "learning_rate": 2.9997731658307427e-05, "loss": 0.1414, "step": 80},
+     {"epoch": 0.03554677857319181, "grad_norm": 0.5668686628341675, "learning_rate": 2.99970372908528e-05, "loss": 0.1406, "step": 90},
+     {"epoch": 0.03949642063687978, "grad_norm": 0.5978260040283203, "learning_rate": 2.9996250354024345e-05, "loss": 0.1389, "step": 100},
+     {"epoch": 0.043446062700567764, "grad_norm": 0.7445759773254395, "learning_rate": 2.9995370852679447e-05, "loss": 0.1468, "step": 110},
+     {"epoch": 0.04739570476425574, "grad_norm": 0.6418613791465759, "learning_rate": 2.9994398792246826e-05, "loss": 0.1486, "step": 120},
+     {"epoch": 0.05134534682794372, "grad_norm": 0.4584648907184601, "learning_rate": 2.9993334178726546e-05, "loss": 0.1404, "step": 130},
+     {"epoch": 0.055294988891631695, "grad_norm": 0.5104970932006836, "learning_rate": 2.9992177018689935e-05, "loss": 0.1398, "step": 140},
+     {"epoch": 0.05924463095531968, "grad_norm": 0.6897855401039124, "learning_rate": 2.9990927319279584e-05, "loss": 0.1546, "step": 150},
+     {"epoch": 0.06319427301900765, "grad_norm": 0.5376682281494141, "learning_rate": 2.998958508820927e-05, "loss": 0.15, "step": 160},
+     {"epoch": 0.06714391508269563, "grad_norm": 0.5601758360862732, "learning_rate": 2.9988150333763933e-05, "loss": 0.1471, "step": 170},
+     {"epoch": 0.07109355714638362, "grad_norm": 0.4657880961894989, "learning_rate": 2.998662306479961e-05, "loss": 0.1394, "step": 180},
+     {"epoch": 0.07504319921007159, "grad_norm": 0.5285632014274597, "learning_rate": 2.9985003290743385e-05, "loss": 0.1452, "step": 190},
+     {"epoch": 0.07899284127375956, "grad_norm": 0.4982571601867676, "learning_rate": 2.9983291021593326e-05, "loss": 0.1402, "step": 200},
+     {"epoch": 0.08294248333744754, "grad_norm": 0.48557624220848083, "learning_rate": 2.998148626791844e-05, "loss": 0.142, "step": 210},
+     {"epoch": 0.08689212540113553, "grad_norm": 0.5023711919784546, "learning_rate": 2.9979589040858586e-05, "loss": 0.1624, "step": 220},
+     {"epoch": 0.0908417674648235, "grad_norm": 0.47005024552345276, "learning_rate": 2.9977599352124413e-05, "loss": 0.155, "step": 230},
+     {"epoch": 0.09479140952851148, "grad_norm": 0.5279797315597534, "learning_rate": 2.9975517213997302e-05, "loss": 0.1532, "step": 240},
+     {"epoch": 0.09874105159219945, "grad_norm": 0.43386149406433105, "learning_rate": 2.9973342639329272e-05, "loss": 0.1481, "step": 250},
+     {"epoch": 0.10269069365588744, "grad_norm": 0.5564565062522888, "learning_rate": 2.997107564154291e-05, "loss": 0.1358, "step": 260},
+     {"epoch": 0.10664033571957542, "grad_norm": 0.6061131358146667, "learning_rate": 2.996871623463128e-05, "loss": 0.1464, "step": 270},
+     {"epoch": 0.11058997778326339, "grad_norm": 0.5967995524406433, "learning_rate": 2.996626443315785e-05, "loss": 0.1451, "step": 280},
+     {"epoch": 0.11453961984695137, "grad_norm": 0.5291288495063782, "learning_rate": 2.9963720252256387e-05, "loss": 0.1436, "step": 290},
+     {"epoch": 0.11848926191063935, "grad_norm": 0.5956757068634033, "learning_rate": 2.9961083707630877e-05, "loss": 0.1492, "step": 300},
+     {"epoch": 0.12243890397432733, "grad_norm": 0.5079193711280823, "learning_rate": 2.9958354815555426e-05, "loss": 0.1388, "step": 310},
+     {"epoch": 0.1263885460380153, "grad_norm": 0.44773226976394653, "learning_rate": 2.995553359287414e-05, "loss": 0.1311, "step": 320},
+     {"epoch": 0.13033818810170328, "grad_norm": 0.5200162529945374, "learning_rate": 2.9952620057001055e-05, "loss": 0.1401, "step": 330},
+     {"epoch": 0.13428783016539125, "grad_norm": 0.5840851068496704, "learning_rate": 2.994961422591999e-05, "loss": 0.1484, "step": 340},
+     {"epoch": 0.13823747222907923, "grad_norm": 0.5336028933525085, "learning_rate": 2.9946516118184484e-05, "loss": 0.1298, "step": 350},
+     {"epoch": 0.14218711429276723, "grad_norm": 0.7243465781211853, "learning_rate": 2.9943325752917633e-05, "loss": 0.1463, "step": 360},
+     {"epoch": 0.1461367563564552, "grad_norm": 0.5500628352165222, "learning_rate": 2.9940043149812006e-05, "loss": 0.1465, "step": 370},
+     {"epoch": 0.15008639842014318, "grad_norm": 0.5050541162490845, "learning_rate": 2.993666832912949e-05, "loss": 0.1434, "step": 380},
+     {"epoch": 0.15403604048383115, "grad_norm": 0.5059782266616821, "learning_rate": 2.9933201311701222e-05, "loss": 0.1385, "step": 390},
+     {"epoch": 0.15798568254751913, "grad_norm": 0.5439670085906982, "learning_rate": 2.9929642118927397e-05, "loss": 0.1421, "step": 400},
+     {"epoch": 0.1619353246112071, "grad_norm": 0.5325440168380737, "learning_rate": 2.992599077277717e-05, "loss": 0.1482, "step": 410},
+     {"epoch": 0.16588496667489508, "grad_norm": 0.5057934522628784, "learning_rate": 2.992224729578851e-05, "loss": 0.1415, "step": 420},
+     {"epoch": 0.16983460873858305, "grad_norm": 0.5029751062393188, "learning_rate": 2.9918411711068074e-05, "loss": 0.1517, "step": 430},
+     {"epoch": 0.17378425080227106, "grad_norm": 0.38729503750801086, "learning_rate": 2.9914484042291053e-05, "loss": 0.1367, "step": 440},
+     {"epoch": 0.17773389286595903, "grad_norm": 0.4877079725265503, "learning_rate": 2.991046431370102e-05, "loss": 0.1446, "step": 450},
+     {"epoch": 0.181683534929647, "grad_norm": 0.48335975408554077, "learning_rate": 2.9906352550109787e-05, "loss": 0.1372, "step": 460},
+     {"epoch": 0.18563317699333498, "grad_norm": 0.48935794830322266, "learning_rate": 2.990214877689727e-05, "loss": 0.144, "step": 470},
+     {"epoch": 0.18958281905702296, "grad_norm": 0.44793424010276794, "learning_rate": 2.9897853020011298e-05, "loss": 0.1298, "step": 480},
+     {"epoch": 0.19353246112071093, "grad_norm": 0.6513413190841675, "learning_rate": 2.9893465305967483e-05, "loss": 0.1361, "step": 490},
+     {"epoch": 0.1974821031843989, "grad_norm": 0.5480667948722839, "learning_rate": 2.9888985661849028e-05, "loss": 0.1497, "step": 500},
+     {"epoch": 0.20143174524808688, "grad_norm": 0.4912254810333252, "learning_rate": 2.988441411530659e-05, "loss": 0.1461, "step": 510},
+     {"epoch": 0.20538138731177488, "grad_norm": 0.4925342798233032, "learning_rate": 2.987975069455809e-05, "loss": 0.1418, "step": 520},
+     {"epoch": 0.20933102937546286, "grad_norm": 0.46876174211502075, "learning_rate": 2.987499542838854e-05, "loss": 0.1409, "step": 530},
+     {"epoch": 0.21328067143915083, "grad_norm": 0.5577364563941956, "learning_rate": 2.9870148346149865e-05, "loss": 0.1423, "step": 540},
+     {"epoch": 0.2172303135028388, "grad_norm": 0.48950615525245667, "learning_rate": 2.9865209477760746e-05, "loss": 0.1367, "step": 550},
+     {"epoch": 0.22117995556652678, "grad_norm": 0.4494156241416931, "learning_rate": 2.9860178853706397e-05, "loss": 0.1384, "step": 560},
+     {"epoch": 0.22512959763021476, "grad_norm": 0.4439913034439087, "learning_rate": 2.9855056505038395e-05, "loss": 0.1447, "step": 570},
+     {"epoch": 0.22907923969390273, "grad_norm": 0.5027551054954529, "learning_rate": 2.984984246337449e-05, "loss": 0.1526, "step": 580},
+     {"epoch": 0.2330288817575907, "grad_norm": 0.4503665566444397, "learning_rate": 2.984453676089842e-05, "loss": 0.1333, "step": 590},
+     {"epoch": 0.2369785238212787, "grad_norm": 0.5127356052398682, "learning_rate": 2.9839139430359684e-05, "loss": 0.1372, "step": 600},
+     {"epoch": 0.24092816588496668, "grad_norm": 0.672027051448822, "learning_rate": 2.983365050507336e-05, "loss": 0.1359, "step": 610},
+     {"epoch": 0.24487780794865466, "grad_norm": 0.5182546377182007, "learning_rate": 2.9828070018919902e-05, "loss": 0.1504, "step": 620},
+     {"epoch": 0.24882745001234263, "grad_norm": 0.5020663738250732, "learning_rate": 2.9822398006344923e-05, "loss": 0.1416, "step": 630},
+     {"epoch": 0.2527770920760306, "grad_norm": 0.41956228017807007, "learning_rate": 2.9816634502358976e-05, "loss": 0.1306, "step": 640},
+     {"epoch": 0.2567267341397186, "grad_norm": 0.42414599657058716, "learning_rate": 2.9810779542537357e-05, "loss": 0.1412, "step": 650},
+     {"epoch": 0.26067637620340656, "grad_norm": 0.5931263566017151, "learning_rate": 2.9804833163019866e-05, "loss": 0.1354, "step": 660},
+     {"epoch": 0.26462601826709453, "grad_norm": 0.49056369066238403, "learning_rate": 2.9798795400510588e-05, "loss": 0.1313, "step": 670},
+     {"epoch": 0.2685756603307825, "grad_norm": 0.5098786950111389, "learning_rate": 2.9792666292277687e-05, "loss": 0.1264, "step": 680},
+     {"epoch": 0.2725253023944705, "grad_norm": 0.5509768128395081, "learning_rate": 2.9786445876153147e-05, "loss": 0.1418, "step": 690},
+     {"epoch": 0.27647494445815846, "grad_norm": 0.6949456930160522, "learning_rate": 2.978013419053255e-05, "loss": 0.1399, "step": 700},
+     {"epoch": 0.28042458652184643, "grad_norm": 0.5233505368232727, "learning_rate": 2.9773731274374847e-05, "loss": 0.1415, "step": 710},
+     {"epoch": 0.28437422858553446, "grad_norm": 0.539608895778656, "learning_rate": 2.9767237167202105e-05, "loss": 0.1458, "step": 720},
+     {"epoch": 0.28832387064922244, "grad_norm": 0.4299115240573883, "learning_rate": 2.976065190909927e-05, "loss": 0.1351, "step": 730},
+     {"epoch": 0.2922735127129104, "grad_norm": 0.46829068660736084, "learning_rate": 2.975397554071392e-05, "loss": 0.1349, "step": 740},
+     {"epoch": 0.2962231547765984, "grad_norm": 0.490376353263855, "learning_rate": 2.9747208103256007e-05, "loss": 0.1439, "step": 750},
+     {"epoch": 0.30017279684028636, "grad_norm": 0.5873934030532837, "learning_rate": 2.9740349638497614e-05, "loss": 0.1395, "step": 760},
+     {"epoch": 0.30412243890397433, "grad_norm": 0.5811406373977661, "learning_rate": 2.973340018877269e-05, "loss": 0.1342, "step": 770},
+     {"epoch": 0.3080720809676623, "grad_norm": 0.5323910713195801, "learning_rate": 2.972635979697678e-05, "loss": 0.1471, "step": 780},
+     {"epoch": 0.3120217230313503, "grad_norm": 0.5084981918334961, "learning_rate": 2.9719228506566792e-05, "loss": 0.1296, "step": 790},
+     {"epoch": 0.31597136509503826, "grad_norm": 0.5692681670188904, "learning_rate": 2.9712006361560685e-05, "loss": 0.1341, "step": 800},
+     {"epoch": 0.31992100715872623, "grad_norm": 0.525729775428772, "learning_rate": 2.9704693406537222e-05, "loss": 0.1454, "step": 810},
+     {"epoch": 0.3238706492224142, "grad_norm": 0.4046003520488739, "learning_rate": 2.9697289686635703e-05, "loss": 0.1342, "step": 820},
+     {"epoch": 0.3278202912861022, "grad_norm": 0.47330015897750854, "learning_rate": 2.968979524755567e-05, "loss": 0.1417, "step": 830},
+     {"epoch": 0.33176993334979016, "grad_norm": 0.4547816812992096, "learning_rate": 2.968221013555662e-05, "loss": 0.1298, "step": 840},
+     {"epoch": 0.33571957541347813, "grad_norm": 0.4318365156650543, "learning_rate": 2.9674534397457747e-05, "loss": 0.1454, "step": 850},
+     {"epoch": 0.3396692174771661, "grad_norm": 0.4720039665699005, "learning_rate": 2.9666768080637622e-05, "loss": 0.1321, "step": 860},
+     {"epoch": 0.3436188595408541, "grad_norm": 0.5289425849914551, "learning_rate": 2.965891123303392e-05, "loss": 0.1301, "step": 870},
+     {"epoch": 0.3475685016045421, "grad_norm": 0.49078208208084106, "learning_rate": 2.9650963903143124e-05, "loss": 0.1452, "step": 880},
+     {"epoch": 0.3515181436682301, "grad_norm": 0.7317320704460144, "learning_rate": 2.9642926140020203e-05, "loss": 0.1516, "step": 890},
+     {"epoch": 0.35546778573191806, "grad_norm": 0.5247913599014282, "learning_rate": 2.9634797993278337e-05, "loss": 0.1408, "step": 900},
+     {"epoch": 0.35941742779560604, "grad_norm": 0.5061410665512085, "learning_rate": 2.9626579513088606e-05, "loss": 0.1396, "step": 910},
+     {"epoch": 0.363367069859294, "grad_norm": 0.5871759057044983, "learning_rate": 2.9618270750179665e-05, "loss": 0.152, "step": 920},
+     {"epoch": 0.367316711922982, "grad_norm": 0.4584594666957855, "learning_rate": 2.9609871755837436e-05, "loss": 0.1274, "step": 930},
+     {"epoch": 0.37126635398666996, "grad_norm": 0.405700147151947, "learning_rate": 2.9601382581904816e-05, "loss": 0.1284, "step": 940},
+     {"epoch": 0.37521599605035794, "grad_norm": 0.4593953788280487, "learning_rate": 2.9592803280781306e-05, "loss": 0.1359, "step": 950},
+     {"epoch": 0.3791656381140459, "grad_norm": 0.5641497373580933, "learning_rate": 2.9584133905422744e-05, "loss": 0.1454, "step": 960},
+     {"epoch": 0.3831152801777339, "grad_norm": 0.3449844419956207, "learning_rate": 2.9575374509340935e-05, "loss": 0.1385, "step": 970},
+     {"epoch": 0.38706492224142186, "grad_norm": 0.46647313237190247, "learning_rate": 2.956652514660336e-05, "loss": 0.1328, "step": 980},
+     {"epoch": 0.39101456430510984, "grad_norm": 0.4442940950393677, "learning_rate": 2.9557585871832787e-05, "loss": 0.1379, "step": 990},
+     {"epoch": 0.3949642063687978, "grad_norm": 0.5622376799583435, "learning_rate": 2.9548556740207e-05, "loss": 0.1525, "step": 1000},
+     {"epoch": 0.3989138484324858, "grad_norm": 0.5095304250717163, "learning_rate": 2.9539437807458404e-05, "loss": 0.1317, "step": 1010},
+     {"epoch": 0.40286349049617376, "grad_norm": 0.36938050389289856, "learning_rate": 2.9530229129873715e-05, "loss": 0.1361, "step": 1020},
+     {"epoch": 0.4068131325598618, "grad_norm": 0.4959389865398407, "learning_rate": 2.9520930764293586e-05, "loss": 0.1475, "step": 1030},
+     {"epoch": 0.41076277462354976, "grad_norm": 0.5631204843521118, "learning_rate": 2.9511542768112284e-05, "loss": 0.1391, "step": 1040},
+     {"epoch": 0.41471241668723774, "grad_norm": 0.4227543771266937, "learning_rate": 2.9502065199277312e-05, "loss": 0.1402, "step": 1050},
+     {"epoch": 0.4186620587509257, "grad_norm": 0.43038052320480347, "learning_rate": 2.9492498116289072e-05, "loss": 0.1239, "step": 1060},
+     {"epoch": 0.4226117008146137, "grad_norm": 0.5115047097206116, "learning_rate": 2.9482841578200486e-05, "loss": 0.1417, "step": 1070},
+     {"epoch": 0.42656134287830166, "grad_norm": 0.4372217059135437, "learning_rate": 2.9473095644616634e-05, "loss": 0.139, "step": 1080},
+     {"epoch": 0.43051098494198964, "grad_norm": 0.792674720287323, "learning_rate": 2.94632603756944e-05, "loss": 0.1355, "step": 1090},
+     {"epoch": 0.4344606270056776, "grad_norm": 0.46272650361061096, "learning_rate": 2.945333583214208e-05, "loss": 0.1513, "step": 1100},
+     {"epoch": 0.4384102690693656, "grad_norm": 0.3727450668811798, "learning_rate": 2.9443322075219036e-05, "loss": 0.1317, "step": 1110},
+     {"epoch": 0.44235991113305356, "grad_norm": 0.39475393295288086, "learning_rate": 2.9433219166735285e-05, "loss": 0.126, "step": 1120},
+     {"epoch": 0.44630955319674154, "grad_norm": 0.5749325156211853, "learning_rate": 2.9423027169051134e-05, "loss": 0.1509, "step": 1130},
+     {"epoch": 0.4502591952604295, "grad_norm": 0.44618451595306396, "learning_rate": 2.9412746145076804e-05, "loss": 0.1257, "step": 1140},
+     {"epoch": 0.4542088373241175, "grad_norm": 0.46040260791778564, "learning_rate": 2.9402376158272026e-05, "loss": 0.1306, "step": 1150},
+     {"epoch": 0.45815847938780546, "grad_norm": 0.6470154523849487, "learning_rate": 2.9391917272645654e-05, "loss": 0.147, "step": 1160},
+     {"epoch": 0.46210812145149344, "grad_norm": 0.4042102098464966, "learning_rate": 2.9381369552755268e-05, "loss": 0.1358, "step": 1170},
+     {"epoch": 0.4660577635151814, "grad_norm": 0.5040680766105652, "learning_rate": 2.937073306370679e-05, "loss": 0.1364, "step": 1180},
+     {"epoch": 0.47000740557886944, "grad_norm": 0.44574257731437683, "learning_rate": 2.936000787115406e-05, "loss": 0.1468, "step": 1190},
+     {"epoch": 0.4739570476425574, "grad_norm": 0.4155598282814026, "learning_rate": 2.9349194041298437e-05, "loss": 0.138, "step": 1200},
+     {"epoch": 0.4779066897062454, "grad_norm": 0.43807128071784973, "learning_rate": 2.9338291640888413e-05, "loss": 0.1376, "step": 1210},
+     {"epoch": 0.48185633176993337, "grad_norm": 0.6164836883544922, "learning_rate": 2.9327300737219164e-05, "loss": 0.1415, "step": 1220},
+     {"epoch": 0.48580597383362134, "grad_norm": 0.4064141511917114, "learning_rate": 2.9316221398132163e-05, "loss": 0.1457, "step": 1230},
+     {"epoch": 0.4897556158973093, "grad_norm": 0.47821277379989624, "learning_rate": 2.930505369201475e-05, "loss": 0.144, "step": 1240},
+     {"epoch": 0.4937052579609973, "grad_norm": 0.4229309558868408, "learning_rate": 2.9293797687799717e-05, "loss": 0.1286, "step": 1250},
+     {"epoch": 0.49765490002468526, "grad_norm": 0.42858126759529114, "learning_rate": 2.9282453454964856e-05, "loss": 0.1388, "step": 1260},
+     {"epoch": 0.5016045420883732, "grad_norm": 0.47248193621635437, "learning_rate": 2.9271021063532586e-05, "loss": 0.1279, "step": 1270},
+     {"epoch": 0.5055541841520612, "grad_norm": 0.5147600769996643, "learning_rate": 2.9259500584069444e-05, "loss": 0.1281, "step": 1280},
+     {"epoch": 0.5095038262157492, "grad_norm": 0.4137686789035797, "learning_rate": 2.924789208768573e-05, "loss": 0.1441, "step": 1290},
+     {"epoch": 0.5134534682794372, "grad_norm": 0.484967440366745, "learning_rate": 2.923619564603501e-05, "loss": 0.1328, "step": 1300},
+     {"epoch": 0.5174031103431251, "grad_norm": 0.5038776397705078, "learning_rate": 2.922441133131369e-05, "loss": 0.1442, "step": 1310},
+     {"epoch": 0.5213527524068131, "grad_norm": 0.4918186366558075, "learning_rate": 2.921253921626058e-05, "loss": 0.1285, "step": 1320},
+     {"epoch": 0.5253023944705011, "grad_norm": 0.447346568107605, "learning_rate": 2.9200579374156447e-05, "loss": 0.1261, "step": 1330},
+     {"epoch": 0.5292520365341891, "grad_norm": 0.4736550748348236, "learning_rate": 2.9188531878823532e-05, "loss": 0.133, "step": 1340},
+     {"epoch": 0.533201678597877, "grad_norm": 0.586494505405426, "learning_rate": 2.9176396804625135e-05, "loss": 0.1409, "step": 1350},
+     {"epoch": 0.537151320661565, "grad_norm": 0.49870672821998596, "learning_rate": 2.9164174226465134e-05, "loss": 0.1444, "step": 1360},
+     {"epoch": 0.541100962725253, "grad_norm": 0.404547780752182, "learning_rate": 2.9151864219787522e-05, "loss": 0.1303, "step": 1370},
+     {"epoch": 0.545050604788941, "grad_norm": 0.42132681608200073, "learning_rate": 2.913946686057595e-05, "loss": 0.1276, "step": 1380},
+     {"epoch": 0.5490002468526289, "grad_norm": 0.4928096830844879, "learning_rate": 2.9126982225353243e-05, "loss": 0.1348, "step": 1390},
+     {"epoch": 0.5529498889163169, "grad_norm": 0.44450655579566956, "learning_rate": 2.911441039118095e-05, "loss": 0.1417, "step": 1400},
+     {"epoch": 0.5568995309800049, "grad_norm": 0.5710647702217102, "learning_rate": 2.910175143565886e-05, "loss": 0.1284, "step": 1410},
+     {"epoch": 0.5608491730436929, "grad_norm": 0.3675592243671417, "learning_rate": 2.9089005436924506e-05, "loss": 0.1505, "step": 1420},
+     {"epoch": 0.564798815107381, "grad_norm": 0.4794444441795349, "learning_rate": 2.90761724736527e-05, "loss": 0.1325, "step": 1430},
+     {"epoch": 0.5687484571710689, "grad_norm": 0.5743889808654785, "learning_rate": 2.906325262505505e-05, "loss": 0.1358, "step": 1440},
+     {"epoch": 0.5726980992347569, "grad_norm": 0.4955087900161743, "learning_rate": 2.9050245970879455e-05, "loss": 0.1387, "step": 1450},
+     {"epoch": 0.5766477412984449, "grad_norm": 0.42035970091819763, "learning_rate": 2.9037152591409635e-05, "loss": 0.1369, "step": 1460},
+     {"epoch": 0.5805973833621328, "grad_norm": 0.4199492335319519, "learning_rate": 2.9023972567464606e-05, "loss": 0.1461, "step": 1470},
+     {"epoch": 0.5845470254258208, "grad_norm": 0.43724125623703003, "learning_rate": 2.9010705980398217e-05, "loss": 0.1219, "step": 1480},
+     {"epoch": 0.5884966674895088, "grad_norm": 0.39386245608329773, "learning_rate": 2.8997352912098616e-05, "loss": 0.1255, "step": 1490},
+     {"epoch": 0.5924463095531968, "grad_norm": 0.3640863597393036, "learning_rate": 2.8983913444987754e-05, "loss": 0.1273, "step": 1500},
+     {"epoch": 0.5963959516168847, "grad_norm": 0.40772178769111633, "learning_rate": 2.8970387662020898e-05, "loss": 0.1326, "step": 1510},
+     {"epoch": 0.6003455936805727, "grad_norm": 0.4535306990146637, "learning_rate": 2.895677564668608e-05, "loss": 0.1273, "step": 1520},
+     {"epoch": 0.6042952357442607, "grad_norm": 0.5429089665412903, "learning_rate": 2.894307748300361e-05, "loss": 0.1245, "step": 1530},
+     {"epoch": 0.6082448778079487, "grad_norm": 0.38951486349105835, "learning_rate": 2.8929293255525563e-05, "loss": 0.1437, "step": 1540},
+     {"epoch": 0.6121945198716366, "grad_norm": 0.4131280183792114, "learning_rate": 2.8915423049335214e-05, "loss": 0.1249, "step": 1550},
+     {"epoch": 0.6161441619353246, "grad_norm": 0.44423356652259827, "learning_rate": 2.890146695004657e-05, "loss": 0.1315, "step": 1560},
+     {"epoch": 0.6200938039990126, "grad_norm": 0.4929848313331604, "learning_rate": 2.88874250438038e-05, "loss": 0.1399, "step": 1570},
+     {"epoch": 0.6240434460627006, "grad_norm": 0.44524630904197693, "learning_rate": 2.8873297417280724e-05, "loss": 0.1304, "step": 1580},
+     {"epoch": 0.6279930881263885, "grad_norm": 0.4765247404575348, "learning_rate": 2.885908415768027e-05, "loss": 0.1422, "step": 1590},
+     {"epoch": 0.6319427301900765, "grad_norm": 0.44227954745292664, "learning_rate": 2.884478535273393e-05, "loss": 0.1573, "step": 1600},
+     {"epoch": 0.6358923722537645, "grad_norm": 0.4304993152618408, "learning_rate": 2.8830401090701234e-05, "loss": 0.1365, "step": 1610},
+     {"epoch": 0.6398420143174525, "grad_norm": 0.42231836915016174, "learning_rate": 2.8815931460369198e-05, "loss": 0.1328, "step": 1620},
+     {"epoch": 0.6437916563811404, "grad_norm": 0.44187867641448975, "learning_rate": 2.880137655105176e-05, "loss": 0.1228, "step": 1630},
+     {"epoch": 0.6477412984448284, "grad_norm": 0.433136910200119, "learning_rate": 2.8786736452589265e-05, "loss": 0.133, "step": 1640},
+     {"epoch": 0.6516909405085164, "grad_norm": 0.4308445453643799, "learning_rate": 2.8772011255347875e-05, "loss": 0.127, "step": 1650},
+     {"epoch": 0.6556405825722044, "grad_norm": 0.4352281391620636, "learning_rate": 2.8757201050219027e-05, "loss": 0.1276, "step": 1660},
+     {"epoch": 0.6595902246358923, "grad_norm": 0.450520396232605, "learning_rate": 2.874230592861887e-05, "loss": 0.1233, "step": 1670},
+     {"epoch": 0.6635398666995803, "grad_norm": 0.4648306369781494, "learning_rate": 2.8727325982487705e-05, "loss": 0.1243, "step": 1680},
+     {"epoch": 0.6674895087632683, "grad_norm": 0.5166367888450623, "learning_rate": 2.871226130428941e-05, "loss": 0.1308, "step": 1690},
+     {"epoch": 0.6714391508269563, "grad_norm": 0.6115042567253113, "learning_rate": 2.8697111987010868e-05, "loss": 0.1339, "step": 1700},
+     {"epoch": 0.6753887928906442, "grad_norm": 0.3470801115036011, "learning_rate": 2.868187812416141e-05, "loss": 0.1305, "step": 1710},
+     {"epoch": 0.6793384349543322, "grad_norm": 0.40242600440979004, "learning_rate": 2.8666559809772217e-05, "loss": 0.1325, "step": 1720},
+     {"epoch": 0.6832880770180202, "grad_norm": 0.4116344749927521, "learning_rate": 2.8651157138395744e-05, "loss": 0.1385, "step": 1730},
+     {"epoch": 0.6872377190817082, "grad_norm": 0.39455336332321167, "learning_rate": 2.863567020510515e-05, "loss": 0.1291, "step": 1740},
+     {"epoch": 0.6911873611453963, "grad_norm": 0.49655675888061523, "learning_rate": 2.86200991054937e-05, "loss": 0.1363, "step": 1750},
+     {"epoch": 0.6951370032090842, "grad_norm": 0.4002642035484314, "learning_rate": 2.8604443935674164e-05, "loss": 0.1421, "step": 1760},
+     {"epoch": 0.6990866452727722, "grad_norm": 0.43481770157814026, "learning_rate": 2.8588704792278248e-05, "loss": 0.1254, "step": 1770},
+     {"epoch": 0.7030362873364602, "grad_norm": 0.49691149592399597, "learning_rate": 2.8572881772455993e-05, "loss": 0.1393, "step": 1780},
+     {"epoch": 0.7069859294001481, "grad_norm": 0.47778138518333435, "learning_rate": 2.8556974973875152e-05, "loss": 0.1387, "step": 1790},
+     {"epoch": 0.7109355714638361, "grad_norm": 0.3887634873390198, "learning_rate": 2.854098449472061e-05, "loss": 0.1301, "step": 1800},
+     {"epoch": 0.7148852135275241, "grad_norm": 0.3825758695602417, "learning_rate": 2.852491043369377e-05, "loss": 0.1292, "step": 1810},
+     {"epoch": 0.7188348555912121, "grad_norm": 0.44277575612068176, "learning_rate": 2.8508752890011957e-05, "loss": 0.1263, "step": 1820},
+     {"epoch": 0.7227844976549, "grad_norm": 0.54979008436203, "learning_rate": 2.849251196340777e-05, "loss": 0.1487, "step": 1830},
+     {"epoch": 0.726734139718588, "grad_norm": 0.5191593170166016, "learning_rate": 2.847618775412851e-05, "loss": 0.1355, "step": 1840},
+     {"epoch": 0.730683781782276, "grad_norm": 0.42348307371139526, "learning_rate": 2.8459780362935532e-05, "loss": 0.1356, "step": 1850},
+     {"epoch": 0.734633423845964, "grad_norm": 0.41457122564315796, "learning_rate": 2.8443289891103634e-05, "loss": 0.1268, "step": 1860},
+     {"epoch": 0.738583065909652, "grad_norm": 0.559184193611145, "learning_rate": 2.842671644042043e-05, "loss": 0.1273, "step": 1870},
+     {"epoch": 0.7425327079733399, "grad_norm": 0.46100959181785583, "learning_rate": 2.8410060113185724e-05, "loss": 0.1357, "step": 1880},
+     {"epoch": 0.7464823500370279, "grad_norm": 0.5634859204292297, "learning_rate": 2.8393321012210877e-05, "loss": 0.1271, "step": 1890},
+     {"epoch": 0.7504319921007159, "grad_norm": 0.4173336327075958, "learning_rate": 2.8376499240818164e-05, "loss": 0.1302, "step": 1900},
+     {"epoch": 0.7543816341644038, "grad_norm": 0.40243804454803467, "learning_rate": 2.8359594902840152e-05, "loss": 0.1333, "step": 1910},
+     {"epoch": 0.7583312762280918, "grad_norm": 0.3762458562850952, "learning_rate": 2.8342608102619052e-05, "loss": 0.1271, "step": 1920},
+     {"epoch": 0.7622809182917798, "grad_norm": 0.43715864419937134, "learning_rate": 2.832553894500607e-05, "loss": 0.1484, "step": 1930},
+     {"epoch": 0.7662305603554678, "grad_norm": 0.3971126675605774, "learning_rate": 2.8308387535360763e-05, "loss": 0.1258, "step": 1940},
+     {"epoch": 0.7701802024191557, "grad_norm": 0.40626007318496704, "learning_rate": 2.829115397955039e-05, "loss": 0.1336, "step": 1950},
+     {"epoch": 0.7741298444828437, "grad_norm": 0.503835141658783, "learning_rate": 2.827383838394926e-05, "loss": 0.135, "step": 1960},
+     {"epoch": 0.7780794865465317, "grad_norm": 0.5298701524734497, "learning_rate": 2.8256440855438074e-05, "loss": 0.1409, "step": 1970},
+     {"epoch": 0.7820291286102197, "grad_norm": 0.5498703122138977, "learning_rate": 2.8238961501403266e-05, "loss": 0.1453, "step": 1980},
+     {"epoch": 0.7859787706739076, "grad_norm": 0.4256785809993744, "learning_rate": 2.8221400429736332e-05, "loss": 0.1297, "step": 1990},
+     {"epoch": 0.7899284127375956, "grad_norm": 0.38886457681655884, "learning_rate": 2.820375774883318e-05, "loss": 0.13, "step": 2000},
+     {"epoch": 0.7938780548012836, "grad_norm": 0.5477973222732544, "learning_rate": 2.8186033567593445e-05, "loss": 0.1398, "step": 2010},
+     {"epoch": 0.7978276968649716, "grad_norm": 0.4944402277469635, "learning_rate": 2.8168227995419828e-05, "loss": 0.1259, "step": 2020},
+     {"epoch": 0.8017773389286595, "grad_norm": 0.4402163624763489, "learning_rate": 2.8150341142217407e-05, "loss": 0.1368, "step": 2030},
+     {"epoch": 0.8057269809923475, "grad_norm": 0.4140058755874634, "learning_rate": 2.8132373118392985e-05, "loss": 0.1402, "step": 2040},
+     {"epoch": 0.8096766230560355, "grad_norm": 0.5238107442855835, "learning_rate": 2.8114324034854378e-05, "loss": 0.1336, "step": 2050},
+     {"epoch": 0.8136262651197236, "grad_norm": 0.45435237884521484, "learning_rate": 2.809619400300975e-05, "loss": 0.1421, "step": 2060},
+     {"epoch": 0.8175759071834116, "grad_norm": 0.5276714563369751, "learning_rate": 2.8077983134766914e-05, "loss": 0.1234, "step": 2070},
+     {"epoch": 0.8215255492470995, "grad_norm": 0.4083622395992279, "learning_rate": 2.8059691542532657e-05, "loss": 0.13, "step": 2080},
+     {"epoch": 0.8254751913107875, "grad_norm": 0.3944040834903717, "learning_rate": 2.8041319339212017e-05, "loss": 0.1229, "step": 2090},
+     {"epoch": 0.8294248333744755, "grad_norm": 0.5149401426315308, "learning_rate": 2.802286663820763e-05, "loss": 0.1349, "step": 2100},
+     {"epoch": 0.8333744754381635, "grad_norm": 0.5086573362350464, "learning_rate": 2.800433355341898e-05, "loss": 0.1367, "step": 2110},
+     {"epoch": 0.8373241175018514, "grad_norm": 0.47434648871421814, "learning_rate": 2.7985720199241736e-05, "loss": 0.1458, "step": 2120},
+     {"epoch": 0.8412737595655394, "grad_norm": 0.5867214798927307, "learning_rate": 2.796702669056703e-05, "loss": 0.1319, "step": 2130},
+     {"epoch": 0.8452234016292274, "grad_norm": 0.4446616768836975, "learning_rate": 2.794825314278074e-05, "loss": 0.1266, "step": 2140},
+     {"epoch": 0.8491730436929154, "grad_norm": 0.44527551531791687, "learning_rate": 2.7929399671762794e-05, "loss": 0.1396, "step": 2150},
+     {"epoch": 0.8531226857566033, "grad_norm": 0.4233611524105072, "learning_rate": 2.791046639388644e-05, "loss": 0.1265, "step": 2160},
+     {"epoch": 0.8570723278202913, "grad_norm": 0.42697539925575256, "learning_rate": 2.7891453426017552e-05, "loss": 0.129, "step": 2170},
+     {"epoch": 0.8610219698839793, "grad_norm": 0.5311276912689209, "learning_rate": 2.7872360885513862e-05, "loss": 0.1351, "step": 2180},
+     {"epoch": 0.8649716119476673, "grad_norm": 0.45064228773117065, "learning_rate": 2.7853188890224292e-05, "loss": 0.1132, "step": 2190},
+     {"epoch": 0.8689212540113552, "grad_norm": 0.39009493589401245, "learning_rate": 2.7833937558488185e-05, "loss": 0.1327, "step": 2200},
+     {"epoch": 0.8728708960750432, "grad_norm": 0.39206671714782715, "learning_rate": 2.7814607009134595e-05, "loss": 0.1209, "step": 2210},
+     {"epoch": 0.8768205381387312, "grad_norm": 0.35631102323532104, "learning_rate": 2.7795197361481545e-05, "loss": 0.1267, "step": 2220},
+     {"epoch": 0.8807701802024192, "grad_norm": 0.4283501207828522, "learning_rate": 2.7775708735335293e-05, "loss": 0.135, "step": 2230},
+     {"epoch": 0.8847198222661071, "grad_norm": 0.3623165190219879, "learning_rate": 2.7756141250989593e-05, "loss": 0.1277, "step": 2240},
+     {"epoch": 0.8886694643297951, "grad_norm": 0.42114606499671936, "learning_rate": 2.773649502922495e-05, "loss": 0.1378, "step": 2250},
+     {"epoch": 0.8926191063934831, "grad_norm": 0.4476473033428192, "learning_rate": 2.7716770191307887e-05, "loss": 0.1296, "step": 2260},
+     {"epoch": 0.896568748457171, "grad_norm": 0.3927001655101776, "learning_rate": 2.7696966858990172e-05, "loss": 0.1348, "step": 2270},
+     {"epoch": 0.900518390520859, "grad_norm": 0.4335472881793976, "learning_rate": 2.7677085154508085e-05, "loss": 0.1243, "step": 2280},
+     {"epoch": 0.904468032584547, "grad_norm": 0.432326078414917, "learning_rate": 2.7657125200581666e-05, "loss": 0.1232, "step": 2290},
+     {"epoch": 0.908417674648235, "grad_norm": 0.42572349309921265, "learning_rate": 2.7637087120413937e-05, "loss": 0.1197, "step": 2300},
+     {"epoch": 0.912367316711923, "grad_norm": 0.5097776651382446, "learning_rate": 2.761697103769017e-05, "loss": 0.1106, "step": 2310},
+     {"epoch": 0.9163169587756109, "grad_norm": 0.4214634895324707, "learning_rate": 2.7596777076577105e-05, "loss": 0.1306, "step": 2320},
+     {"epoch": 0.9202666008392989, "grad_norm": 0.5993767380714417, "learning_rate": 2.7576505361722174e-05, "loss": 0.1308, "step": 2330},
+     {"epoch": 0.9242162429029869, "grad_norm": 0.44176799058914185, "learning_rate": 2.755615601825276e-05, "loss": 0.1348, "step": 2340},
+     {"epoch": 0.9281658849666748, "grad_norm": 0.4011238217353821, "learning_rate": 2.7535729171775406e-05, "loss": 0.1357, "step": 2350},
+     {"epoch": 0.9321155270303628, "grad_norm": 0.35617443919181824, "learning_rate": 2.7515224948375038e-05, "loss": 0.1299, "step": 2360},
+     {"epoch": 0.9360651690940508, "grad_norm": 0.3995439112186432, "learning_rate": 2.7494643474614197e-05, "loss": 0.1327, "step": 2370},
+     {"epoch": 0.9400148111577389, "grad_norm": 0.35780492424964905, "learning_rate": 2.7473984877532247e-05, "loss": 0.1407, "step": 2380},
+     {"epoch": 0.9439644532214269, "grad_norm": 0.46763497591018677, "learning_rate": 2.745324928464461e-05, "loss": 0.1316, "step": 2390},
+     {"epoch": 0.9479140952851148, "grad_norm": 0.5247623324394226, "learning_rate": 2.743243682394195e-05, "loss": 0.1353, "step": 2400},
+     {"epoch": 0.9518637373488028, "grad_norm": 0.5231720805168152, "learning_rate": 2.7411547623889397e-05, "loss": 0.127, "step": 2410},
+     {"epoch": 0.9558133794124908, "grad_norm": 0.38919833302497864, "learning_rate": 2.7390581813425776e-05, "loss": 0.1197, "step": 2420},
+     {"epoch": 0.9597630214761788, "grad_norm": 0.4457249045372009, "learning_rate": 2.736953952196277e-05, "loss": 0.1333, "step": 2430},
+     {"epoch": 0.9637126635398667, "grad_norm": 0.5078391432762146, "learning_rate": 2.734842087938415e-05, "loss": 0.1318, "step": 2440},
+     {"epoch": 0.9676623056035547, "grad_norm": 0.5152226090431213, "learning_rate": 2.7327226016044965e-05, "loss": 0.133, "step": 2450},
+     {"epoch": 0.9716119476672427, "grad_norm": 0.4484505355358124, "learning_rate": 2.7305955062770738e-05, "loss": 0.1291, "step": 2460},
+     {"epoch": 0.9755615897309307, "grad_norm": 0.38752976059913635, "learning_rate": 2.728460815085665e-05, "loss": 0.1274, "step": 2470},
+     {"epoch": 0.9795112317946186, "grad_norm": 0.433149129152298, "learning_rate": 2.7263185412066756e-05, "loss": 0.1205, "step": 2480},
+     {"epoch": 0.9834608738583066, "grad_norm": 0.4824409782886505, "learning_rate": 2.724168697863313e-05, "loss": 0.1369, "step": 2490},
+     {"epoch": 0.9874105159219946, "grad_norm": 0.4385254383087158, "learning_rate": 2.722011298325509e-05, "loss": 0.1249, "step": 2500},
+     {"epoch": 0.9913601579856826, "grad_norm": 0.44593289494514465, "learning_rate": 2.719846355909835e-05, "loss": 0.1336, "step": 2510},
+     {"epoch": 0.9953098000493705, "grad_norm": 0.5583507418632507, "learning_rate": 2.7176738839794218e-05, "loss": 0.1402, "step": 2520},
+     {"epoch": 0.9992594421130585, "grad_norm": 0.47735145688056946, "learning_rate": 2.7154938959438757e-05, "loss": 0.1241, "step": 2530}
+   ],
+   "logging_steps": 10,
+   "max_steps": 12655,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 2.5381326285910835e+17,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
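
Consistency check on the numbers above: max_steps 12655 over num_train_epochs 5 gives 12655 / 5 = 2531 optimizer steps per epoch, which matches global_step 2531 at epoch ≈ 1.0 and the global_step2531 pointer in the latest file; this upload is therefore the end-of-first-epoch checkpoint, saved in addition to the regular every-500-steps saves (save_steps 500).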
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9324568b2c7c9c39c186e2316cb0aa8f175a4aad44de48f0412c4bcf6ab62528
+ size 6072
zero_to_fp32.py ADDED
@@ -0,0 +1,604 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example: python zero_to_fp32.py . pytorch_model.bin
14
+
15
+ import argparse
16
+ import torch
17
+ import glob
18
+ import math
19
+ import os
20
+ import re
21
+ from collections import OrderedDict
22
+ from dataclasses import dataclass
23
+
24
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
25
+ # DeepSpeed data structures it has to be available in the current python environment.
26
+ from deepspeed.utils import logger
27
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
28
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
29
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
30
+
31
+
32
+ @dataclass
33
+ class zero_model_state:
34
+ buffers: dict()
35
+ param_shapes: dict()
36
+ shared_params: list
37
+ ds_version: int
38
+ frozen_param_shapes: dict()
39
+ frozen_param_fragments: dict()
40
+
41
+
42
+ debug = 0
43
+
44
+ # load to cpu
45
+ device = torch.device('cpu')
46
+
47
+
48
+ def atoi(text):
49
+ return int(text) if text.isdigit() else text
50
+
51
+
52
+ def natural_keys(text):
53
+ '''
54
+ alist.sort(key=natural_keys) sorts in human order
55
+ http://nedbatchelder.com/blog/200712/human_sorting.html
56
+ (See Toothy's implementation in the comments)
57
+ '''
58
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
59
+


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
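
# For illustration: a sketch of the layout parse_model_states() expects in each
# *_model_states.pt file, inferred from the fields it reads above:
#   {
#       BUFFER_NAMES: [...],              # names of non-parameter buffers
#       'module': {name: tensor, ...},    # weights; only the buffers are kept from here
#       PARAM_SHAPES: [{name: shape}],    # one dict per optimizer param group
#       'shared_params': {alias: source}, # tied/shared parameter pairs
#       DS_VERSION: ...,                  # deepspeed version that wrote the checkpoint
#       FROZEN_PARAM_SHAPES: {...},       # optional, present only with frozen params
#       FROZEN_PARAM_FRAGMENTS: {...},    # optional, present only with frozen params
#   }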


def parse_optim_states(files, ds_checkpoint_dir):

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the two potentially huge optimizer states, as we only care about the
        # fp32 master weights, and also handle the case where they were already removed by another
        # helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have a different partition_count, as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if isinstance(world_size, list):
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} '*_optim_states.pt' files under '{ds_checkpoint_dir}' but found {total_files}. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
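
# For illustration (hypothetical run): a 2-GPU ZeRO-2 training run leaves two
# files matching *_optim_states.pt, and parse_optim_states() then returns
# (zero_stage=2, world_size=2, fp32_flat_groups), where fp32_flat_groups[i] is
# rank i's list of flat fp32 partitions, one tensor per param group.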


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
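
# Worked example of the zero2 alignment above (hypothetical numbers): with
# world_size=2, align_to = 2 * 2 = 4, so zero2_align(10) = 4 * ceil(10 / 4) = 12;
# offset and avail_numel only have to agree after both are rounded up this way.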


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel
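
# Worked example (hypothetical numbers): zero3_partitioned_param_info(10, 4)
# returns partitioned_numel = ceil(10 / 4) = 3 and padding_numel = 4 - (10 % 4) = 2,
# since 4 ranks * 3 elements each = 12 = 10 real elements + 2 elements of padding.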


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at the boundary of
    # each param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
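
# For illustration (hypothetical numbers): with world_size=2, a 5-element param
# is stored as rank 0 -> [a, b, c] and rank 1 -> [d, e, <pad>]; torch.cat of the
# two 3-element slices gives [a, b, c, d, e, <pad>], narrow(0, 0, 5) drops the
# padding, and view() restores the original shape.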


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will
          attempt to load the tag from the ``latest`` file, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory, in
    which case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
    saved with the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application, i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will
          attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)
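
# For illustration, the same conversion can be done programmatically (the paths
# and the 'global_step14' tag below are hypothetical):
#   convert_zero_checkpoint_to_fp32_state_dict('path/checkpoint-12',
#                                              'path/checkpoint-12/pytorch_model.bin',
#                                              tag='global_step14')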


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model on the cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will
          attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this has been run, the ``model`` will no longer be usable in the deepspeed
    context of the same application, i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
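
# Typical CLI usage (illustrative), run from the top-level checkpoint folder that
# contains the tag sub-folder and the 'latest' file:
#
#   python zero_to_fp32.py . pytorch_model.bin
#
# The resulting file can then be loaded without DeepSpeed:
#
#   state_dict = torch.load('pytorch_model.bin', map_location='cpu')
#   model.load_state_dict(state_dict)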