{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8662696264212236,
  "eval_steps": 500,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02707092582566324,
      "grad_norm": 0.9713733196258545,
      "learning_rate": 9.910124526258799e-05,
      "loss": 3.4945,
      "step": 250
    },
    {
      "epoch": 0.05414185165132648,
      "grad_norm": 0.9521434307098389,
      "learning_rate": 9.819888106839921e-05,
      "loss": 3.5097,
      "step": 500
    },
    {
      "epoch": 0.05414185165132648,
      "eval_loss": 2.95473575592041,
      "eval_runtime": 87.829,
      "eval_samples_per_second": 112.15,
      "eval_steps_per_second": 7.014,
      "step": 500
    },
    {
      "epoch": 0.08121277747698971,
      "grad_norm": 0.9334385395050049,
      "learning_rate": 9.729651687421044e-05,
      "loss": 3.4922,
      "step": 750
    },
    {
      "epoch": 0.10828370330265295,
      "grad_norm": 1.2239603996276855,
      "learning_rate": 9.639415268002166e-05,
      "loss": 3.477,
      "step": 1000
    },
    {
      "epoch": 0.10828370330265295,
      "eval_loss": 2.9319565296173096,
      "eval_runtime": 87.8964,
      "eval_samples_per_second": 112.064,
      "eval_steps_per_second": 7.008,
      "step": 1000
    },
    {
      "epoch": 0.1353546291283162,
      "grad_norm": 1.1727226972579956,
      "learning_rate": 9.549178848583288e-05,
      "loss": 3.493,
      "step": 1250
    },
    {
      "epoch": 0.16242555495397942,
      "grad_norm": 0.966973602771759,
      "learning_rate": 9.458942429164411e-05,
      "loss": 3.5013,
      "step": 1500
    },
    {
      "epoch": 0.16242555495397942,
      "eval_loss": 2.9275825023651123,
      "eval_runtime": 87.899,
      "eval_samples_per_second": 112.06,
      "eval_steps_per_second": 7.008,
      "step": 1500
    },
    {
      "epoch": 0.18949648077964265,
      "grad_norm": 0.9813922047615051,
      "learning_rate": 9.368706009745533e-05,
      "loss": 3.4907,
      "step": 1750
    },
    {
      "epoch": 0.2165674066053059,
      "grad_norm": 1.027085542678833,
      "learning_rate": 9.278469590326656e-05,
      "loss": 3.4588,
      "step": 2000
    },
    {
      "epoch": 0.2165674066053059,
      "eval_loss": 2.9044992923736572,
      "eval_runtime": 87.8776,
      "eval_samples_per_second": 112.088,
      "eval_steps_per_second": 7.01,
      "step": 2000
    },
    {
      "epoch": 0.24363833243096913,
      "grad_norm": 1.0764214992523193,
      "learning_rate": 9.188233170907778e-05,
      "loss": 3.4531,
      "step": 2250
    },
    {
      "epoch": 0.2707092582566324,
      "grad_norm": 1.0297119617462158,
      "learning_rate": 9.0979967514889e-05,
      "loss": 3.4445,
      "step": 2500
    },
    {
      "epoch": 0.2707092582566324,
      "eval_loss": 2.8855738639831543,
      "eval_runtime": 87.8728,
      "eval_samples_per_second": 112.094,
      "eval_steps_per_second": 7.01,
      "step": 2500
    },
    {
      "epoch": 0.2977801840822956,
      "grad_norm": 0.9697523713111877,
      "learning_rate": 9.007760332070024e-05,
      "loss": 3.4349,
      "step": 2750
    },
    {
      "epoch": 0.32485110990795885,
      "grad_norm": 0.9611329436302185,
      "learning_rate": 8.917523912651147e-05,
      "loss": 3.4213,
      "step": 3000
    },
    {
      "epoch": 0.32485110990795885,
      "eval_loss": 2.8725759983062744,
      "eval_runtime": 87.9054,
      "eval_samples_per_second": 112.052,
      "eval_steps_per_second": 7.008,
      "step": 3000
    },
    {
      "epoch": 0.3519220357336221,
      "grad_norm": 1.000977873802185,
      "learning_rate": 8.827287493232269e-05,
      "loss": 3.4239,
      "step": 3250
    },
    {
      "epoch": 0.3789929615592853,
      "grad_norm": 1.0573837757110596,
      "learning_rate": 8.737051073813391e-05,
      "loss": 3.4015,
      "step": 3500
    },
    {
      "epoch": 0.3789929615592853,
      "eval_loss": 2.8721706867218018,
      "eval_runtime": 87.8848,
      "eval_samples_per_second": 112.078,
      "eval_steps_per_second": 7.009,
      "step": 3500
    },
    {
      "epoch": 0.4060638873849486,
      "grad_norm": 0.9532322883605957,
      "learning_rate": 8.646814654394514e-05,
      "loss": 3.401,
      "step": 3750
    },
    {
      "epoch": 0.4331348132106118,
      "grad_norm": 1.0036464929580688,
      "learning_rate": 8.556578234975636e-05,
      "loss": 3.3984,
      "step": 4000
    },
    {
      "epoch": 0.4331348132106118,
      "eval_loss": 2.863417863845825,
      "eval_runtime": 87.8316,
      "eval_samples_per_second": 112.146,
      "eval_steps_per_second": 7.013,
      "step": 4000
    },
    {
      "epoch": 0.46020573903627504,
      "grad_norm": 0.8860335350036621,
      "learning_rate": 8.466341815556758e-05,
      "loss": 3.4081,
      "step": 4250
    },
    {
      "epoch": 0.48727666486193827,
      "grad_norm": 1.4852999448776245,
      "learning_rate": 8.376105396137882e-05,
      "loss": 3.4105,
      "step": 4500
    },
    {
      "epoch": 0.48727666486193827,
      "eval_loss": 2.8409109115600586,
      "eval_runtime": 87.9025,
      "eval_samples_per_second": 112.056,
      "eval_steps_per_second": 7.008,
      "step": 4500
    },
    {
      "epoch": 0.5143475906876015,
      "grad_norm": 0.936912477016449,
      "learning_rate": 8.285868976719005e-05,
      "loss": 3.3952,
      "step": 4750
    },
    {
      "epoch": 0.5414185165132648,
      "grad_norm": 1.0409356355667114,
      "learning_rate": 8.195632557300127e-05,
      "loss": 3.4039,
      "step": 5000
    },
    {
      "epoch": 0.5414185165132648,
      "eval_loss": 2.8329713344573975,
      "eval_runtime": 87.8287,
      "eval_samples_per_second": 112.15,
      "eval_steps_per_second": 7.014,
      "step": 5000
    },
    {
      "epoch": 0.568489442338928,
      "grad_norm": 0.9606480002403259,
      "learning_rate": 8.10539613788125e-05,
      "loss": 3.3819,
      "step": 5250
    },
    {
      "epoch": 0.5955603681645912,
      "grad_norm": 1.142527461051941,
      "learning_rate": 8.015159718462372e-05,
      "loss": 3.3898,
      "step": 5500
    },
    {
      "epoch": 0.5955603681645912,
      "eval_loss": 2.8344321250915527,
      "eval_runtime": 87.8057,
      "eval_samples_per_second": 112.18,
      "eval_steps_per_second": 7.015,
      "step": 5500
    },
    {
      "epoch": 0.6226312939902545,
      "grad_norm": 1.2294474840164185,
      "learning_rate": 7.924923299043494e-05,
      "loss": 3.3769,
      "step": 5750
    },
    {
      "epoch": 0.6497022198159177,
      "grad_norm": 1.0300960540771484,
      "learning_rate": 7.834686879624617e-05,
      "loss": 3.3621,
      "step": 6000
    },
    {
      "epoch": 0.6497022198159177,
      "eval_loss": 2.8137757778167725,
      "eval_runtime": 87.8367,
      "eval_samples_per_second": 112.14,
      "eval_steps_per_second": 7.013,
      "step": 6000
    },
    {
      "epoch": 0.676773145641581,
      "grad_norm": 1.076827883720398,
      "learning_rate": 7.74445046020574e-05,
      "loss": 3.3704,
      "step": 6250
    },
    {
      "epoch": 0.7038440714672441,
      "grad_norm": 1.4118528366088867,
      "learning_rate": 7.654214040786863e-05,
      "loss": 3.367,
      "step": 6500
    },
    {
      "epoch": 0.7038440714672441,
      "eval_loss": 2.8218181133270264,
      "eval_runtime": 87.9044,
      "eval_samples_per_second": 112.053,
      "eval_steps_per_second": 7.008,
      "step": 6500
    },
    {
      "epoch": 0.7309149972929074,
      "grad_norm": 1.0568158626556396,
      "learning_rate": 7.563977621367985e-05,
      "loss": 3.3509,
      "step": 6750
    },
    {
      "epoch": 0.7579859231185706,
      "grad_norm": 1.0436064004898071,
      "learning_rate": 7.473741201949108e-05,
      "loss": 3.3496,
      "step": 7000
    },
    {
      "epoch": 0.7579859231185706,
      "eval_loss": 2.8048973083496094,
      "eval_runtime": 87.8448,
      "eval_samples_per_second": 112.13,
      "eval_steps_per_second": 7.012,
      "step": 7000
    },
    {
      "epoch": 0.7850568489442339,
      "grad_norm": 0.9466687440872192,
      "learning_rate": 7.38350478253023e-05,
      "loss": 3.3381,
      "step": 7250
    },
    {
      "epoch": 0.8121277747698972,
      "grad_norm": 0.9336101412773132,
      "learning_rate": 7.293268363111352e-05,
      "loss": 3.3394,
      "step": 7500
    },
    {
      "epoch": 0.8121277747698972,
      "eval_loss": 2.80117130279541,
      "eval_runtime": 87.8737,
      "eval_samples_per_second": 112.093,
      "eval_steps_per_second": 7.01,
      "step": 7500
    },
    {
      "epoch": 0.8391987005955603,
      "grad_norm": 1.0283994674682617,
      "learning_rate": 7.203031943692475e-05,
      "loss": 3.3379,
      "step": 7750
    },
    {
      "epoch": 0.8662696264212236,
      "grad_norm": 0.9297059774398804,
      "learning_rate": 7.112795524273597e-05,
      "loss": 3.3196,
      "step": 8000
    },
    {
      "epoch": 0.8662696264212236,
      "eval_loss": 2.8071343898773193,
      "eval_runtime": 87.7691,
      "eval_samples_per_second": 112.226,
      "eval_steps_per_second": 7.018,
      "step": 8000
    }
  ],
  "logging_steps": 250,
  "max_steps": 27705,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2888432615424000.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}