{
  "best_metric": 1.6345868110656738,
  "best_model_checkpoint": "/data/user_data/gonilude/python_and_text_pythia_410m/checkpoint-150",
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 237,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_accuracy": 0.23943661971830985,
      "eval_loss": 2.654968023300171,
      "eval_runtime": 0.502,
      "eval_samples_per_second": 141.43,
      "eval_steps_per_second": 17.928,
      "num_input_tokens_seen": 0,
      "step": 0
    },
    {
      "epoch": 0.012658227848101266,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 2.196,
      "num_input_tokens_seen": 8192,
      "step": 1
    },
    {
      "epoch": 0.06329113924050633,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 2.6755,
      "num_input_tokens_seen": 40960,
      "step": 5
    },
    {
      "epoch": 0.12658227848101267,
      "grad_norm": 403.4698181152344,
      "learning_rate": 1.25e-05,
      "loss": 3.1957,
      "num_input_tokens_seen": 81920,
      "step": 10
    },
    {
      "epoch": 0.189873417721519,
      "grad_norm": 226.32037353515625,
      "learning_rate": 1.9999058994907564e-05,
      "loss": 3.0085,
      "num_input_tokens_seen": 122880,
      "step": 15
    },
    {
      "epoch": 0.25316455696202533,
      "grad_norm": 128.1693878173828,
      "learning_rate": 1.99661424082419e-05,
      "loss": 2.7458,
      "num_input_tokens_seen": 163840,
      "step": 20
    },
    {
      "epoch": 0.31645569620253167,
      "grad_norm": 231.92202758789062,
      "learning_rate": 1.9886352515311134e-05,
      "loss": 2.0427,
      "num_input_tokens_seen": 204800,
      "step": 25
    },
    {
      "epoch": 0.379746835443038,
      "grad_norm": 62.582794189453125,
      "learning_rate": 1.9760064588305347e-05,
      "loss": 1.7791,
      "num_input_tokens_seen": 245760,
      "step": 30
    },
    {
      "epoch": 0.4430379746835443,
      "grad_norm": 87.08434295654297,
      "learning_rate": 1.9587872591512583e-05,
      "loss": 2.2494,
      "num_input_tokens_seen": 286720,
      "step": 35
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 63.20038604736328,
      "learning_rate": 1.9370586387753532e-05,
      "loss": 2.0989,
      "num_input_tokens_seen": 327680,
      "step": 40
    },
    {
      "epoch": 0.569620253164557,
      "grad_norm": 32.26015090942383,
      "learning_rate": 1.9109227929390378e-05,
      "loss": 2.1393,
      "num_input_tokens_seen": 368640,
      "step": 45
    },
    {
      "epoch": 0.6329113924050633,
      "grad_norm": 51.239463806152344,
      "learning_rate": 1.8805026451824547e-05,
      "loss": 1.8381,
      "num_input_tokens_seen": 409600,
      "step": 50
    },
    {
      "epoch": 0.6329113924050633,
      "eval_accuracy": 0.1267605633802817,
      "eval_loss": 2.1300408840179443,
      "eval_runtime": 0.3672,
      "eval_samples_per_second": 193.345,
      "eval_steps_per_second": 24.509,
      "num_input_tokens_seen": 409600,
      "step": 50
    },
    {
      "epoch": 0.6962025316455697,
      "grad_norm": 45.65270233154297,
      "learning_rate": 1.8459412692089497e-05,
      "loss": 1.9123,
      "num_input_tokens_seen": 450560,
      "step": 55
    },
    {
      "epoch": 0.759493670886076,
      "grad_norm": 33.92768096923828,
      "learning_rate": 1.8074012159730034e-05,
      "loss": 1.6314,
      "num_input_tokens_seen": 491520,
      "step": 60
    },
    {
      "epoch": 0.8227848101265823,
      "grad_norm": 34.99441146850586,
      "learning_rate": 1.765063749161688e-05,
      "loss": 1.8143,
      "num_input_tokens_seen": 532480,
      "step": 65
    },
    {
      "epoch": 0.8860759493670886,
      "grad_norm": 47.59036636352539,
      "learning_rate": 1.719127992665376e-05,
      "loss": 1.8323,
      "num_input_tokens_seen": 573440,
      "step": 70
    },
    {
      "epoch": 0.9493670886075949,
      "grad_norm": 19.0941162109375,
      "learning_rate": 1.6698099940473644e-05,
      "loss": 1.7099,
      "num_input_tokens_seen": 614400,
      "step": 75
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 56.19622039794922,
      "learning_rate": 1.6173417084171537e-05,
      "loss": 1.7048,
      "num_input_tokens_seen": 655360,
      "step": 80
    },
    {
      "epoch": 1.0759493670886076,
      "grad_norm": 50.44072723388672,
      "learning_rate": 1.5619699074864864e-05,
      "loss": 1.817,
      "num_input_tokens_seen": 696320,
      "step": 85
    },
    {
      "epoch": 1.139240506329114,
      "grad_norm": 47.04330825805664,
      "learning_rate": 1.50395501893913e-05,
      "loss": 1.8287,
      "num_input_tokens_seen": 737280,
      "step": 90
    },
    {
      "epoch": 1.2025316455696202,
      "grad_norm": 29.618629455566406,
      "learning_rate": 1.4435699015731449e-05,
      "loss": 1.6956,
      "num_input_tokens_seen": 778240,
      "step": 95
    },
    {
      "epoch": 1.2658227848101267,
      "grad_norm": 22.177780151367188,
      "learning_rate": 1.3810985619764573e-05,
      "loss": 1.8597,
      "num_input_tokens_seen": 819200,
      "step": 100
    },
    {
      "epoch": 1.2658227848101267,
      "eval_accuracy": 0.19718309859154928,
      "eval_loss": 1.6786352396011353,
      "eval_runtime": 0.3701,
      "eval_samples_per_second": 191.836,
      "eval_steps_per_second": 24.317,
      "num_input_tokens_seen": 819200,
      "step": 100
    },
    {
      "epoch": 1.3291139240506329,
      "grad_norm": 32.201202392578125,
      "learning_rate": 1.3168348187715353e-05,
      "loss": 1.6832,
      "num_input_tokens_seen": 860160,
      "step": 105
    },
    {
      "epoch": 1.3924050632911391,
      "grad_norm": 31.627992630004883,
      "learning_rate": 1.2510809207115666e-05,
      "loss": 1.663,
      "num_input_tokens_seen": 901120,
      "step": 110
    },
    {
      "epoch": 1.4556962025316456,
      "grad_norm": 20.219005584716797,
      "learning_rate": 1.1841461251275868e-05,
      "loss": 1.7544,
      "num_input_tokens_seen": 942080,
      "step": 115
    },
    {
      "epoch": 1.518987341772152,
      "grad_norm": 22.077184677124023,
      "learning_rate": 1.1163452434124773e-05,
      "loss": 1.6455,
      "num_input_tokens_seen": 983040,
      "step": 120
    },
    {
      "epoch": 1.5822784810126582,
      "grad_norm": 24.04667091369629,
      "learning_rate": 1.0479971603828001e-05,
      "loss": 1.6255,
      "num_input_tokens_seen": 1024000,
      "step": 125
    },
    {
      "epoch": 1.6455696202531644,
      "grad_norm": 24.609832763671875,
      "learning_rate": 9.79423334482279e-06,
      "loss": 1.8148,
      "num_input_tokens_seen": 1064960,
      "step": 130
    },
    {
      "epoch": 1.7088607594936709,
      "grad_norm": 16.160457611083984,
      "learning_rate": 9.109462858808586e-06,
      "loss": 1.6357,
      "num_input_tokens_seen": 1105920,
      "step": 135
    },
    {
      "epoch": 1.7721518987341773,
      "grad_norm": 23.06271743774414,
      "learning_rate": 8.428880795801965e-06,
      "loss": 1.6064,
      "num_input_tokens_seen": 1146880,
      "step": 140
    },
    {
      "epoch": 1.8354430379746836,
      "grad_norm": 27.028121948242188,
      "learning_rate": 7.75568810659924e-06,
      "loss": 1.6333,
      "num_input_tokens_seen": 1187840,
      "step": 145
    },
    {
      "epoch": 1.8987341772151898,
      "grad_norm": 48.31101608276367,
      "learning_rate": 7.093050987889547e-06,
      "loss": 1.694,
      "num_input_tokens_seen": 1228800,
      "step": 150
    },
    {
      "epoch": 1.8987341772151898,
      "eval_accuracy": 0.2112676056338028,
      "eval_loss": 1.6345868110656738,
      "eval_runtime": 0.3747,
      "eval_samples_per_second": 189.475,
      "eval_steps_per_second": 24.018,
      "num_input_tokens_seen": 1228800,
      "step": 150
    },
    {
      "epoch": 1.9620253164556962,
      "grad_norm": 12.064297676086426,
      "learning_rate": 6.444085990825338e-06,
      "loss": 1.5869,
      "num_input_tokens_seen": 1269760,
      "step": 155
    },
    {
      "epoch": 2.0253164556962027,
      "grad_norm": 21.297130584716797,
      "learning_rate": 5.811845363088477e-06,
      "loss": 1.5669,
      "num_input_tokens_seen": 1310720,
      "step": 160
    },
    {
      "epoch": 2.088607594936709,
      "grad_norm": 26.849414825439453,
      "learning_rate": 5.199302693391958e-06,
      "loss": 1.6514,
      "num_input_tokens_seen": 1351680,
      "step": 165
    },
    {
      "epoch": 2.151898734177215,
      "grad_norm": 28.412883758544922,
      "learning_rate": 4.609338925934743e-06,
      "loss": 1.6545,
      "num_input_tokens_seen": 1392640,
      "step": 170
    },
    {
      "epoch": 2.2151898734177213,
      "grad_norm": 38.271270751953125,
      "learning_rate": 4.044728810587406e-06,
      "loss": 1.5853,
      "num_input_tokens_seen": 1433600,
      "step": 175
    },
    {
      "epoch": 2.278481012658228,
      "grad_norm": 48.123687744140625,
      "learning_rate": 3.508127852536698e-06,
      "loss": 1.6596,
      "num_input_tokens_seen": 1474560,
      "step": 180
    },
    {
      "epoch": 2.3417721518987342,
      "grad_norm": 30.174638748168945,
      "learning_rate": 3.0020598227682794e-06,
      "loss": 1.6385,
      "num_input_tokens_seen": 1515520,
      "step": 185
    },
    {
      "epoch": 2.4050632911392404,
      "grad_norm": 15.243988037109375,
      "learning_rate": 2.5289048881289256e-06,
      "loss": 1.5851,
      "num_input_tokens_seen": 1556480,
      "step": 190
    },
    {
      "epoch": 2.4683544303797467,
      "grad_norm": 50.41596603393555,
      "learning_rate": 2.090888416795582e-06,
      "loss": 1.6231,
      "num_input_tokens_seen": 1597440,
      "step": 195
    },
    {
      "epoch": 2.5316455696202533,
      "grad_norm": 17.154624938964844,
      "learning_rate": 1.69007051180199e-06,
      "loss": 1.586,
      "num_input_tokens_seen": 1638400,
      "step": 200
    },
    {
      "epoch": 2.5316455696202533,
      "eval_accuracy": 0.14084507042253522,
      "eval_loss": 1.6395212411880493,
      "eval_runtime": 0.4282,
      "eval_samples_per_second": 165.806,
      "eval_steps_per_second": 21.018,
      "num_input_tokens_seen": 1638400,
      "step": 200
    },
    {
      "epoch": 2.5949367088607596,
      "grad_norm": 25.179792404174805,
      "learning_rate": 1.3283363218493962e-06,
      "loss": 1.5881,
      "num_input_tokens_seen": 1679360,
      "step": 205
    },
    {
      "epoch": 2.6582278481012658,
      "grad_norm": 14.442487716674805,
      "learning_rate": 1.0073871749720221e-06,
      "loss": 1.5929,
      "num_input_tokens_seen": 1720320,
      "step": 210
    },
    {
      "epoch": 2.721518987341772,
      "grad_norm": 24.72681999206543,
      "learning_rate": 7.287325767579756e-07,
      "loss": 1.5963,
      "num_input_tokens_seen": 1761280,
      "step": 215
    },
    {
      "epoch": 2.7848101265822782,
      "grad_norm": 9.948562622070312,
      "learning_rate": 4.936831107599749e-07,
      "loss": 1.5503,
      "num_input_tokens_seen": 1802240,
      "step": 220
    },
    {
      "epoch": 2.848101265822785,
      "grad_norm": 30.714181900024414,
      "learning_rate": 3.033442744870685e-07,
      "loss": 1.6379,
      "num_input_tokens_seen": 1843200,
      "step": 225
    },
    {
      "epoch": 2.911392405063291,
      "grad_norm": 20.53106117248535,
      "learning_rate": 1.5861127996827597e-07,
      "loss": 1.5714,
      "num_input_tokens_seen": 1884160,
      "step": 230
    },
    {
      "epoch": 2.9746835443037973,
      "grad_norm": 37.22603225708008,
      "learning_rate": 6.016484334238515e-08,
      "loss": 1.5922,
      "num_input_tokens_seen": 1925120,
      "step": 235
    },
    {
      "epoch": 3.0,
      "num_input_tokens_seen": 1941504,
      "step": 237,
      "total_flos": 3521692676653056.0,
      "train_loss": 1.8175702477306253,
      "train_runtime": 153.0605,
      "train_samples_per_second": 12.368,
      "train_steps_per_second": 1.548
    }
  ],
  "logging_steps": 5,
  "max_steps": 237,
  "num_input_tokens_seen": 1941504,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3521692676653056.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}