{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 59.7710843373494,
  "eval_steps": 10,
  "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.7710843373493976,
      "grad_norm": 3.914616675051536,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.6485056281089783,
      "memory(GiB)": 76.1,
      "step": 1,
      "token_acc": 0.8288129939443704,
      "train_speed(iter/s)": 0.00248
    },
    {
      "epoch": 1.7710843373493976,
      "grad_norm": 7.135952401058273,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.306803822517395,
      "memory(GiB)": 77.59,
      "step": 2,
      "token_acc": 0.8355539032171605,
      "train_speed(iter/s)": 0.002291
    },
    {
      "epoch": 2.7710843373493974,
      "grad_norm": 7.25317611851469,
      "learning_rate": 1.5e-06,
      "loss": 1.2851154804229736,
      "memory(GiB)": 77.59,
      "step": 3,
      "token_acc": 0.834915035973237,
      "train_speed(iter/s)": 0.002247
    },
    {
      "epoch": 3.7710843373493974,
      "grad_norm": 6.865015999508222,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.290574073791504,
      "memory(GiB)": 77.59,
      "step": 4,
      "token_acc": 0.8338767344065504,
      "train_speed(iter/s)": 0.002214
    },
    {
      "epoch": 4.771084337349397,
      "grad_norm": 6.57767604145361,
      "learning_rate": 2.5e-06,
      "loss": 1.261496901512146,
      "memory(GiB)": 77.59,
      "step": 5,
      "token_acc": 0.8348027461456405,
      "train_speed(iter/s)": 0.0022
    },
    {
      "epoch": 5.771084337349397,
      "grad_norm": 5.131561904948073,
      "learning_rate": 3e-06,
      "loss": 1.20093834400177,
      "memory(GiB)": 77.59,
      "step": 6,
      "token_acc": 0.8447540089861448,
      "train_speed(iter/s)": 0.002191
    },
    {
      "epoch": 6.771084337349397,
      "grad_norm": 4.169361922366313,
      "learning_rate": 3.5e-06,
      "loss": 1.118795394897461,
      "memory(GiB)": 77.59,
      "step": 7,
      "token_acc": 0.845426595733518,
      "train_speed(iter/s)": 0.002187
    },
    {
      "epoch": 7.771084337349397,
      "grad_norm": 4.513043229540615,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.0338046550750732,
      "memory(GiB)": 77.59,
      "step": 8,
      "token_acc": 0.8524168712340995,
      "train_speed(iter/s)": 0.00218
    },
    {
      "epoch": 8.771084337349398,
      "grad_norm": 5.185455486257246,
      "learning_rate": 4.5e-06,
      "loss": 0.9289287328720093,
      "memory(GiB)": 77.59,
      "step": 9,
      "token_acc": 0.8699944903581267,
      "train_speed(iter/s)": 0.002177
    },
    {
      "epoch": 9.771084337349398,
      "grad_norm": 3.6878315103563324,
      "learning_rate": 5e-06,
      "loss": 0.9007519483566284,
      "memory(GiB)": 77.59,
      "step": 10,
      "token_acc": 0.8636064441638881,
      "train_speed(iter/s)": 0.002176
    },
    {
      "epoch": 10.771084337349398,
      "grad_norm": 3.153907065444448,
      "learning_rate": 4.999658262481173e-06,
      "loss": 0.8321974277496338,
      "memory(GiB)": 77.59,
      "step": 11,
      "token_acc": 0.8710321094284695,
      "train_speed(iter/s)": 0.002152
    },
    {
      "epoch": 11.771084337349398,
      "grad_norm": 2.203676742176422,
      "learning_rate": 4.998633143352315e-06,
      "loss": 0.7872496247291565,
      "memory(GiB)": 77.59,
      "step": 12,
      "token_acc": 0.8761502671032225,
      "train_speed(iter/s)": 0.002149
    },
    {
      "epoch": 12.771084337349398,
      "grad_norm": 2.4327146254784084,
      "learning_rate": 4.9969249228707625e-06,
      "loss": 0.7419657707214355,
      "memory(GiB)": 77.59,
      "step": 13,
      "token_acc": 0.8786807478287534,
      "train_speed(iter/s)": 0.002148
    },
    {
      "epoch": 13.771084337349398,
      "grad_norm": 1.8937402046126173,
      "learning_rate": 4.994534068046936e-06,
      "loss": 0.709729790687561,
      "memory(GiB)": 77.59,
      "step": 14,
      "token_acc": 0.8879921788137868,
      "train_speed(iter/s)": 0.002151
    },
    {
      "epoch": 14.771084337349398,
      "grad_norm": 1.4897225411877277,
      "learning_rate": 4.991461232516675e-06,
      "loss": 0.676007866859436,
      "memory(GiB)": 77.59,
      "step": 15,
      "token_acc": 0.8962045012503473,
      "train_speed(iter/s)": 0.00215
    },
    {
      "epoch": 15.771084337349398,
      "grad_norm": 1.297539359495755,
      "learning_rate": 4.987707256362529e-06,
      "loss": 0.6474949717521667,
      "memory(GiB)": 77.59,
      "step": 16,
      "token_acc": 0.8947659474239368,
      "train_speed(iter/s)": 0.002148
    },
    {
      "epoch": 16.771084337349397,
      "grad_norm": 0.7199351026372981,
      "learning_rate": 4.983273165884096e-06,
      "loss": 0.6222354173660278,
      "memory(GiB)": 77.59,
      "step": 17,
      "token_acc": 0.9017643862580863,
      "train_speed(iter/s)": 0.00215
    },
    {
      "epoch": 17.771084337349397,
      "grad_norm": 1.4007282443863567,
      "learning_rate": 4.978160173317439e-06,
      "loss": 0.6050044298171997,
      "memory(GiB)": 77.59,
      "step": 18,
      "token_acc": 0.899228721854176,
      "train_speed(iter/s)": 0.00215
    },
    {
      "epoch": 18.771084337349397,
      "grad_norm": 1.4351431315774181,
      "learning_rate": 4.972369676503672e-06,
      "loss": 0.5903453230857849,
      "memory(GiB)": 77.59,
      "step": 19,
      "token_acc": 0.9062169786627631,
      "train_speed(iter/s)": 0.002149
    },
    {
      "epoch": 19.771084337349397,
      "grad_norm": 1.1929137455373535,
      "learning_rate": 4.965903258506806e-06,
      "loss": 0.5688210725784302,
      "memory(GiB)": 77.59,
      "step": 20,
      "token_acc": 0.9079767112253118,
      "train_speed(iter/s)": 0.002148
    },
    {
      "epoch": 20.771084337349397,
      "grad_norm": 1.1732494606890316,
      "learning_rate": 4.9587626871809564e-06,
      "loss": 0.543393611907959,
      "memory(GiB)": 77.59,
      "step": 21,
      "token_acc": 0.9126149389091825,
      "train_speed(iter/s)": 0.002136
    },
    {
      "epoch": 21.771084337349397,
      "grad_norm": 1.0573555070054292,
      "learning_rate": 4.950949914687024e-06,
      "loss": 0.5107942223548889,
      "memory(GiB)": 77.59,
      "step": 22,
      "token_acc": 0.913213028660819,
      "train_speed(iter/s)": 0.002136
    },
    {
      "epoch": 22.771084337349397,
      "grad_norm": 0.9119223186948517,
      "learning_rate": 4.942467076958999e-06,
      "loss": 0.48675835132598877,
      "memory(GiB)": 77.59,
      "step": 23,
      "token_acc": 0.916867432182973,
      "train_speed(iter/s)": 0.002137
    },
    {
      "epoch": 23.771084337349397,
      "grad_norm": 1.0782070696527255,
      "learning_rate": 4.933316493120015e-06,
      "loss": 0.5053229331970215,
      "memory(GiB)": 77.59,
      "step": 24,
      "token_acc": 0.9218187118672303,
      "train_speed(iter/s)": 0.002139
    },
    {
      "epoch": 24.771084337349397,
      "grad_norm": 0.9655818937470999,
      "learning_rate": 4.923500664848327e-06,
      "loss": 0.5017877817153931,
      "memory(GiB)": 77.59,
      "step": 25,
      "token_acc": 0.9221839217084671,
      "train_speed(iter/s)": 0.002138
    },
    {
      "epoch": 25.771084337349397,
      "grad_norm": 0.9404583758277602,
      "learning_rate": 4.913022275693372e-06,
      "loss": 0.4680900275707245,
      "memory(GiB)": 77.59,
      "step": 26,
      "token_acc": 0.9232181127591402,
      "train_speed(iter/s)": 0.002139
    },
    {
      "epoch": 26.771084337349397,
      "grad_norm": 0.9566563796831183,
      "learning_rate": 4.901884190342121e-06,
      "loss": 0.4565889239311218,
      "memory(GiB)": 77.59,
      "step": 27,
      "token_acc": 0.9250697693507346,
      "train_speed(iter/s)": 0.002138
    },
    {
      "epoch": 27.771084337349397,
      "grad_norm": 0.7703532186007063,
      "learning_rate": 4.890089453835894e-06,
      "loss": 0.43708479404449463,
      "memory(GiB)": 77.59,
      "step": 28,
      "token_acc": 0.9259928949775574,
      "train_speed(iter/s)": 0.002139
    },
    {
      "epoch": 28.771084337349397,
      "grad_norm": 0.9233409831228016,
      "learning_rate": 4.8776412907378845e-06,
      "loss": 0.4270290732383728,
      "memory(GiB)": 77.59,
      "step": 29,
      "token_acc": 0.9246415671992716,
      "train_speed(iter/s)": 0.00214
    },
    {
      "epoch": 29.771084337349397,
      "grad_norm": 0.9965352268428146,
      "learning_rate": 4.864543104251587e-06,
      "loss": 0.41402751207351685,
      "memory(GiB)": 77.59,
      "step": 30,
      "token_acc": 0.9240786009778112,
      "train_speed(iter/s)": 0.002139
    },
    {
      "epoch": 30.771084337349397,
      "grad_norm": 0.9358295902337755,
      "learning_rate": 4.850798475290403e-06,
      "loss": 0.39457955956459045,
      "memory(GiB)": 77.59,
      "step": 31,
      "token_acc": 0.9368182401505754,
      "train_speed(iter/s)": 0.002131
    },
    {
      "epoch": 31.771084337349397,
      "grad_norm": 0.9813539670443086,
      "learning_rate": 4.836411161498653e-06,
      "loss": 0.38136690855026245,
      "memory(GiB)": 77.59,
      "step": 32,
      "token_acc": 0.9375963637091769,
      "train_speed(iter/s)": 0.002133
    },
    {
      "epoch": 32.7710843373494,
      "grad_norm": 0.4679577449591144,
      "learning_rate": 4.821385096224268e-06,
      "loss": 0.36845850944519043,
      "memory(GiB)": 77.59,
      "step": 33,
      "token_acc": 0.9348571791559913,
      "train_speed(iter/s)": 0.002134
    },
    {
      "epoch": 33.7710843373494,
      "grad_norm": 0.9212511288160155,
      "learning_rate": 4.8057243874434625e-06,
      "loss": 0.35925933718681335,
      "memory(GiB)": 77.59,
      "step": 34,
      "token_acc": 0.9376081366560561,
      "train_speed(iter/s)": 0.002133
    },
    {
      "epoch": 34.7710843373494,
      "grad_norm": 1.0561026403132139,
      "learning_rate": 4.789433316637644e-06,
      "loss": 0.3401709794998169,
      "memory(GiB)": 77.59,
      "step": 35,
      "token_acc": 0.9403078788403664,
      "train_speed(iter/s)": 0.002134
    },
    {
      "epoch": 35.7710843373494,
      "grad_norm": 0.936417548403036,
      "learning_rate": 4.772516337622907e-06,
      "loss": 0.3241081237792969,
      "memory(GiB)": 77.59,
      "step": 36,
      "token_acc": 0.9484973388290848,
      "train_speed(iter/s)": 0.002135
    },
    {
      "epoch": 36.7710843373494,
      "grad_norm": 1.116983283196546,
      "learning_rate": 4.754978075332398e-06,
      "loss": 0.30902814865112305,
      "memory(GiB)": 77.59,
      "step": 37,
      "token_acc": 0.9476456995060315,
      "train_speed(iter/s)": 0.002135
    },
    {
      "epoch": 37.7710843373494,
      "grad_norm": 0.9993160029154087,
      "learning_rate": 4.736823324551909e-06,
      "loss": 0.308858722448349,
      "memory(GiB)": 77.59,
      "step": 38,
      "token_acc": 0.9504927069901695,
      "train_speed(iter/s)": 0.002135
    },
    {
      "epoch": 38.7710843373494,
      "grad_norm": 0.9122824683879396,
      "learning_rate": 4.71805704860903e-06,
      "loss": 0.27843528985977173,
      "memory(GiB)": 77.59,
      "step": 39,
      "token_acc": 0.9496681563352376,
      "train_speed(iter/s)": 0.002136
    },
    {
      "epoch": 39.7710843373494,
      "grad_norm": 1.009388916794573,
      "learning_rate": 4.698684378016223e-06,
      "loss": 0.25660544633865356,
      "memory(GiB)": 77.59,
      "step": 40,
      "token_acc": 0.9552901130798869,
      "train_speed(iter/s)": 0.002137
    },
    {
      "epoch": 40.7710843373494,
      "grad_norm": 1.1895642620511482,
      "learning_rate": 4.678710609068193e-06,
      "loss": 0.2424250692129135,
      "memory(GiB)": 77.59,
      "step": 41,
      "token_acc": 0.9549429605785101,
      "train_speed(iter/s)": 0.002131
    },
    {
      "epoch": 41.7710843373494,
      "grad_norm": 1.1193041843299223,
      "learning_rate": 4.658141202393935e-06,
      "loss": 0.23843874037265778,
      "memory(GiB)": 77.59,
      "step": 42,
      "token_acc": 0.9608553608553608,
      "train_speed(iter/s)": 0.002131
    },
    {
      "epoch": 42.7710843373494,
      "grad_norm": 1.110610217839956,
      "learning_rate": 4.636981781463848e-06,
      "loss": 0.210187166929245,
      "memory(GiB)": 77.59,
      "step": 43,
      "token_acc": 0.9653150186596419,
      "train_speed(iter/s)": 0.002132
    },
    {
      "epoch": 43.7710843373494,
      "grad_norm": 0.8486274696165886,
      "learning_rate": 4.615238131052339e-06,
      "loss": 0.21901345252990723,
      "memory(GiB)": 77.59,
      "step": 44,
      "token_acc": 0.9653416261042005,
      "train_speed(iter/s)": 0.002132
    },
    {
      "epoch": 44.7710843373494,
      "grad_norm": 1.0423804412798912,
      "learning_rate": 4.592916195656322e-06,
      "loss": 0.17482870817184448,
      "memory(GiB)": 77.59,
      "step": 45,
      "token_acc": 0.9674520144027672,
      "train_speed(iter/s)": 0.002133
    },
    {
      "epoch": 45.7710843373494,
      "grad_norm": 1.2841486053816782,
      "learning_rate": 4.570022077870051e-06,
      "loss": 0.18200109899044037,
      "memory(GiB)": 77.59,
      "step": 46,
      "token_acc": 0.9708482572798199,
      "train_speed(iter/s)": 0.002133
    },
    {
      "epoch": 46.7710843373494,
      "grad_norm": 1.172978940255451,
      "learning_rate": 4.546562036716732e-06,
      "loss": 0.15860611200332642,
      "memory(GiB)": 77.59,
      "step": 47,
      "token_acc": 0.9780445141443995,
      "train_speed(iter/s)": 0.002133
    },
    {
      "epoch": 47.7710843373494,
      "grad_norm": 1.2360197347924713,
      "learning_rate": 4.522542485937369e-06,
      "loss": 0.14376184344291687,
      "memory(GiB)": 77.59,
      "step": 48,
      "token_acc": 0.9766241102700259,
      "train_speed(iter/s)": 0.002135
    },
    {
      "epoch": 48.7710843373494,
      "grad_norm": 0.6385686989268172,
      "learning_rate": 4.497969992237312e-06,
      "loss": 0.13206440210342407,
      "memory(GiB)": 77.59,
      "step": 49,
      "token_acc": 0.9818039023693685,
      "train_speed(iter/s)": 0.002135
    },
    {
      "epoch": 49.7710843373494,
      "grad_norm": 1.2901120485113573,
      "learning_rate": 4.472851273490985e-06,
      "loss": 0.11426319181919098,
      "memory(GiB)": 77.59,
      "step": 50,
      "token_acc": 0.9836425932423786,
      "train_speed(iter/s)": 0.002135
    },
    {
      "epoch": 50.7710843373494,
      "grad_norm": 1.2068228907293697,
      "learning_rate": 4.4471931969052816e-06,
      "loss": 0.09582371264696121,
      "memory(GiB)": 77.59,
      "step": 51,
      "token_acc": 0.9849937986991469,
      "train_speed(iter/s)": 0.00213
    },
    {
      "epoch": 51.7710843373494,
      "grad_norm": 1.1813520357475888,
      "learning_rate": 4.421002777142148e-06,
      "loss": 0.09163334965705872,
      "memory(GiB)": 77.59,
      "step": 52,
      "token_acc": 0.985701349843115,
      "train_speed(iter/s)": 0.00213
    },
    {
      "epoch": 52.7710843373494,
      "grad_norm": 1.1122597460598151,
      "learning_rate": 4.394287174400838e-06,
      "loss": 0.08439977467060089,
      "memory(GiB)": 77.59,
      "step": 53,
      "token_acc": 0.9878925052136355,
      "train_speed(iter/s)": 0.00213
    },
    {
      "epoch": 53.7710843373494,
      "grad_norm": 1.1253515530343927,
      "learning_rate": 4.3670536924603855e-06,
      "loss": 0.07314425706863403,
      "memory(GiB)": 77.59,
      "step": 54,
      "token_acc": 0.9900777579782926,
      "train_speed(iter/s)": 0.00213
    },
    {
      "epoch": 54.7710843373494,
      "grad_norm": 1.1033670330706447,
      "learning_rate": 4.33930977668283e-06,
      "loss": 0.06532438099384308,
      "memory(GiB)": 77.59,
      "step": 55,
      "token_acc": 0.9898691656771185,
      "train_speed(iter/s)": 0.00213
    },
    {
      "epoch": 55.7710843373494,
      "grad_norm": 1.1439476751450148,
      "learning_rate": 4.311063011977723e-06,
      "loss": 0.06318923085927963,
      "memory(GiB)": 77.59,
      "step": 56,
      "token_acc": 0.9903093019267052,
      "train_speed(iter/s)": 0.002131
    },
    {
      "epoch": 56.7710843373494,
      "grad_norm": 1.1421545710607997,
      "learning_rate": 4.282321120728493e-06,
      "loss": 0.05167176201939583,
      "memory(GiB)": 77.59,
      "step": 57,
      "token_acc": 0.991860976682798,
      "train_speed(iter/s)": 0.002131
    },
    {
      "epoch": 57.7710843373494,
      "grad_norm": 1.1132394753881205,
      "learning_rate": 4.253091960681222e-06,
      "loss": 0.05162680149078369,
      "memory(GiB)": 77.59,
      "step": 58,
      "token_acc": 0.992666204277479,
      "train_speed(iter/s)": 0.002132
    },
    {
      "epoch": 58.7710843373494,
      "grad_norm": 1.1383077361483038,
      "learning_rate": 4.2233835227964145e-06,
      "loss": 0.044882968068122864,
      "memory(GiB)": 77.59,
      "step": 59,
      "token_acc": 0.9941307654265692,
      "train_speed(iter/s)": 0.002132
    },
    {
      "epoch": 59.7710843373494,
      "grad_norm": 0.9459729284657843,
      "learning_rate": 4.1932039290643534e-06,
      "loss": 0.042324937880039215,
      "memory(GiB)": 77.59,
      "step": 60,
      "token_acc": 0.9941203679909544,
      "train_speed(iter/s)": 0.002132
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 200,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 268227602284544.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}