{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.37219730941704,
  "eval_steps": 500,
  "global_step": 540,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.35874439461883406,
      "grad_norm": 5.573123455047607,
      "learning_rate": 9.259259259259259e-07,
      "logits/chosen": -2.3576977252960205,
      "logits/rejected": -2.3553850650787354,
      "logps/chosen": -61.0846061706543,
      "logps/rejected": -126.1152572631836,
      "loss": 0.6942,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -0.0012381849810481071,
      "rewards/margins": -0.010831715539097786,
      "rewards/rejected": 0.009593529626727104,
      "step": 10
    },
    {
      "epoch": 0.7174887892376681,
      "grad_norm": 3.8175244331359863,
      "learning_rate": 1.8518518518518519e-06,
      "logits/chosen": -2.3618054389953613,
      "logits/rejected": -2.384873867034912,
      "logps/chosen": -63.2865104675293,
      "logps/rejected": -81.8724365234375,
      "loss": 0.6925,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.002874236088246107,
      "rewards/margins": -0.003056168556213379,
      "rewards/rejected": 0.00018193255527876318,
      "step": 20
    },
    {
      "epoch": 1.0762331838565022,
      "grad_norm": 4.383662700653076,
      "learning_rate": 2.7777777777777783e-06,
      "logits/chosen": -2.3333091735839844,
      "logits/rejected": -2.3545360565185547,
      "logps/chosen": -76.02193450927734,
      "logps/rejected": -88.19218444824219,
      "loss": 0.6932,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.008687756024301052,
      "rewards/margins": -0.0016246589366346598,
      "rewards/rejected": -0.007063096854835749,
      "step": 30
    },
    {
      "epoch": 1.4349775784753362,
      "grad_norm": 4.095139503479004,
      "learning_rate": 3.7037037037037037e-06,
      "logits/chosen": -2.3483197689056396,
      "logits/rejected": -2.363131523132324,
      "logps/chosen": -66.08761596679688,
      "logps/rejected": -78.22222900390625,
      "loss": 0.6889,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.014177674427628517,
      "rewards/margins": 0.012472175993025303,
      "rewards/rejected": -0.026649847626686096,
      "step": 40
    },
    {
      "epoch": 1.7937219730941703,
      "grad_norm": 4.034746170043945,
      "learning_rate": 4.62962962962963e-06,
      "logits/chosen": -2.31459903717041,
      "logits/rejected": -2.335706949234009,
      "logps/chosen": -69.31814575195312,
      "logps/rejected": -77.91527557373047,
      "loss": 0.6823,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.04294499754905701,
      "rewards/margins": 0.007987136952579021,
      "rewards/rejected": -0.050932131707668304,
      "step": 50
    },
    {
      "epoch": 2.1524663677130045,
      "grad_norm": 4.620233535766602,
      "learning_rate": 4.998119881260576e-06,
      "logits/chosen": -2.364583969116211,
      "logits/rejected": -2.3598217964172363,
      "logps/chosen": -69.84207153320312,
      "logps/rejected": -79.67817687988281,
      "loss": 0.6717,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.06739393621683121,
      "rewards/margins": 0.050740111619234085,
      "rewards/rejected": -0.118134044110775,
      "step": 60
    },
    {
      "epoch": 2.5112107623318387,
      "grad_norm": 4.450082302093506,
      "learning_rate": 4.9866405060165044e-06,
      "logits/chosen": -2.298379421234131,
      "logits/rejected": -2.3169469833374023,
      "logps/chosen": -79.13768005371094,
      "logps/rejected": -79.24192810058594,
      "loss": 0.6499,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.1282506138086319,
      "rewards/margins": 0.08815944194793701,
      "rewards/rejected": -0.21641004085540771,
      "step": 70
    },
    {
      "epoch": 2.8699551569506725,
      "grad_norm": 4.7328877449035645,
      "learning_rate": 4.964774158361991e-06,
      "logits/chosen": -2.3363795280456543,
      "logits/rejected": -2.3331849575042725,
      "logps/chosen": -77.52214050292969,
      "logps/rejected": -86.90077209472656,
      "loss": 0.6346,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.22365322709083557,
      "rewards/margins": 0.1937221735715866,
      "rewards/rejected": -0.4173754155635834,
      "step": 80
    },
    {
      "epoch": 3.2286995515695067,
      "grad_norm": 4.016219615936279,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": -2.30871844291687,
      "logits/rejected": -2.3049044609069824,
      "logps/chosen": -65.6957778930664,
      "logps/rejected": -77.02860260009766,
      "loss": 0.6004,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.23905189335346222,
      "rewards/margins": 0.2932591438293457,
      "rewards/rejected": -0.5323110222816467,
      "step": 90
    },
    {
      "epoch": 3.587443946188341,
      "grad_norm": 4.553714752197266,
      "learning_rate": 4.8902889044347e-06,
      "logits/chosen": -2.3068125247955322,
      "logits/rejected": -2.294462203979492,
      "logps/chosen": -85.46375274658203,
      "logps/rejected": -79.72854614257812,
      "loss": 0.5816,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.3506511151790619,
      "rewards/margins": 0.2997886538505554,
      "rewards/rejected": -0.6504397392272949,
      "step": 100
    },
    {
      "epoch": 3.9461883408071747,
      "grad_norm": 4.804734230041504,
      "learning_rate": 4.837981131305475e-06,
      "logits/chosen": -2.3317933082580566,
      "logits/rejected": -2.3493664264678955,
      "logps/chosen": -68.45785522460938,
      "logps/rejected": -88.28421020507812,
      "loss": 0.5672,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.4280410706996918,
      "rewards/margins": 0.3461475670337677,
      "rewards/rejected": -0.7741886377334595,
      "step": 110
    },
    {
      "epoch": 4.304932735426009,
      "grad_norm": 4.650998592376709,
      "learning_rate": 4.775907352415367e-06,
      "logits/chosen": -2.3039748668670654,
      "logits/rejected": -2.301835775375366,
      "logps/chosen": -76.26728820800781,
      "logps/rejected": -87.93345642089844,
      "loss": 0.5258,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.4848151206970215,
      "rewards/margins": 0.6179562211036682,
      "rewards/rejected": -1.102771282196045,
      "step": 120
    },
    {
      "epoch": 4.663677130044843,
      "grad_norm": 4.17809534072876,
      "learning_rate": 4.70432685680402e-06,
      "logits/chosen": -2.299182653427124,
      "logits/rejected": -2.291717290878296,
      "logps/chosen": -77.27394104003906,
      "logps/rejected": -76.96590423583984,
      "loss": 0.5141,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.5012012720108032,
      "rewards/margins": 0.39593780040740967,
      "rewards/rejected": -0.8971391916275024,
      "step": 130
    },
    {
      "epoch": 5.022421524663677,
      "grad_norm": 4.4158735275268555,
      "learning_rate": 4.623538644118244e-06,
      "logits/chosen": -2.267500400543213,
      "logits/rejected": -2.271239995956421,
      "logps/chosen": -77.53614807128906,
      "logps/rejected": -105.53443908691406,
      "loss": 0.4929,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.47719526290893555,
      "rewards/margins": 0.7529892921447754,
      "rewards/rejected": -1.230184555053711,
      "step": 140
    },
    {
      "epoch": 5.381165919282511,
      "grad_norm": 4.5556640625,
      "learning_rate": 4.533880175657419e-06,
      "logits/chosen": -2.2397818565368652,
      "logits/rejected": -2.2796878814697266,
      "logps/chosen": -75.30777740478516,
      "logps/rejected": -97.59407043457031,
      "loss": 0.4545,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.3897199034690857,
      "rewards/margins": 0.8326279520988464,
      "rewards/rejected": -1.2223479747772217,
      "step": 150
    },
    {
      "epoch": 5.739910313901345,
      "grad_norm": 5.067012786865234,
      "learning_rate": 4.435725964760331e-06,
      "logits/chosen": -2.284294843673706,
      "logits/rejected": -2.264681339263916,
      "logps/chosen": -87.51654052734375,
      "logps/rejected": -82.73191833496094,
      "loss": 0.4392,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.42664116621017456,
      "rewards/margins": 0.7053943872451782,
      "rewards/rejected": -1.132035493850708,
      "step": 160
    },
    {
      "epoch": 6.098654708520179,
      "grad_norm": 4.425904273986816,
      "learning_rate": 4.329486012421531e-06,
      "logits/chosen": -2.296356201171875,
      "logits/rejected": -2.2917532920837402,
      "logps/chosen": -76.75779724121094,
      "logps/rejected": -90.15029907226562,
      "loss": 0.4153,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.5271193385124207,
      "rewards/margins": 0.7608373761177063,
      "rewards/rejected": -1.2879568338394165,
      "step": 170
    },
    {
      "epoch": 6.457399103139013,
      "grad_norm": 4.75029182434082,
      "learning_rate": 4.215604094671835e-06,
      "logits/chosen": -2.2561912536621094,
      "logits/rejected": -2.253948450088501,
      "logps/chosen": -68.14691162109375,
      "logps/rejected": -85.39097595214844,
      "loss": 0.3785,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.5065070390701294,
      "rewards/margins": 1.024714708328247,
      "rewards/rejected": -1.531221628189087,
      "step": 180
    },
    {
      "epoch": 6.816143497757848,
      "grad_norm": 5.048355579376221,
      "learning_rate": 4.094555908876765e-06,
      "logits/chosen": -2.323347568511963,
      "logits/rejected": -2.284219741821289,
      "logps/chosen": -74.8355484008789,
      "logps/rejected": -85.9789047241211,
      "loss": 0.3697,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.533603310585022,
      "rewards/margins": 1.2120137214660645,
      "rewards/rejected": -1.7456169128417969,
      "step": 190
    },
    {
      "epoch": 7.174887892376682,
      "grad_norm": 5.01900053024292,
      "learning_rate": 3.966847086696045e-06,
      "logits/chosen": -2.300300121307373,
      "logits/rejected": -2.3180909156799316,
      "logps/chosen": -86.38873291015625,
      "logps/rejected": -89.72538757324219,
      "loss": 0.3611,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.7510987520217896,
      "rewards/margins": 0.9809296727180481,
      "rewards/rejected": -1.7320283651351929,
      "step": 200
    },
    {
      "epoch": 7.533632286995516,
      "grad_norm": 4.92667818069458,
      "learning_rate": 3.833011082004229e-06,
      "logits/chosen": -2.297994375228882,
      "logits/rejected": -2.288382053375244,
      "logps/chosen": -71.64348602294922,
      "logps/rejected": -84.01541900634766,
      "loss": 0.3248,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.8383395075798035,
      "rewards/margins": 1.047420859336853,
      "rewards/rejected": -1.8857605457305908,
      "step": 210
    },
    {
      "epoch": 7.8923766816143495,
      "grad_norm": 5.767988204956055,
      "learning_rate": 3.693606942594873e-06,
      "logits/chosen": -2.194643259048462,
      "logits/rejected": -2.1788828372955322,
      "logps/chosen": -83.70150756835938,
      "logps/rejected": -101.75688171386719,
      "loss": 0.3004,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.8993238210678101,
      "rewards/margins": 1.521676778793335,
      "rewards/rejected": -2.4210009574890137,
      "step": 220
    },
    {
      "epoch": 8.251121076233185,
      "grad_norm": 4.738667964935303,
      "learning_rate": 3.549216974976073e-06,
      "logits/chosen": -2.2021820545196533,
      "logits/rejected": -2.234846591949463,
      "logps/chosen": -85.80267333984375,
      "logps/rejected": -126.21333312988281,
      "loss": 0.2709,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.1428090333938599,
      "rewards/margins": 1.669283151626587,
      "rewards/rejected": -2.8120923042297363,
      "step": 230
    },
    {
      "epoch": 8.609865470852018,
      "grad_norm": 5.022470951080322,
      "learning_rate": 3.400444312011776e-06,
      "logits/chosen": -2.2339494228363037,
      "logits/rejected": -2.259722948074341,
      "logps/chosen": -79.16600036621094,
      "logps/rejected": -100.4195327758789,
      "loss": 0.2653,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.070847988128662,
      "rewards/margins": 1.8333218097686768,
      "rewards/rejected": -2.9041695594787598,
      "step": 240
    },
    {
      "epoch": 8.968609865470851,
      "grad_norm": 6.2118096351623535,
      "learning_rate": 3.2479103935691047e-06,
      "logits/chosen": -2.197798490524292,
      "logits/rejected": -2.2084178924560547,
      "logps/chosen": -85.23640441894531,
      "logps/rejected": -116.28971862792969,
      "loss": 0.2426,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.2635414600372314,
      "rewards/margins": 1.8116687536239624,
      "rewards/rejected": -3.0752100944519043,
      "step": 250
    },
    {
      "epoch": 9.327354260089686,
      "grad_norm": 5.798530101776123,
      "learning_rate": 3.092252370695298e-06,
      "logits/chosen": -2.1891565322875977,
      "logits/rejected": -2.2006757259368896,
      "logps/chosen": -77.15342712402344,
      "logps/rejected": -129.3516845703125,
      "loss": 0.2356,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.1600430011749268,
      "rewards/margins": 2.2219691276550293,
      "rewards/rejected": -3.382011890411377,
      "step": 260
    },
    {
      "epoch": 9.68609865470852,
      "grad_norm": 6.839400768280029,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": -2.209928035736084,
      "logits/rejected": -2.2113702297210693,
      "logps/chosen": -79.22918701171875,
      "logps/rejected": -105.42140197753906,
      "loss": 0.2016,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.5536689758300781,
      "rewards/margins": 1.9507955312728882,
      "rewards/rejected": -3.504464626312256,
      "step": 270
    },
    {
      "epoch": 10.044843049327355,
      "grad_norm": 4.958732604980469,
      "learning_rate": 2.7741751485313295e-06,
      "logits/chosen": -2.1307220458984375,
      "logits/rejected": -2.1803646087646484,
      "logps/chosen": -81.51334381103516,
      "logps/rejected": -134.2724151611328,
      "loss": 0.1909,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.3520208597183228,
      "rewards/margins": 2.556453227996826,
      "rewards/rejected": -3.9084744453430176,
      "step": 280
    },
    {
      "epoch": 10.403587443946188,
      "grad_norm": 5.86897611618042,
      "learning_rate": 2.6130845929767662e-06,
      "logits/chosen": -2.139816999435425,
      "logits/rejected": -2.14668607711792,
      "logps/chosen": -87.5683822631836,
      "logps/rejected": -116.4664535522461,
      "loss": 0.1731,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.6710402965545654,
      "rewards/margins": 2.187368869781494,
      "rewards/rejected": -3.8584091663360596,
      "step": 290
    },
    {
      "epoch": 10.762331838565022,
      "grad_norm": 4.753023147583008,
      "learning_rate": 2.4515216705704396e-06,
      "logits/chosen": -2.1322758197784424,
      "logits/rejected": -2.1539082527160645,
      "logps/chosen": -74.43647766113281,
      "logps/rejected": -104.6937255859375,
      "loss": 0.1715,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.6632559299468994,
      "rewards/margins": 2.3588404655456543,
      "rewards/rejected": -4.022096157073975,
      "step": 300
    },
    {
      "epoch": 11.121076233183857,
      "grad_norm": 4.878518104553223,
      "learning_rate": 2.290161247507733e-06,
      "logits/chosen": -2.0836901664733887,
      "logits/rejected": -2.145113229751587,
      "logps/chosen": -102.97503662109375,
      "logps/rejected": -232.6587371826172,
      "loss": 0.1618,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.2652416229248047,
      "rewards/margins": 5.368926048278809,
      "rewards/rejected": -7.634167671203613,
      "step": 310
    },
    {
      "epoch": 11.47982062780269,
      "grad_norm": 5.1058244705200195,
      "learning_rate": 2.129677344121879e-06,
      "logits/chosen": -2.157466173171997,
      "logits/rejected": -2.155546188354492,
      "logps/chosen": -95.01036834716797,
      "logps/rejected": -123.38541412353516,
      "loss": 0.1352,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.074333906173706,
      "rewards/margins": 2.805738925933838,
      "rewards/rejected": -4.880073070526123,
      "step": 320
    },
    {
      "epoch": 11.838565022421525,
      "grad_norm": 5.019484043121338,
      "learning_rate": 1.970740319426474e-06,
      "logits/chosen": -2.1018729209899902,
      "logits/rejected": -2.116931438446045,
      "logps/chosen": -106.68685150146484,
      "logps/rejected": -124.8704833984375,
      "loss": 0.134,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.079122304916382,
      "rewards/margins": 2.727034568786621,
      "rewards/rejected": -4.806157112121582,
      "step": 330
    },
    {
      "epoch": 12.197309417040358,
      "grad_norm": 4.403121471405029,
      "learning_rate": 1.8140140709517467e-06,
      "logits/chosen": -2.116088628768921,
      "logits/rejected": -2.1317386627197266,
      "logps/chosen": -98.20851135253906,
      "logps/rejected": -127.1669921875,
      "loss": 0.1183,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.1554946899414062,
      "rewards/margins": 2.7777180671691895,
      "rewards/rejected": -4.9332122802734375,
      "step": 340
    },
    {
      "epoch": 12.556053811659194,
      "grad_norm": 4.341458797454834,
      "learning_rate": 1.6601532615711452e-06,
      "logits/chosen": -2.0657103061676025,
      "logits/rejected": -2.106078863143921,
      "logps/chosen": -97.9491958618164,
      "logps/rejected": -138.72988891601562,
      "loss": 0.1135,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -2.296207904815674,
      "rewards/margins": 3.2302253246307373,
      "rewards/rejected": -5.526432991027832,
      "step": 350
    },
    {
      "epoch": 12.914798206278027,
      "grad_norm": 4.83184814453125,
      "learning_rate": 1.509800584902108e-06,
      "logits/chosen": -2.0779662132263184,
      "logits/rejected": -2.1079659461975098,
      "logps/chosen": -104.03936767578125,
      "logps/rejected": -128.2595672607422,
      "loss": 0.1083,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.526491165161133,
      "rewards/margins": 3.116278886795044,
      "rewards/rejected": -5.642770290374756,
      "step": 360
    },
    {
      "epoch": 13.27354260089686,
      "grad_norm": 4.208549976348877,
      "learning_rate": 1.3635840807037487e-06,
      "logits/chosen": -2.0661022663116455,
      "logits/rejected": -2.1056418418884277,
      "logps/chosen": -83.17430114746094,
      "logps/rejected": -128.30245971679688,
      "loss": 0.0921,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.5337133407592773,
      "rewards/margins": 3.501122236251831,
      "rewards/rejected": -6.034835338592529,
      "step": 370
    },
    {
      "epoch": 13.632286995515695,
      "grad_norm": 6.424919128417969,
      "learning_rate": 1.2221145114853172e-06,
      "logits/chosen": -2.0403847694396973,
      "logits/rejected": -2.0700008869171143,
      "logps/chosen": -105.04042053222656,
      "logps/rejected": -136.50611877441406,
      "loss": 0.1013,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.4945261478424072,
      "rewards/margins": 3.4043514728546143,
      "rewards/rejected": -5.898877143859863,
      "step": 380
    },
    {
      "epoch": 13.991031390134529,
      "grad_norm": 4.117794036865234,
      "learning_rate": 1.085982811283654e-06,
      "logits/chosen": -2.041865587234497,
      "logits/rejected": -2.0910420417785645,
      "logps/chosen": -114.09271240234375,
      "logps/rejected": -143.93826293945312,
      "loss": 0.0921,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.0714075565338135,
      "rewards/margins": 3.2793126106262207,
      "rewards/rejected": -6.350719451904297,
      "step": 390
    },
    {
      "epoch": 14.349775784753364,
      "grad_norm": 4.791493892669678,
      "learning_rate": 9.557576172663577e-07,
      "logits/chosen": -2.048828601837158,
      "logits/rejected": -2.064530372619629,
      "logps/chosen": -101.74618530273438,
      "logps/rejected": -145.79129028320312,
      "loss": 0.0828,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.637481212615967,
      "rewards/margins": 3.572800397872925,
      "rewards/rejected": -6.2102813720703125,
      "step": 400
    },
    {
      "epoch": 14.708520179372197,
      "grad_norm": 3.2735021114349365,
      "learning_rate": 8.319828944714508e-07,
      "logits/chosen": -2.0613853931427,
      "logits/rejected": -2.0846283435821533,
      "logps/chosen": -105.1708755493164,
      "logps/rejected": -135.4827117919922,
      "loss": 0.078,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.1742587089538574,
      "rewards/margins": 3.548724412918091,
      "rewards/rejected": -6.722983360290527,
      "step": 410
    },
    {
      "epoch": 15.067264573991032,
      "grad_norm": 3.9526467323303223,
      "learning_rate": 7.151756636052529e-07,
      "logits/chosen": -2.0549488067626953,
      "logits/rejected": -2.0416836738586426,
      "logps/chosen": -111.19111633300781,
      "logps/rejected": -137.0021209716797,
      "loss": 0.0759,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.218317747116089,
      "rewards/margins": 3.6524665355682373,
      "rewards/rejected": -6.870783805847168,
      "step": 420
    },
    {
      "epoch": 15.426008968609866,
      "grad_norm": 3.010849952697754,
      "learning_rate": 6.058238413897052e-07,
      "logits/chosen": -2.02756404876709,
      "logits/rejected": -2.025636672973633,
      "logps/chosen": -118.96656799316406,
      "logps/rejected": -152.67581176757812,
      "loss": 0.0776,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.3787834644317627,
      "rewards/margins": 3.612543821334839,
      "rewards/rejected": -6.991326808929443,
      "step": 430
    },
    {
      "epoch": 15.784753363228699,
      "grad_norm": 3.2514679431915283,
      "learning_rate": 5.043842024802675e-07,
      "logits/chosen": -2.0224342346191406,
      "logits/rejected": -2.024209499359131,
      "logps/chosen": -100.25440216064453,
      "logps/rejected": -148.87582397460938,
      "loss": 0.0707,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.8083739280700684,
      "rewards/margins": 3.6432766914367676,
      "rewards/rejected": -6.451650142669678,
      "step": 440
    },
    {
      "epoch": 16.143497757847534,
      "grad_norm": 4.400411605834961,
      "learning_rate": 4.1128047146765936e-07,
      "logits/chosen": -2.0213570594787598,
      "logits/rejected": -2.0214767456054688,
      "logps/chosen": -99.02058410644531,
      "logps/rejected": -140.37637329101562,
      "loss": 0.0631,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.552924394607544,
      "rewards/margins": 3.7184109687805176,
      "rewards/rejected": -7.271335601806641,
      "step": 450
    },
    {
      "epoch": 16.50224215246637,
      "grad_norm": 4.746975898742676,
      "learning_rate": 3.269015529333805e-07,
      "logits/chosen": -1.9746768474578857,
      "logits/rejected": -2.0172688961029053,
      "logps/chosen": -108.14969635009766,
      "logps/rejected": -155.90933227539062,
      "loss": 0.0699,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.199385404586792,
      "rewards/margins": 4.21014928817749,
      "rewards/rejected": -7.4095354080200195,
      "step": 460
    },
    {
      "epoch": 16.8609865470852,
      "grad_norm": 5.0379319190979,
      "learning_rate": 2.515999069522676e-07,
      "logits/chosen": -2.031390428543091,
      "logits/rejected": -2.0676589012145996,
      "logps/chosen": -118.65937805175781,
      "logps/rejected": -143.47872924804688,
      "loss": 0.0624,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.263296604156494,
      "rewards/margins": 3.919255018234253,
      "rewards/rejected": -7.182551383972168,
      "step": 470
    },
    {
      "epoch": 17.219730941704036,
      "grad_norm": 3.773524284362793,
      "learning_rate": 1.8569007682777417e-07,
      "logits/chosen": -2.0223498344421387,
      "logits/rejected": -2.028245687484741,
      "logps/chosen": -98.20159149169922,
      "logps/rejected": -126.34019470214844,
      "loss": 0.0658,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.06457781791687,
      "rewards/margins": 3.4116806983947754,
      "rewards/rejected": -6.476258277893066,
      "step": 480
    },
    {
      "epoch": 17.57847533632287,
      "grad_norm": 4.083519458770752,
      "learning_rate": 1.2944737520980883e-07,
      "logits/chosen": -1.9950075149536133,
      "logits/rejected": -2.0348432064056396,
      "logps/chosen": -115.1516342163086,
      "logps/rejected": -160.4219970703125,
      "loss": 0.066,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.625664234161377,
      "rewards/margins": 3.8759894371032715,
      "rewards/rejected": -7.501654148101807,
      "step": 490
    },
    {
      "epoch": 17.937219730941703,
      "grad_norm": 4.096585273742676,
      "learning_rate": 8.310673408334496e-08,
      "logits/chosen": -1.9964863061904907,
      "logits/rejected": -1.9901469945907593,
      "logps/chosen": -107.2447509765625,
      "logps/rejected": -144.03562927246094,
      "loss": 0.0564,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.5319015979766846,
      "rewards/margins": 3.939161777496338,
      "rewards/rejected": -7.471064567565918,
      "step": 500
    },
    {
      "epoch": 18.295964125560538,
      "grad_norm": 4.045906066894531,
      "learning_rate": 4.6861723431538273e-08,
      "logits/chosen": -1.9917402267456055,
      "logits/rejected": -2.024575710296631,
      "logps/chosen": -106.6629638671875,
      "logps/rejected": -165.69158935546875,
      "loss": 0.0627,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.547753095626831,
      "rewards/margins": 4.24500036239624,
      "rewards/rejected": -7.79275369644165,
      "step": 510
    },
    {
      "epoch": 18.654708520179373,
      "grad_norm": 4.705228328704834,
      "learning_rate": 2.0863742672497244e-08,
      "logits/chosen": -1.9936788082122803,
      "logits/rejected": -2.010652780532837,
      "logps/chosen": -105.31771087646484,
      "logps/rejected": -149.80661010742188,
      "loss": 0.0576,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.252545118331909,
      "rewards/margins": 4.177481651306152,
      "rewards/rejected": -7.430027008056641,
      "step": 520
    },
    {
      "epoch": 19.013452914798208,
      "grad_norm": 4.246400833129883,
      "learning_rate": 5.221388247169945e-09,
      "logits/chosen": -2.0077335834503174,
      "logits/rejected": -2.0036730766296387,
      "logps/chosen": -115.3812026977539,
      "logps/rejected": -141.77145385742188,
      "loss": 0.0655,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.551786422729492,
      "rewards/margins": 3.6455719470977783,
      "rewards/rejected": -7.197358131408691,
      "step": 530
    },
    {
      "epoch": 19.37219730941704,
      "grad_norm": 3.7989847660064697,
      "learning_rate": 0.0,
      "logits/chosen": -2.0463144779205322,
      "logits/rejected": -2.069537401199341,
      "logps/chosen": -103.3205795288086,
      "logps/rejected": -152.32081604003906,
      "loss": 0.054,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.5541598796844482,
      "rewards/margins": 4.153356552124023,
      "rewards/rejected": -7.707517147064209,
      "step": 540
    },
    {
      "epoch": 19.37219730941704,
      "step": 540,
      "total_flos": 1.981675043968516e+18,
      "train_loss": 0.2879459043343862,
      "train_runtime": 4187.4777,
      "train_samples_per_second": 8.521,
      "train_steps_per_second": 0.129
    }
  ],
  "logging_steps": 10,
  "max_steps": 540,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.981675043968516e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}