{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.45945945945946,
  "eval_steps": 500,
  "global_step": 540,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.36036036036036034,
      "grad_norm": 0.5300200581550598,
      "learning_rate": 9.259259259259259e-07,
      "logits/chosen": 1.7405741214752197,
      "logits/rejected": 1.663368582725525,
      "logps/chosen": -97.57881164550781,
      "logps/rejected": -70.59793853759766,
      "loss": 0.6938,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.0018835498485714197,
      "rewards/margins": -0.0008800366776995361,
      "rewards/rejected": 0.0027635858859866858,
      "step": 10
    },
    {
      "epoch": 0.7207207207207207,
      "grad_norm": 0.4883837401866913,
      "learning_rate": 1.8518518518518519e-06,
      "logits/chosen": 1.8280715942382812,
      "logits/rejected": 1.8215343952178955,
      "logps/chosen": -90.60624694824219,
      "logps/rejected": -79.04981994628906,
      "loss": 0.6945,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.0014454321935772896,
      "rewards/margins": -0.004174981266260147,
      "rewards/rejected": 0.005620413459837437,
      "step": 20
    },
    {
      "epoch": 1.0810810810810811,
      "grad_norm": 0.5554109215736389,
      "learning_rate": 2.7777777777777783e-06,
      "logits/chosen": 1.7980501651763916,
      "logits/rejected": 1.841104507446289,
      "logps/chosen": -80.78228759765625,
      "logps/rejected": -85.08882141113281,
      "loss": 0.693,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.0042568682692945,
      "rewards/margins": -0.0038191028870642185,
      "rewards/rejected": 0.008075973019003868,
      "step": 30
    },
    {
      "epoch": 1.4414414414414414,
      "grad_norm": 0.537497341632843,
      "learning_rate": 3.7037037037037037e-06,
      "logits/chosen": 1.7320470809936523,
      "logits/rejected": 1.7411377429962158,
      "logps/chosen": -82.24813842773438,
      "logps/rejected": -80.90709686279297,
      "loss": 0.6952,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.00350201572291553,
      "rewards/margins": -0.012091752141714096,
      "rewards/rejected": 0.008589735254645348,
      "step": 40
    },
    {
      "epoch": 1.8018018018018018,
      "grad_norm": 0.5023094415664673,
      "learning_rate": 4.62962962962963e-06,
      "logits/chosen": 1.8292573690414429,
      "logits/rejected": 1.8632844686508179,
      "logps/chosen": -85.98481750488281,
      "logps/rejected": -86.14008331298828,
      "loss": 0.6928,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": 0.00168000184930861,
      "rewards/margins": -0.011136507615447044,
      "rewards/rejected": 0.01281650923192501,
      "step": 50
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 0.4981901943683624,
      "learning_rate": 4.998119881260576e-06,
      "logits/chosen": 1.8536121845245361,
      "logits/rejected": 1.7948782444000244,
      "logps/chosen": -90.0439682006836,
      "logps/rejected": -79.8309555053711,
      "loss": 0.6914,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.0025153260212391615,
      "rewards/margins": -0.0021209525875747204,
      "rewards/rejected": 0.004636278375983238,
      "step": 60
    },
    {
      "epoch": 2.5225225225225225,
      "grad_norm": 0.6208468079566956,
      "learning_rate": 4.9866405060165044e-06,
      "logits/chosen": 1.7845121622085571,
      "logits/rejected": 1.8721017837524414,
      "logps/chosen": -74.6461181640625,
      "logps/rejected": -98.91078186035156,
      "loss": 0.694,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.00022823773906566203,
      "rewards/margins": 0.0024594543501734734,
      "rewards/rejected": -0.00223121652379632,
      "step": 70
    },
    {
      "epoch": 2.8828828828828827,
      "grad_norm": 0.5074446797370911,
      "learning_rate": 4.964774158361991e-06,
      "logits/chosen": 1.8522154092788696,
      "logits/rejected": 1.8161392211914062,
      "logps/chosen": -89.16864013671875,
      "logps/rejected": -81.97349548339844,
      "loss": 0.6911,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.006215038243681192,
      "rewards/margins": 0.005521883722394705,
      "rewards/rejected": 0.0006931538810022175,
      "step": 80
    },
    {
      "epoch": 3.2432432432432434,
      "grad_norm": 0.6148263812065125,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": 1.7915083169937134,
      "logits/rejected": 1.7579513788223267,
      "logps/chosen": -88.66618347167969,
      "logps/rejected": -80.2238998413086,
      "loss": 0.6918,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.005774274934083223,
      "rewards/margins": 0.016623441129922867,
      "rewards/rejected": -0.010849165730178356,
      "step": 90
    },
    {
      "epoch": 3.6036036036036037,
      "grad_norm": 0.6143240332603455,
      "learning_rate": 4.8902889044347e-06,
      "logits/chosen": 1.6886920928955078,
      "logits/rejected": 1.8121178150177002,
      "logps/chosen": -72.908203125,
      "logps/rejected": -92.44017028808594,
      "loss": 0.6911,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.0015121791511774063,
      "rewards/margins": 0.0036420777905732393,
      "rewards/rejected": -0.005154256708920002,
      "step": 100
    },
    {
      "epoch": 3.963963963963964,
      "grad_norm": 0.7140340209007263,
      "learning_rate": 4.837981131305475e-06,
      "logits/chosen": 1.7481105327606201,
      "logits/rejected": 1.723141074180603,
      "logps/chosen": -80.63452911376953,
      "logps/rejected": -72.64492797851562,
      "loss": 0.6894,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.006246576085686684,
      "rewards/margins": -0.0035218377597630024,
      "rewards/rejected": -0.0027247383259236813,
      "step": 110
    },
    {
      "epoch": 4.324324324324325,
      "grad_norm": 1.1645854711532593,
      "learning_rate": 4.775907352415367e-06,
      "logits/chosen": 1.7416290044784546,
      "logits/rejected": 1.8237574100494385,
      "logps/chosen": -89.01248931884766,
      "logps/rejected": -92.31901550292969,
      "loss": 0.6869,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.008855113759636879,
      "rewards/margins": 0.013151508755981922,
      "rewards/rejected": -0.022006623446941376,
      "step": 120
    },
    {
      "epoch": 4.684684684684685,
      "grad_norm": 0.6638664603233337,
      "learning_rate": 4.70432685680402e-06,
      "logits/chosen": 1.7124770879745483,
      "logits/rejected": 1.7777938842773438,
      "logps/chosen": -84.87271881103516,
      "logps/rejected": -92.4839096069336,
      "loss": 0.6855,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.004437069408595562,
      "rewards/margins": 0.019811339676380157,
      "rewards/rejected": -0.024248410016298294,
      "step": 130
    },
    {
      "epoch": 5.045045045045045,
      "grad_norm": 0.785169780254364,
      "learning_rate": 4.623538644118244e-06,
      "logits/chosen": 1.7838690280914307,
      "logits/rejected": 1.8052574396133423,
      "logps/chosen": -94.57842254638672,
      "logps/rejected": -80.6390609741211,
      "loss": 0.6836,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.026147600263357162,
      "rewards/margins": 0.010936126112937927,
      "rewards/rejected": -0.03708372637629509,
      "step": 140
    },
    {
      "epoch": 5.405405405405405,
      "grad_norm": 0.745952844619751,
      "learning_rate": 4.533880175657419e-06,
      "logits/chosen": 1.7925735712051392,
      "logits/rejected": 1.7987396717071533,
      "logps/chosen": -82.4854507446289,
      "logps/rejected": -77.50787353515625,
      "loss": 0.6767,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.013469865545630455,
      "rewards/margins": 0.04023212194442749,
      "rewards/rejected": -0.053701985627412796,
      "step": 150
    },
    {
      "epoch": 5.7657657657657655,
      "grad_norm": 0.7885063290596008,
      "learning_rate": 4.435725964760331e-06,
      "logits/chosen": 1.7725191116333008,
      "logits/rejected": 1.8345234394073486,
      "logps/chosen": -78.7154541015625,
      "logps/rejected": -80.75708770751953,
      "loss": 0.6721,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.018351273611187935,
      "rewards/margins": 0.05368901416659355,
      "rewards/rejected": -0.07204028964042664,
      "step": 160
    },
    {
      "epoch": 6.126126126126126,
      "grad_norm": 0.7734187245368958,
      "learning_rate": 4.329486012421531e-06,
      "logits/chosen": 1.827528715133667,
      "logits/rejected": 1.7939176559448242,
      "logps/chosen": -75.33561706542969,
      "logps/rejected": -71.34326934814453,
      "loss": 0.6775,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.038218818604946136,
      "rewards/margins": 0.06159939616918564,
      "rewards/rejected": -0.09981821477413177,
      "step": 170
    },
    {
      "epoch": 6.486486486486487,
      "grad_norm": 0.7796682715415955,
      "learning_rate": 4.215604094671835e-06,
      "logits/chosen": 1.729288101196289,
      "logits/rejected": 1.7497504949569702,
      "logps/chosen": -83.02750396728516,
      "logps/rejected": -78.53959655761719,
      "loss": 0.6632,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.03207743167877197,
      "rewards/margins": 0.06858544796705246,
      "rewards/rejected": -0.10066288709640503,
      "step": 180
    },
    {
      "epoch": 6.846846846846847,
      "grad_norm": 0.8168752193450928,
      "learning_rate": 4.094555908876765e-06,
      "logits/chosen": 1.747865080833435,
      "logits/rejected": 1.7993282079696655,
      "logps/chosen": -78.75782775878906,
      "logps/rejected": -85.8946304321289,
      "loss": 0.668,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.0766468197107315,
      "rewards/margins": 0.045263100415468216,
      "rewards/rejected": -0.12190990149974823,
      "step": 190
    },
    {
      "epoch": 7.207207207207207,
      "grad_norm": 0.9816763997077942,
      "learning_rate": 3.966847086696045e-06,
      "logits/chosen": 1.792106032371521,
      "logits/rejected": 1.7798125743865967,
      "logps/chosen": -93.53439331054688,
      "logps/rejected": -73.8904037475586,
      "loss": 0.6582,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.06042450666427612,
      "rewards/margins": 0.10538975894451141,
      "rewards/rejected": -0.16581428050994873,
      "step": 200
    },
    {
      "epoch": 7.5675675675675675,
      "grad_norm": 0.9053374528884888,
      "learning_rate": 3.833011082004229e-06,
      "logits/chosen": 1.7648969888687134,
      "logits/rejected": 1.7725557088851929,
      "logps/chosen": -79.61383056640625,
      "logps/rejected": -71.8048095703125,
      "loss": 0.6551,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.040038011968135834,
      "rewards/margins": 0.11104954779148102,
      "rewards/rejected": -0.15108755230903625,
      "step": 210
    },
    {
      "epoch": 7.927927927927928,
      "grad_norm": 1.1332669258117676,
      "learning_rate": 3.693606942594873e-06,
      "logits/chosen": 1.8605678081512451,
      "logits/rejected": 1.91280996799469,
      "logps/chosen": -74.39783477783203,
      "logps/rejected": -90.66893005371094,
      "loss": 0.6565,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.08056517690420151,
      "rewards/margins": 0.0968993678689003,
      "rewards/rejected": -0.1774645447731018,
      "step": 220
    },
    {
      "epoch": 8.288288288288289,
      "grad_norm": 1.0496221780776978,
      "learning_rate": 3.549216974976073e-06,
      "logits/chosen": 1.8636070489883423,
      "logits/rejected": 1.7996151447296143,
      "logps/chosen": -89.9018783569336,
      "logps/rejected": -74.14125061035156,
      "loss": 0.6423,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.05598217993974686,
      "rewards/margins": 0.14807265996932983,
      "rewards/rejected": -0.2040548026561737,
      "step": 230
    },
    {
      "epoch": 8.64864864864865,
      "grad_norm": 0.8506491780281067,
      "learning_rate": 3.400444312011776e-06,
      "logits/chosen": 1.7753232717514038,
      "logits/rejected": 1.7647806406021118,
      "logps/chosen": -88.50201416015625,
      "logps/rejected": -81.80894470214844,
      "loss": 0.6358,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.09656485170125961,
      "rewards/margins": 0.11554913222789764,
      "rewards/rejected": -0.21211397647857666,
      "step": 240
    },
    {
      "epoch": 9.00900900900901,
      "grad_norm": 0.9554071426391602,
      "learning_rate": 3.2479103935691047e-06,
      "logits/chosen": 1.7528200149536133,
      "logits/rejected": 1.7827808856964111,
      "logps/chosen": -90.01268005371094,
      "logps/rejected": -89.0714340209961,
      "loss": 0.6374,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.07412372529506683,
      "rewards/margins": 0.1321595013141632,
      "rewards/rejected": -0.20628324151039124,
      "step": 250
    },
    {
      "epoch": 9.36936936936937,
      "grad_norm": 0.9363342523574829,
      "learning_rate": 3.092252370695298e-06,
      "logits/chosen": 1.6990101337432861,
      "logits/rejected": 1.8281656503677368,
      "logps/chosen": -71.15428161621094,
      "logps/rejected": -79.72063446044922,
      "loss": 0.6303,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.08240491151809692,
      "rewards/margins": 0.16954098641872406,
      "rewards/rejected": -0.2519459128379822,
      "step": 260
    },
    {
      "epoch": 9.72972972972973,
      "grad_norm": 1.023535966873169,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": 1.7418006658554077,
      "logits/rejected": 1.7500028610229492,
      "logps/chosen": -82.24595642089844,
      "logps/rejected": -91.16307067871094,
      "loss": 0.6321,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.13560767471790314,
      "rewards/margins": 0.1204828992486,
      "rewards/rejected": -0.25609058141708374,
      "step": 270
    },
    {
      "epoch": 10.09009009009009,
      "grad_norm": 1.0393471717834473,
      "learning_rate": 2.7741751485313295e-06,
      "logits/chosen": 1.8437154293060303,
      "logits/rejected": 1.8877627849578857,
      "logps/chosen": -68.61182403564453,
      "logps/rejected": -77.12557220458984,
      "loss": 0.6109,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.11025551706552505,
      "rewards/margins": 0.14969798922538757,
      "rewards/rejected": -0.25995349884033203,
      "step": 280
    },
    {
      "epoch": 10.45045045045045,
      "grad_norm": 0.9554691910743713,
      "learning_rate": 2.6130845929767662e-06,
      "logits/chosen": 1.7998764514923096,
      "logits/rejected": 1.796451210975647,
      "logps/chosen": -72.6626205444336,
      "logps/rejected": -74.15937042236328,
      "loss": 0.6123,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.13948099315166473,
      "rewards/margins": 0.18039894104003906,
      "rewards/rejected": -0.319879949092865,
      "step": 290
    },
    {
      "epoch": 10.81081081081081,
      "grad_norm": 1.1667226552963257,
      "learning_rate": 2.4515216705704396e-06,
      "logits/chosen": 1.8437564373016357,
      "logits/rejected": 1.9043811559677124,
      "logps/chosen": -78.18916320800781,
      "logps/rejected": -90.88524627685547,
      "loss": 0.6218,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.12091793864965439,
      "rewards/margins": 0.20490169525146484,
      "rewards/rejected": -0.32581964135169983,
      "step": 300
    },
    {
      "epoch": 11.17117117117117,
      "grad_norm": 1.2568660974502563,
      "learning_rate": 2.290161247507733e-06,
      "logits/chosen": 1.7295608520507812,
      "logits/rejected": 1.8652616739273071,
      "logps/chosen": -79.8567886352539,
      "logps/rejected": -87.59798431396484,
      "loss": 0.6067,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.10961383581161499,
      "rewards/margins": 0.2835196554660797,
      "rewards/rejected": -0.3931335210800171,
      "step": 310
    },
    {
      "epoch": 11.531531531531531,
      "grad_norm": 1.115491271018982,
      "learning_rate": 2.129677344121879e-06,
      "logits/chosen": 1.803063988685608,
      "logits/rejected": 1.820227861404419,
      "logps/chosen": -74.64582061767578,
      "logps/rejected": -72.14736938476562,
      "loss": 0.6113,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.09987158328294754,
      "rewards/margins": 0.2059938907623291,
      "rewards/rejected": -0.30586546659469604,
      "step": 320
    },
    {
      "epoch": 11.891891891891891,
      "grad_norm": 0.9227492213249207,
      "learning_rate": 1.970740319426474e-06,
      "logits/chosen": 1.7561572790145874,
      "logits/rejected": 1.789878249168396,
      "logps/chosen": -85.27911376953125,
      "logps/rejected": -77.4220962524414,
      "loss": 0.5891,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.14200571179389954,
      "rewards/margins": 0.22272682189941406,
      "rewards/rejected": -0.3647325336933136,
      "step": 330
    },
    {
      "epoch": 12.252252252252251,
      "grad_norm": 1.541570782661438,
      "learning_rate": 1.8140140709517467e-06,
      "logits/chosen": 1.7364771366119385,
      "logits/rejected": 1.7182557582855225,
      "logps/chosen": -76.05533599853516,
      "logps/rejected": -81.50830078125,
      "loss": 0.5994,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.12165775150060654,
      "rewards/margins": 0.19019995629787445,
      "rewards/rejected": -0.3118577301502228,
      "step": 340
    },
    {
      "epoch": 12.612612612612612,
      "grad_norm": 1.1125155687332153,
      "learning_rate": 1.6601532615711452e-06,
      "logits/chosen": 1.7223026752471924,
      "logits/rejected": 1.8163812160491943,
      "logps/chosen": -71.94869232177734,
      "logps/rejected": -90.12442016601562,
      "loss": 0.6013,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.16840626299381256,
      "rewards/margins": 0.1776614487171173,
      "rewards/rejected": -0.3460676968097687,
      "step": 350
    },
    {
      "epoch": 12.972972972972974,
      "grad_norm": 1.0530396699905396,
      "learning_rate": 1.509800584902108e-06,
      "logits/chosen": 1.795819878578186,
      "logits/rejected": 1.8446025848388672,
      "logps/chosen": -82.24161529541016,
      "logps/rejected": -79.51258850097656,
      "loss": 0.5857,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.13156726956367493,
      "rewards/margins": 0.2468404471874237,
      "rewards/rejected": -0.37840771675109863,
      "step": 360
    },
    {
      "epoch": 13.333333333333334,
      "grad_norm": 1.0298306941986084,
      "learning_rate": 1.3635840807037487e-06,
      "logits/chosen": 1.8054157495498657,
      "logits/rejected": 1.8246568441390991,
      "logps/chosen": -85.40760803222656,
      "logps/rejected": -78.77482604980469,
      "loss": 0.5943,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.10383795201778412,
      "rewards/margins": 0.2493254393339157,
      "rewards/rejected": -0.35316339135169983,
      "step": 370
    },
    {
      "epoch": 13.693693693693694,
      "grad_norm": 1.0601342916488647,
      "learning_rate": 1.2221145114853172e-06,
      "logits/chosen": 1.6507200002670288,
      "logits/rejected": 1.720569372177124,
      "logps/chosen": -74.90982055664062,
      "logps/rejected": -92.51995849609375,
      "loss": 0.5789,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.13705268502235413,
      "rewards/margins": 0.2740176022052765,
      "rewards/rejected": -0.4110702872276306,
      "step": 380
    },
    {
      "epoch": 14.054054054054054,
      "grad_norm": 1.1135063171386719,
      "learning_rate": 1.085982811283654e-06,
      "logits/chosen": 1.7133989334106445,
      "logits/rejected": 1.7338130474090576,
      "logps/chosen": -80.55327606201172,
      "logps/rejected": -82.32875061035156,
      "loss": 0.5847,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.12969203293323517,
      "rewards/margins": 0.26957422494888306,
      "rewards/rejected": -0.39926621317863464,
      "step": 390
    },
    {
      "epoch": 14.414414414414415,
      "grad_norm": 1.0821375846862793,
      "learning_rate": 9.557576172663577e-07,
      "logits/chosen": 1.7035369873046875,
      "logits/rejected": 1.7184028625488281,
      "logps/chosen": -80.99266052246094,
      "logps/rejected": -85.05413818359375,
      "loss": 0.583,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.1366995871067047,
      "rewards/margins": 0.268393874168396,
      "rewards/rejected": -0.4050934910774231,
      "step": 400
    },
    {
      "epoch": 14.774774774774775,
      "grad_norm": 1.1167497634887695,
      "learning_rate": 8.319828944714508e-07,
      "logits/chosen": 1.8293631076812744,
      "logits/rejected": 1.7202396392822266,
      "logps/chosen": -96.96324157714844,
      "logps/rejected": -80.58685302734375,
      "loss": 0.5768,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.14515718817710876,
      "rewards/margins": 0.27918386459350586,
      "rewards/rejected": -0.4243410527706146,
      "step": 410
    },
    {
      "epoch": 15.135135135135135,
      "grad_norm": 1.0140223503112793,
      "learning_rate": 7.151756636052529e-07,
      "logits/chosen": 1.7751014232635498,
      "logits/rejected": 1.7394100427627563,
      "logps/chosen": -104.48625183105469,
      "logps/rejected": -90.3105239868164,
      "loss": 0.5804,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.14849844574928284,
      "rewards/margins": 0.2261633574962616,
      "rewards/rejected": -0.37466174364089966,
      "step": 420
    },
    {
      "epoch": 15.495495495495495,
      "grad_norm": 1.0056337118148804,
      "learning_rate": 6.058238413897052e-07,
      "logits/chosen": 1.7368510961532593,
      "logits/rejected": 1.844665288925171,
      "logps/chosen": -88.00404357910156,
      "logps/rejected": -103.71427917480469,
      "loss": 0.5786,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.2264096438884735,
      "rewards/margins": 0.2332192212343216,
      "rewards/rejected": -0.4596289098262787,
      "step": 430
    },
    {
      "epoch": 15.855855855855856,
      "grad_norm": 1.0402743816375732,
      "learning_rate": 5.043842024802675e-07,
      "logits/chosen": 1.835862398147583,
      "logits/rejected": 1.8621985912322998,
      "logps/chosen": -80.8730697631836,
      "logps/rejected": -77.32243347167969,
      "loss": 0.5737,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.10934928804636002,
      "rewards/margins": 0.26070791482925415,
      "rewards/rejected": -0.3700571656227112,
      "step": 440
    },
    {
      "epoch": 16.216216216216218,
      "grad_norm": 1.0037925243377686,
      "learning_rate": 4.1128047146765936e-07,
      "logits/chosen": 1.6536343097686768,
      "logits/rejected": 1.748152494430542,
      "logps/chosen": -85.28504180908203,
      "logps/rejected": -83.04313659667969,
      "loss": 0.5869,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.1468530297279358,
      "rewards/margins": 0.24461284279823303,
      "rewards/rejected": -0.39146584272384644,
      "step": 450
    },
    {
      "epoch": 16.576576576576578,
      "grad_norm": 1.0473060607910156,
      "learning_rate": 3.269015529333805e-07,
      "logits/chosen": 1.7145726680755615,
      "logits/rejected": 1.7245705127716064,
      "logps/chosen": -68.06051635742188,
      "logps/rejected": -77.09696960449219,
      "loss": 0.5679,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.16770952939987183,
      "rewards/margins": 0.23443298041820526,
      "rewards/rejected": -0.4021424651145935,
      "step": 460
    },
    {
      "epoch": 16.936936936936938,
      "grad_norm": 1.0985324382781982,
      "learning_rate": 2.515999069522676e-07,
      "logits/chosen": 1.7093912363052368,
      "logits/rejected": 1.6903793811798096,
      "logps/chosen": -79.38801574707031,
      "logps/rejected": -74.43864440917969,
      "loss": 0.5754,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.14471781253814697,
      "rewards/margins": 0.24466648697853088,
      "rewards/rejected": -0.38938429951667786,
      "step": 470
    },
    {
      "epoch": 17.2972972972973,
      "grad_norm": 1.1457403898239136,
      "learning_rate": 1.8569007682777417e-07,
      "logits/chosen": 1.6642320156097412,
      "logits/rejected": 1.7295513153076172,
      "logps/chosen": -80.79732513427734,
      "logps/rejected": -80.07367706298828,
      "loss": 0.5749,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.16041259467601776,
      "rewards/margins": 0.26678892970085144,
      "rewards/rejected": -0.4272015690803528,
      "step": 480
    },
    {
      "epoch": 17.65765765765766,
      "grad_norm": 1.04747474193573,
      "learning_rate": 1.2944737520980883e-07,
      "logits/chosen": 1.699061632156372,
      "logits/rejected": 1.837684988975525,
      "logps/chosen": -73.55027770996094,
      "logps/rejected": -93.25712585449219,
      "loss": 0.5732,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.20150330662727356,
      "rewards/margins": 0.2376147210597992,
      "rewards/rejected": -0.43911799788475037,
      "step": 490
    },
    {
      "epoch": 18.01801801801802,
      "grad_norm": 1.033074140548706,
      "learning_rate": 8.310673408334496e-08,
      "logits/chosen": 1.655987024307251,
      "logits/rejected": 1.6537258625030518,
      "logps/chosen": -103.11566162109375,
      "logps/rejected": -96.29421997070312,
      "loss": 0.5747,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.15963700413703918,
      "rewards/margins": 0.30574050545692444,
      "rewards/rejected": -0.4653775095939636,
      "step": 500
    },
    {
      "epoch": 18.37837837837838,
      "grad_norm": 1.0928398370742798,
      "learning_rate": 4.6861723431538273e-08,
      "logits/chosen": 1.7491756677627563,
      "logits/rejected": 1.7941337823867798,
      "logps/chosen": -79.0124282836914,
      "logps/rejected": -94.39396667480469,
      "loss": 0.5745,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.14681483805179596,
      "rewards/margins": 0.31730785965919495,
      "rewards/rejected": -0.4641226828098297,
      "step": 510
    },
    {
      "epoch": 18.73873873873874,
      "grad_norm": 1.0533796548843384,
      "learning_rate": 2.0863742672497244e-08,
      "logits/chosen": 1.7281211614608765,
      "logits/rejected": 1.7764543294906616,
      "logps/chosen": -91.92194366455078,
      "logps/rejected": -85.10621643066406,
      "loss": 0.5726,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.15819533169269562,
      "rewards/margins": 0.2960740923881531,
      "rewards/rejected": -0.4542694687843323,
      "step": 520
    },
    {
      "epoch": 19.0990990990991,
      "grad_norm": 1.1859172582626343,
      "learning_rate": 5.221388247169945e-09,
      "logits/chosen": 1.7769553661346436,
      "logits/rejected": 1.7326488494873047,
      "logps/chosen": -85.6234359741211,
      "logps/rejected": -75.64092254638672,
      "loss": 0.5688,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.15740445256233215,
      "rewards/margins": 0.31495219469070435,
      "rewards/rejected": -0.4723566472530365,
      "step": 530
    },
    {
      "epoch": 19.45945945945946,
      "grad_norm": 1.1295475959777832,
      "learning_rate": 0.0,
      "logits/chosen": 1.7790734767913818,
      "logits/rejected": 1.8196531534194946,
      "logps/chosen": -83.07762145996094,
      "logps/rejected": -77.76747131347656,
      "loss": 0.575,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.1493213176727295,
      "rewards/margins": 0.2750301659107208,
      "rewards/rejected": -0.42435145378112793,
      "step": 540
    },
    {
      "epoch": 19.45945945945946,
      "step": 540,
      "total_flos": 1.8293058049230766e+18,
      "train_loss": 0.6298594880987096,
      "train_runtime": 3921.3478,
      "train_samples_per_second": 9.033,
      "train_steps_per_second": 0.138
    }
  ],
  "logging_steps": 10,
  "max_steps": 540,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8293058049230766e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}