{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 13.793103448275861,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 3.9107234477996826,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": -2.363854169845581,
      "logits/rejected": -2.338671922683716,
      "logps/chosen": -76.8819351196289,
      "logps/rejected": -75.86869049072266,
      "loss": 0.6942,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.003996999468654394,
      "rewards/margins": -0.004145228303968906,
      "rewards/rejected": 0.00014822949015069753,
      "step": 10
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 4.328212738037109,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": -2.3617663383483887,
      "logits/rejected": -2.3477540016174316,
      "logps/chosen": -93.76543426513672,
      "logps/rejected": -75.16656494140625,
      "loss": 0.6931,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.002653368515893817,
      "rewards/margins": 0.006352785974740982,
      "rewards/rejected": -0.003699416993185878,
      "step": 20
    },
    {
      "epoch": 1.0344827586206897,
      "grad_norm": 3.473827838897705,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": -2.3354058265686035,
      "logits/rejected": -2.3330740928649902,
      "logps/chosen": -138.9960479736328,
      "logps/rejected": -67.00438690185547,
      "loss": 0.6927,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 0.01745142787694931,
      "rewards/margins": 0.013760591857135296,
      "rewards/rejected": 0.0036908381152898073,
      "step": 30
    },
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 3.9458820819854736,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": -2.3478751182556152,
      "logits/rejected": -2.3700289726257324,
      "logps/chosen": -75.59983825683594,
      "logps/rejected": -67.20357513427734,
      "loss": 0.69,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.00024204826331697404,
      "rewards/margins": 0.005866119172424078,
      "rewards/rejected": -0.005624071694910526,
      "step": 40
    },
    {
      "epoch": 1.7241379310344827,
      "grad_norm": 4.299890041351318,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": -2.338815212249756,
      "logits/rejected": -2.3532769680023193,
      "logps/chosen": -72.74342346191406,
      "logps/rejected": -87.63409423828125,
      "loss": 0.6899,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.003249581903219223,
      "rewards/margins": 0.006888723932206631,
      "rewards/rejected": -0.010138307698071003,
      "step": 50
    },
    {
      "epoch": 2.0689655172413794,
      "grad_norm": 4.957555294036865,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": -2.344572067260742,
      "logits/rejected": -2.345888614654541,
      "logps/chosen": -80.16046142578125,
      "logps/rejected": -73.05986022949219,
      "loss": 0.6802,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.013288321904838085,
      "rewards/margins": 0.020968889817595482,
      "rewards/rejected": -0.03425721079111099,
      "step": 60
    },
    {
      "epoch": 2.413793103448276,
      "grad_norm": 4.212078094482422,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": -2.3330090045928955,
      "logits/rejected": -2.328575372695923,
      "logps/chosen": -73.63365173339844,
      "logps/rejected": -74.45356750488281,
      "loss": 0.6693,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.009874681942164898,
      "rewards/margins": 0.04785541445016861,
      "rewards/rejected": -0.05773010104894638,
      "step": 70
    },
    {
      "epoch": 2.7586206896551726,
      "grad_norm": 4.446742534637451,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": -2.3472495079040527,
      "logits/rejected": -2.3697409629821777,
      "logps/chosen": -73.44490814208984,
      "logps/rejected": -89.69837951660156,
      "loss": 0.6553,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.022817375138401985,
      "rewards/margins": 0.09045850485563278,
      "rewards/rejected": -0.11327588558197021,
      "step": 80
    },
    {
      "epoch": 3.103448275862069,
      "grad_norm": 4.435715198516846,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": -2.346717596054077,
      "logits/rejected": -2.3626608848571777,
      "logps/chosen": -85.64796447753906,
      "logps/rejected": -80.06268310546875,
      "loss": 0.6432,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.0543200746178627,
      "rewards/margins": 0.1085677295923233,
      "rewards/rejected": -0.1628878116607666,
      "step": 90
    },
    {
      "epoch": 3.4482758620689653,
      "grad_norm": 4.5200910568237305,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": -2.358900547027588,
      "logits/rejected": -2.3591980934143066,
      "logps/chosen": -82.83473205566406,
      "logps/rejected": -80.66204071044922,
      "loss": 0.5996,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.046993009746074677,
      "rewards/margins": 0.20401433110237122,
      "rewards/rejected": -0.2510073184967041,
      "step": 100
    },
    {
      "epoch": 3.793103448275862,
      "grad_norm": 4.106562614440918,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": -2.347762107849121,
      "logits/rejected": -2.33540415763855,
      "logps/chosen": -72.23506164550781,
      "logps/rejected": -85.77088928222656,
      "loss": 0.6003,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.051100969314575195,
      "rewards/margins": 0.24443332850933075,
      "rewards/rejected": -0.29553431272506714,
      "step": 110
    },
    {
      "epoch": 4.137931034482759,
      "grad_norm": 4.67632532119751,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": -2.353370428085327,
      "logits/rejected": -2.378964900970459,
      "logps/chosen": -67.91919708251953,
      "logps/rejected": -86.26390838623047,
      "loss": 0.5716,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.11952020972967148,
      "rewards/margins": 0.30119410157203674,
      "rewards/rejected": -0.42071428894996643,
      "step": 120
    },
    {
      "epoch": 4.482758620689655,
      "grad_norm": 4.5131025314331055,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": -2.3820648193359375,
      "logits/rejected": -2.403965473175049,
      "logps/chosen": -71.66841125488281,
      "logps/rejected": -77.90869140625,
      "loss": 0.5362,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.0694696456193924,
      "rewards/margins": 0.47986412048339844,
      "rewards/rejected": -0.5493337512016296,
      "step": 130
    },
    {
      "epoch": 4.827586206896552,
      "grad_norm": 4.446605205535889,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": -2.372788906097412,
      "logits/rejected": -2.379612684249878,
      "logps/chosen": -69.44779968261719,
      "logps/rejected": -80.1120376586914,
      "loss": 0.5186,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.12896183133125305,
      "rewards/margins": 0.5137210488319397,
      "rewards/rejected": -0.6426829099655151,
      "step": 140
    },
    {
      "epoch": 5.172413793103448,
      "grad_norm": 4.605374813079834,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": -2.3140041828155518,
      "logits/rejected": -2.336975574493408,
      "logps/chosen": -87.05641174316406,
      "logps/rejected": -94.53668212890625,
      "loss": 0.4794,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.15513856709003448,
      "rewards/margins": 0.6417296528816223,
      "rewards/rejected": -0.7968682050704956,
      "step": 150
    },
    {
      "epoch": 5.517241379310345,
      "grad_norm": 6.027809143066406,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": -2.365192413330078,
      "logits/rejected": -2.3872745037078857,
      "logps/chosen": -73.7781753540039,
      "logps/rejected": -82.1873550415039,
      "loss": 0.4599,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.25834327936172485,
      "rewards/margins": 0.5800828337669373,
      "rewards/rejected": -0.8384261131286621,
      "step": 160
    },
    {
      "epoch": 5.862068965517241,
      "grad_norm": 5.196141242980957,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": -2.391653299331665,
      "logits/rejected": -2.394854784011841,
      "logps/chosen": -72.33963775634766,
      "logps/rejected": -86.77699279785156,
      "loss": 0.4467,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.2543284595012665,
      "rewards/margins": 0.7806032299995422,
      "rewards/rejected": -1.0349315404891968,
      "step": 170
    },
    {
      "epoch": 6.206896551724138,
      "grad_norm": 5.285089015960693,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": -2.3918795585632324,
      "logits/rejected": -2.4050662517547607,
      "logps/chosen": -77.20832061767578,
      "logps/rejected": -98.5592041015625,
      "loss": 0.4052,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.35020238161087036,
      "rewards/margins": 1.0169063806533813,
      "rewards/rejected": -1.367108702659607,
      "step": 180
    },
    {
      "epoch": 6.551724137931035,
      "grad_norm": 5.308840751647949,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": -2.3778843879699707,
      "logits/rejected": -2.3865675926208496,
      "logps/chosen": -69.90904235839844,
      "logps/rejected": -86.66047668457031,
      "loss": 0.3809,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.42647725343704224,
      "rewards/margins": 0.9949015378952026,
      "rewards/rejected": -1.4213788509368896,
      "step": 190
    },
    {
      "epoch": 6.896551724137931,
      "grad_norm": 6.095545768737793,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": -2.407437801361084,
      "logits/rejected": -2.4296727180480957,
      "logps/chosen": -82.95701599121094,
      "logps/rejected": -83.4615478515625,
      "loss": 0.3742,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.6070786714553833,
      "rewards/margins": 0.968428909778595,
      "rewards/rejected": -1.5755075216293335,
      "step": 200
    },
    {
      "epoch": 7.241379310344827,
      "grad_norm": 5.683840751647949,
      "learning_rate": 4.024886035802432e-06,
      "logits/chosen": -2.411830186843872,
      "logits/rejected": -2.4354729652404785,
      "logps/chosen": -61.66484451293945,
      "logps/rejected": -90.95805358886719,
      "loss": 0.3421,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.5454065203666687,
      "rewards/margins": 1.2011836767196655,
      "rewards/rejected": -1.7465900182724,
      "step": 210
    },
    {
      "epoch": 7.586206896551724,
      "grad_norm": 5.584890842437744,
      "learning_rate": 3.9029676634059565e-06,
      "logits/chosen": -2.3816287517547607,
      "logits/rejected": -2.37958025932312,
      "logps/chosen": -87.12813568115234,
      "logps/rejected": -92.26463317871094,
      "loss": 0.3138,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.5636588335037231,
      "rewards/margins": 1.3944618701934814,
      "rewards/rejected": -1.9581207036972046,
      "step": 220
    },
    {
      "epoch": 7.931034482758621,
      "grad_norm": 7.526435375213623,
      "learning_rate": 3.7759691553595214e-06,
      "logits/chosen": -2.399634599685669,
      "logits/rejected": -2.407938241958618,
      "logps/chosen": -82.55473327636719,
      "logps/rejected": -89.76951599121094,
      "loss": 0.3046,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -0.9372364282608032,
      "rewards/margins": 1.309622049331665,
      "rewards/rejected": -2.246858596801758,
      "step": 230
    },
    {
      "epoch": 8.275862068965518,
      "grad_norm": 5.150575637817383,
      "learning_rate": 3.6443503723320837e-06,
      "logits/chosen": -2.36824893951416,
      "logits/rejected": -2.3758621215820312,
      "logps/chosen": -85.68852996826172,
      "logps/rejected": -102.09485626220703,
      "loss": 0.2638,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.9749407768249512,
      "rewards/margins": 1.6947702169418335,
      "rewards/rejected": -2.669710874557495,
      "step": 240
    },
    {
      "epoch": 8.620689655172415,
      "grad_norm": 6.282366752624512,
      "learning_rate": 3.508587904974522e-06,
      "logits/chosen": -2.3659470081329346,
      "logits/rejected": -2.367462635040283,
      "logps/chosen": -87.00023651123047,
      "logps/rejected": -123.0126953125,
      "loss": 0.2405,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.026289939880371,
      "rewards/margins": 1.9990599155426025,
      "rewards/rejected": -3.0253496170043945,
      "step": 250
    },
    {
      "epoch": 8.96551724137931,
      "grad_norm": 6.396641731262207,
      "learning_rate": 3.3691733481883693e-06,
      "logits/chosen": -2.3716306686401367,
      "logits/rejected": -2.3546042442321777,
      "logps/chosen": -87.29267883300781,
      "logps/rejected": -98.01856994628906,
      "loss": 0.2546,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.2485218048095703,
      "rewards/margins": 1.980831503868103,
      "rewards/rejected": -3.229353427886963,
      "step": 260
    },
    {
      "epoch": 9.310344827586206,
      "grad_norm": 5.644590377807617,
      "learning_rate": 3.226611521064278e-06,
      "logits/chosen": -2.2916407585144043,
      "logits/rejected": -2.3004841804504395,
      "logps/chosen": -79.30825805664062,
      "logps/rejected": -113.4272689819336,
      "loss": 0.1999,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.1944876909255981,
      "rewards/margins": 2.365144729614258,
      "rewards/rejected": -3.5596320629119873,
      "step": 270
    },
    {
      "epoch": 9.655172413793103,
      "grad_norm": 6.177416801452637,
      "learning_rate": 3.0814186389357765e-06,
      "logits/chosen": -2.346484661102295,
      "logits/rejected": -2.346285581588745,
      "logps/chosen": -77.01927947998047,
      "logps/rejected": -105.2061538696289,
      "loss": 0.1875,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.260752558708191,
      "rewards/margins": 2.249293565750122,
      "rewards/rejected": -3.5100464820861816,
      "step": 280
    },
    {
      "epoch": 10.0,
      "grad_norm": 6.281085014343262,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": -2.3005917072296143,
      "logits/rejected": -2.3319716453552246,
      "logps/chosen": -92.97987365722656,
      "logps/rejected": -112.0716552734375,
      "loss": 0.2036,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.6851022243499756,
      "rewards/margins": 2.1508073806762695,
      "rewards/rejected": -3.835909605026245,
      "step": 290
    },
    {
      "epoch": 10.344827586206897,
      "grad_norm": 5.364060401916504,
      "learning_rate": 2.785250302445062e-06,
      "logits/chosen": -2.3305928707122803,
      "logits/rejected": -2.3451201915740967,
      "logps/chosen": -85.11759185791016,
      "logps/rejected": -115.65193176269531,
      "loss": 0.1664,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.5494135618209839,
      "rewards/margins": 2.6376235485076904,
      "rewards/rejected": -4.187037467956543,
      "step": 300
    },
    {
      "epoch": 10.689655172413794,
      "grad_norm": 5.540137767791748,
      "learning_rate": 2.6353472714635443e-06,
      "logits/chosen": -2.3217194080352783,
      "logits/rejected": -2.345512628555298,
      "logps/chosen": -83.56015014648438,
      "logps/rejected": -111.0335922241211,
      "loss": 0.1446,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.6171716451644897,
      "rewards/margins": 2.553410053253174,
      "rewards/rejected": -4.170581340789795,
      "step": 310
    },
    {
      "epoch": 11.03448275862069,
      "grad_norm": 5.321618556976318,
      "learning_rate": 2.4849541490017868e-06,
      "logits/chosen": -2.2711265087127686,
      "logits/rejected": -2.274831533432007,
      "logps/chosen": -93.46397399902344,
      "logps/rejected": -138.03286743164062,
      "loss": 0.1416,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -2.0557093620300293,
      "rewards/margins": 2.809981346130371,
      "rewards/rejected": -4.8656907081604,
      "step": 320
    },
    {
      "epoch": 11.379310344827585,
      "grad_norm": 6.168019771575928,
      "learning_rate": 2.3346155074564712e-06,
      "logits/chosen": -2.2561862468719482,
      "logits/rejected": -2.260565996170044,
      "logps/chosen": -89.53536224365234,
      "logps/rejected": -119.193603515625,
      "loss": 0.1174,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.2866904735565186,
      "rewards/margins": 2.866985321044922,
      "rewards/rejected": -5.1536760330200195,
      "step": 330
    },
    {
      "epoch": 11.724137931034482,
      "grad_norm": 4.970952033996582,
      "learning_rate": 2.184875721949277e-06,
      "logits/chosen": -2.204287052154541,
      "logits/rejected": -2.2466628551483154,
      "logps/chosen": -171.76095581054688,
      "logps/rejected": -147.26907348632812,
      "loss": 0.1152,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.4709744453430176,
      "rewards/margins": 3.782508134841919,
      "rewards/rejected": -6.253481864929199,
      "step": 340
    },
    {
      "epoch": 12.068965517241379,
      "grad_norm": 4.031501770019531,
      "learning_rate": 2.0362769991485514e-06,
      "logits/chosen": -2.245434522628784,
      "logits/rejected": -2.267470121383667,
      "logps/chosen": -98.18807220458984,
      "logps/rejected": -143.83383178710938,
      "loss": 0.1104,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.5792040824890137,
      "rewards/margins": 3.238723039627075,
      "rewards/rejected": -5.817927360534668,
      "step": 350
    },
    {
      "epoch": 12.413793103448276,
      "grad_norm": 4.009598731994629,
      "learning_rate": 1.8893574139429226e-06,
      "logits/chosen": -2.2339296340942383,
      "logits/rejected": -2.254106283187866,
      "logps/chosen": -104.34129333496094,
      "logps/rejected": -130.74081420898438,
      "loss": 0.0859,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -2.8974556922912598,
      "rewards/margins": 3.275568723678589,
      "rewards/rejected": -6.1730241775512695,
      "step": 360
    },
    {
      "epoch": 12.758620689655173,
      "grad_norm": 6.944422721862793,
      "learning_rate": 1.744648961076068e-06,
      "logits/chosen": -2.252500295639038,
      "logits/rejected": -2.25302791595459,
      "logps/chosen": -97.11503601074219,
      "logps/rejected": -135.61911010742188,
      "loss": 0.0901,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.814434051513672,
      "rewards/margins": 3.549976348876953,
      "rewards/rejected": -6.364409923553467,
      "step": 370
    },
    {
      "epoch": 13.10344827586207,
      "grad_norm": 5.268442630767822,
      "learning_rate": 1.602675628797636e-06,
      "logits/chosen": -2.2403993606567383,
      "logits/rejected": -2.240546703338623,
      "logps/chosen": -101.62408447265625,
      "logps/rejected": -136.84268188476562,
      "loss": 0.0902,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.9260125160217285,
      "rewards/margins": 3.421342134475708,
      "rewards/rejected": -6.347354888916016,
      "step": 380
    },
    {
      "epoch": 13.448275862068966,
      "grad_norm": 4.1959404945373535,
      "learning_rate": 1.4639515015056205e-06,
      "logits/chosen": -2.2759053707122803,
      "logits/rejected": -2.2657554149627686,
      "logps/chosen": -103.3962631225586,
      "logps/rejected": -138.56057739257812,
      "loss": 0.0729,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.688096523284912,
      "rewards/margins": 4.030136585235596,
      "rewards/rejected": -6.718233585357666,
      "step": 390
    },
    {
      "epoch": 13.793103448275861,
      "grad_norm": 4.1255574226379395,
      "learning_rate": 1.328978898250525e-06,
      "logits/chosen": -2.180371046066284,
      "logits/rejected": -2.213881015777588,
      "logps/chosen": -100.98771667480469,
      "logps/rejected": -135.07044982910156,
      "loss": 0.0747,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.0276432037353516,
      "rewards/margins": 3.8850128650665283,
      "rewards/rejected": -6.912655830383301,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4566878297351782e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}