{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 13.793103448275861,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 0.5030671954154968,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.8564815521240234,
      "logits/rejected": 1.8255866765975952,
      "logps/chosen": -95.54290008544922,
      "logps/rejected": -79.79582214355469,
      "loss": 0.693,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.0028573228046298027,
      "rewards/margins": 0.0005112116923555732,
      "rewards/rejected": -0.0033685355447232723,
      "step": 10
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 0.45081979036331177,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.781947374343872,
      "logits/rejected": 1.6911979913711548,
      "logps/chosen": -104.20463562011719,
      "logps/rejected": -78.91150665283203,
      "loss": 0.6929,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.00018558502779342234,
      "rewards/margins": -0.002152198925614357,
      "rewards/rejected": 0.0023377849720418453,
      "step": 20
    },
    {
      "epoch": 1.0344827586206897,
      "grad_norm": 0.41456305980682373,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.7937275171279907,
      "logits/rejected": 1.729128122329712,
      "logps/chosen": -90.21867370605469,
      "logps/rejected": -71.69265747070312,
      "loss": 0.6935,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.003012509550899267,
      "rewards/margins": 0.009945740923285484,
      "rewards/rejected": -0.006933231838047504,
      "step": 30
    },
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 0.45128729939460754,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.864363670349121,
      "logits/rejected": 1.9002879858016968,
      "logps/chosen": -87.1126480102539,
      "logps/rejected": -77.18392181396484,
      "loss": 0.6923,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0013037443859502673,
      "rewards/margins": 0.006342612206935883,
      "rewards/rejected": -0.005038867238909006,
      "step": 40
    },
    {
      "epoch": 1.7241379310344827,
      "grad_norm": 0.4960842728614807,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.7825076580047607,
      "logits/rejected": 1.829602837562561,
      "logps/chosen": -78.63069915771484,
      "logps/rejected": -91.36685180664062,
      "loss": 0.6954,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.009792634285986423,
      "rewards/margins": 0.014146638102829456,
      "rewards/rejected": -0.0043540047481656075,
      "step": 50
    },
    {
      "epoch": 2.0689655172413794,
      "grad_norm": 0.5404626727104187,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": 1.8061244487762451,
      "logits/rejected": 1.7855304479599,
      "logps/chosen": -90.69072723388672,
      "logps/rejected": -72.0459213256836,
      "loss": 0.6938,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.002143201883882284,
      "rewards/margins": 0.005239076912403107,
      "rewards/rejected": -0.0030958750285208225,
      "step": 60
    },
    {
      "epoch": 2.413793103448276,
      "grad_norm": 0.5091063380241394,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": 1.6158870458602905,
      "logits/rejected": 1.696754813194275,
      "logps/chosen": -76.28319549560547,
      "logps/rejected": -77.81846618652344,
      "loss": 0.6905,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 1.5049334251671098e-05,
      "rewards/margins": 0.007967600598931313,
      "rewards/rejected": -0.00795255322009325,
      "step": 70
    },
    {
      "epoch": 2.7586206896551726,
      "grad_norm": 0.6066665053367615,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": 1.6109062433242798,
      "logits/rejected": 1.7185981273651123,
      "logps/chosen": -84.615966796875,
      "logps/rejected": -96.0793228149414,
      "loss": 0.6942,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.000627221364993602,
      "rewards/margins": 0.0009560534963384271,
      "rewards/rejected": -0.0015832759672775865,
      "step": 80
    },
    {
      "epoch": 3.103448275862069,
      "grad_norm": 0.5341666340827942,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": 1.8657306432724,
      "logits/rejected": 1.8894052505493164,
      "logps/chosen": -87.67496490478516,
      "logps/rejected": -77.41777038574219,
      "loss": 0.6919,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.004135184455662966,
      "rewards/margins": 0.011894735507667065,
      "rewards/rejected": -0.016029920428991318,
      "step": 90
    },
    {
      "epoch": 3.4482758620689653,
      "grad_norm": 0.5891664028167725,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": 1.8143476247787476,
      "logits/rejected": 1.8618648052215576,
      "logps/chosen": -82.7096176147461,
      "logps/rejected": -80.64532470703125,
      "loss": 0.6905,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.014110831543803215,
      "rewards/margins": -0.007867741398513317,
      "rewards/rejected": -0.0062430910766124725,
      "step": 100
    },
    {
      "epoch": 3.793103448275862,
      "grad_norm": 0.5773605704307556,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": 1.7850589752197266,
      "logits/rejected": 1.76922607421875,
      "logps/chosen": -87.4909896850586,
      "logps/rejected": -90.67459869384766,
      "loss": 0.6899,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.0026252653915435076,
      "rewards/margins": 0.014214910566806793,
      "rewards/rejected": -0.011589646339416504,
      "step": 110
    },
    {
      "epoch": 4.137931034482759,
      "grad_norm": 0.6582341194152832,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": 1.7467533349990845,
      "logits/rejected": 1.8858740329742432,
      "logps/chosen": -69.12274169921875,
      "logps/rejected": -94.86824035644531,
      "loss": 0.6878,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.009279675781726837,
      "rewards/margins": 0.0035783485509455204,
      "rewards/rejected": -0.012858022935688496,
      "step": 120
    },
    {
      "epoch": 4.482758620689655,
      "grad_norm": 0.7790700793266296,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": 1.8058643341064453,
      "logits/rejected": 1.866713523864746,
      "logps/chosen": -79.86953735351562,
      "logps/rejected": -76.4103012084961,
      "loss": 0.6857,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.00016659722314216197,
      "rewards/margins": 0.023889193311333656,
      "rewards/rejected": -0.02372259460389614,
      "step": 130
    },
    {
      "epoch": 4.827586206896552,
      "grad_norm": 0.7933465242385864,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": 1.7705074548721313,
      "logits/rejected": 1.7728191614151,
      "logps/chosen": -86.46583557128906,
      "logps/rejected": -75.83828735351562,
      "loss": 0.6804,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.0001745700865285471,
      "rewards/margins": 0.029326915740966797,
      "rewards/rejected": -0.02950148656964302,
      "step": 140
    },
    {
      "epoch": 5.172413793103448,
      "grad_norm": 0.7435988187789917,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": 1.740312933921814,
      "logits/rejected": 1.7526963949203491,
      "logps/chosen": -100.93019104003906,
      "logps/rejected": -90.39695739746094,
      "loss": 0.6803,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.013870477676391602,
      "rewards/margins": 0.03614342585206032,
      "rewards/rejected": -0.05001390725374222,
      "step": 150
    },
    {
      "epoch": 5.517241379310345,
      "grad_norm": 0.8416279554367065,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": 1.7618926763534546,
      "logits/rejected": 1.8320789337158203,
      "logps/chosen": -71.05604553222656,
      "logps/rejected": -82.77362060546875,
      "loss": 0.6748,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.028952527791261673,
      "rewards/margins": 0.03283882141113281,
      "rewards/rejected": -0.061791349202394485,
      "step": 160
    },
    {
      "epoch": 5.862068965517241,
      "grad_norm": 0.9731557965278625,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": 1.7541310787200928,
      "logits/rejected": 1.845503807067871,
      "logps/chosen": -79.18022155761719,
      "logps/rejected": -89.36749267578125,
      "loss": 0.6706,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.02990702912211418,
      "rewards/margins": 0.05189533159136772,
      "rewards/rejected": -0.0818023532629013,
      "step": 170
    },
    {
      "epoch": 6.206896551724138,
      "grad_norm": 0.8338156938552856,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": 1.7501287460327148,
      "logits/rejected": 1.7370710372924805,
      "logps/chosen": -91.2562484741211,
      "logps/rejected": -80.88179779052734,
      "loss": 0.6661,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.03045990690588951,
      "rewards/margins": 0.08234542608261108,
      "rewards/rejected": -0.1128053292632103,
      "step": 180
    },
    {
      "epoch": 6.551724137931035,
      "grad_norm": 0.8057475686073303,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": 1.8594491481781006,
      "logits/rejected": 1.9256786108016968,
      "logps/chosen": -74.18313598632812,
      "logps/rejected": -84.37249755859375,
      "loss": 0.6573,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.05836126208305359,
      "rewards/margins": 0.06953532248735428,
      "rewards/rejected": -0.12789657711982727,
      "step": 190
    },
    {
      "epoch": 6.896551724137931,
      "grad_norm": 0.957699716091156,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": 1.696286916732788,
      "logits/rejected": 1.782080888748169,
      "logps/chosen": -83.79094696044922,
      "logps/rejected": -79.17488861083984,
      "loss": 0.6595,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.08588370680809021,
      "rewards/margins": 0.062126852571964264,
      "rewards/rejected": -0.14801056683063507,
      "step": 200
    },
    {
      "epoch": 7.241379310344827,
      "grad_norm": 0.8981906771659851,
      "learning_rate": 4.024886035802432e-06,
      "logits/chosen": 1.8213344812393188,
      "logits/rejected": 1.9113010168075562,
      "logps/chosen": -67.83535766601562,
      "logps/rejected": -80.57597351074219,
      "loss": 0.6498,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.05136314034461975,
      "rewards/margins": 0.10948891937732697,
      "rewards/rejected": -0.16085204482078552,
      "step": 210
    },
    {
      "epoch": 7.586206896551724,
      "grad_norm": 1.0450000762939453,
      "learning_rate": 3.9029676634059565e-06,
      "logits/chosen": 1.8091751337051392,
      "logits/rejected": 1.705392599105835,
      "logps/chosen": -98.79942321777344,
      "logps/rejected": -79.21489715576172,
      "loss": 0.6457,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.09070365875959396,
      "rewards/margins": 0.12246594578027725,
      "rewards/rejected": -0.21316960453987122,
      "step": 220
    },
    {
      "epoch": 7.931034482758621,
      "grad_norm": 0.9624446034431458,
      "learning_rate": 3.7759691553595214e-06,
      "logits/chosen": 1.8079789876937866,
      "logits/rejected": 1.8931957483291626,
      "logps/chosen": -77.8460922241211,
      "logps/rejected": -76.8448715209961,
      "loss": 0.6391,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.09181363880634308,
      "rewards/margins": 0.09949400275945663,
      "rewards/rejected": -0.19130763411521912,
      "step": 230
    },
    {
      "epoch": 8.275862068965518,
      "grad_norm": 0.9608933925628662,
      "learning_rate": 3.6443503723320837e-06,
      "logits/chosen": 1.7896497249603271,
      "logits/rejected": 1.8757597208023071,
      "logps/chosen": -79.89161682128906,
      "logps/rejected": -88.79243469238281,
      "loss": 0.6322,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.10905975103378296,
      "rewards/margins": 0.12515850365161896,
      "rewards/rejected": -0.23421823978424072,
      "step": 240
    },
    {
      "epoch": 8.620689655172415,
      "grad_norm": 0.9355646967887878,
      "learning_rate": 3.508587904974522e-06,
      "logits/chosen": 1.860887885093689,
      "logits/rejected": 1.9076162576675415,
      "logps/chosen": -85.80632781982422,
      "logps/rejected": -104.0634765625,
      "loss": 0.6217,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.11484359204769135,
      "rewards/margins": 0.1558837592601776,
      "rewards/rejected": -0.27072733640670776,
      "step": 250
    },
    {
      "epoch": 8.96551724137931,
      "grad_norm": 1.0788410902023315,
      "learning_rate": 3.3691733481883693e-06,
      "logits/chosen": 1.9172537326812744,
      "logits/rejected": 1.7995975017547607,
      "logps/chosen": -92.4389419555664,
      "logps/rejected": -69.5486068725586,
      "loss": 0.6273,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.10718520730733871,
      "rewards/margins": 0.15187612175941467,
      "rewards/rejected": -0.259061336517334,
      "step": 260
    },
    {
      "epoch": 9.310344827586206,
      "grad_norm": 0.9002386331558228,
      "learning_rate": 3.226611521064278e-06,
      "logits/chosen": 1.676670789718628,
      "logits/rejected": 1.7091697454452515,
      "logps/chosen": -78.51396942138672,
      "logps/rejected": -85.13417053222656,
      "loss": 0.6077,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.10331835597753525,
      "rewards/margins": 0.18381647765636444,
      "rewards/rejected": -0.2871348559856415,
      "step": 270
    },
    {
      "epoch": 9.655172413793103,
      "grad_norm": 0.9108310341835022,
      "learning_rate": 3.0814186389357765e-06,
      "logits/chosen": 1.778263807296753,
      "logits/rejected": 1.8473546504974365,
      "logps/chosen": -74.77851867675781,
      "logps/rejected": -72.4294662475586,
      "loss": 0.6161,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.15308822691440582,
      "rewards/margins": 0.1778026521205902,
      "rewards/rejected": -0.33089086413383484,
      "step": 280
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.946640133857727,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": 1.7126500606536865,
      "logits/rejected": 1.7476106882095337,
      "logps/chosen": -86.2017593383789,
      "logps/rejected": -80.07001495361328,
      "loss": 0.605,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.19395658373832703,
      "rewards/margins": 0.19845889508724213,
      "rewards/rejected": -0.39241549372673035,
      "step": 290
    },
    {
      "epoch": 10.344827586206897,
      "grad_norm": 1.0770831108093262,
      "learning_rate": 2.785250302445062e-06,
      "logits/chosen": 1.7564175128936768,
      "logits/rejected": 1.7827428579330444,
      "logps/chosen": -83.96778869628906,
      "logps/rejected": -90.85274505615234,
      "loss": 0.6044,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.1555892676115036,
      "rewards/margins": 0.2721293568611145,
      "rewards/rejected": -0.4277185797691345,
      "step": 300
    },
    {
      "epoch": 10.689655172413794,
      "grad_norm": 1.034155011177063,
      "learning_rate": 2.6353472714635443e-06,
      "logits/chosen": 1.8422276973724365,
      "logits/rejected": 1.9225587844848633,
      "logps/chosen": -81.3154525756836,
      "logps/rejected": -82.27003479003906,
      "loss": 0.589,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.1461830735206604,
      "rewards/margins": 0.2492922842502594,
      "rewards/rejected": -0.3954753279685974,
      "step": 310
    },
    {
      "epoch": 11.03448275862069,
      "grad_norm": 0.9909716844558716,
      "learning_rate": 2.4849541490017868e-06,
      "logits/chosen": 1.6462303400039673,
      "logits/rejected": 1.676540732383728,
      "logps/chosen": -83.37794494628906,
      "logps/rejected": -100.16087341308594,
      "loss": 0.5912,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.20598438382148743,
      "rewards/margins": 0.2601252496242523,
      "rewards/rejected": -0.46610966324806213,
      "step": 320
    },
    {
      "epoch": 11.379310344827585,
      "grad_norm": 1.0035589933395386,
      "learning_rate": 2.3346155074564712e-06,
      "logits/chosen": 1.8267767429351807,
      "logits/rejected": 1.8420498371124268,
      "logps/chosen": -83.43693542480469,
      "logps/rejected": -81.88127899169922,
      "loss": 0.5903,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.16100475192070007,
      "rewards/margins": 0.2701917588710785,
      "rewards/rejected": -0.43119654059410095,
      "step": 330
    },
    {
      "epoch": 11.724137931034482,
      "grad_norm": 1.1042752265930176,
      "learning_rate": 2.184875721949277e-06,
      "logits/chosen": 1.5917112827301025,
      "logits/rejected": 1.747766137123108,
      "logps/chosen": -84.04930114746094,
      "logps/rejected": -99.69436645507812,
      "loss": 0.5773,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.20484447479248047,
      "rewards/margins": 0.34736505150794983,
      "rewards/rejected": -0.5522094964981079,
      "step": 340
    },
    {
      "epoch": 12.068965517241379,
      "grad_norm": 1.109389066696167,
      "learning_rate": 2.0362769991485514e-06,
      "logits/chosen": 1.7192186117172241,
      "logits/rejected": 1.7819591760635376,
      "logps/chosen": -81.86787414550781,
      "logps/rejected": -106.79791259765625,
      "loss": 0.5738,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.2179536074399948,
      "rewards/margins": 0.31939607858657837,
      "rewards/rejected": -0.5373496413230896,
      "step": 350
    },
    {
      "epoch": 12.413793103448276,
      "grad_norm": 1.0638686418533325,
      "learning_rate": 1.8893574139429226e-06,
      "logits/chosen": 1.7060273885726929,
      "logits/rejected": 1.695678949356079,
      "logps/chosen": -96.91914367675781,
      "logps/rejected": -81.13513946533203,
      "loss": 0.5678,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.2419094741344452,
      "rewards/margins": 0.30147960782051086,
      "rewards/rejected": -0.543389081954956,
      "step": 360
    },
    {
      "epoch": 12.758620689655173,
      "grad_norm": 1.2344255447387695,
      "learning_rate": 1.744648961076068e-06,
      "logits/chosen": 1.8007898330688477,
      "logits/rejected": 1.8490253686904907,
      "logps/chosen": -75.9570083618164,
      "logps/rejected": -80.76083374023438,
      "loss": 0.5739,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.22758683562278748,
      "rewards/margins": 0.28146892786026,
      "rewards/rejected": -0.5090557336807251,
      "step": 370
    },
    {
      "epoch": 13.10344827586207,
      "grad_norm": 1.401179313659668,
      "learning_rate": 1.602675628797636e-06,
      "logits/chosen": 1.7176204919815063,
      "logits/rejected": 1.7408435344696045,
      "logps/chosen": -78.79522705078125,
      "logps/rejected": -92.59135437011719,
      "loss": 0.5856,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.26380348205566406,
      "rewards/margins": 0.24713349342346191,
      "rewards/rejected": -0.510936975479126,
      "step": 380
    },
    {
      "epoch": 13.448275862068966,
      "grad_norm": 1.135343313217163,
      "learning_rate": 1.4639515015056205e-06,
      "logits/chosen": 1.7480716705322266,
      "logits/rejected": 1.7659183740615845,
      "logps/chosen": -97.42142486572266,
      "logps/rejected": -84.87925720214844,
      "loss": 0.5641,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.2054813802242279,
      "rewards/margins": 0.34871339797973633,
      "rewards/rejected": -0.5541948080062866,
      "step": 390
    },
    {
      "epoch": 13.793103448275861,
      "grad_norm": 1.2716423273086548,
      "learning_rate": 1.328978898250525e-06,
      "logits/chosen": 1.7928969860076904,
      "logits/rejected": 1.7819544076919556,
      "logps/chosen": -83.4045181274414,
      "logps/rejected": -75.65983581542969,
      "loss": 0.5611,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.21872875094413757,
      "rewards/margins": 0.3643662631511688,
      "rewards/rejected": -0.5830950140953064,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3506529383971553e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}