{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1718213058419244,
  "eval_steps": 500,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "clip_ratio": 0.0,
      "completion_length": 3435.541748046875,
      "epoch": 0.003436426116838488,
      "grad_norm": 0.07630682736635208,
      "kl": 0.0,
      "learning_rate": 2e-08,
      "loss": -0.0067,
      "num_tokens": 1402750.0,
      "reward": -0.28836290910840034,
      "reward_std": 0.5228589028120041,
      "rewards/cosine_scaled_reward": -0.19886894896626472,
      "rewards/format_reward": 0.10937500093132257,
      "step": 1
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3328.8594360351562,
      "epoch": 0.006872852233676976,
      "grad_norm": 0.0795634388923645,
      "kl": 0.0,
      "learning_rate": 4e-08,
      "loss": -0.0212,
      "num_tokens": 2772778.0,
      "reward": -0.24981184303760529,
      "reward_std": 0.5158329159021378,
      "rewards/cosine_scaled_reward": -0.21214550733566284,
      "rewards/format_reward": 0.1744791679084301,
      "step": 2
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3388.9662475585938,
      "epoch": 0.010309278350515464,
      "grad_norm": 0.0782911404967308,
      "kl": 0.0006399154663085938,
      "learning_rate": 6e-08,
      "loss": -0.0117,
      "num_tokens": 4160535.0,
      "reward": -0.1925769094377756,
      "reward_std": 0.49376169592142105,
      "rewards/cosine_scaled_reward": -0.18483011424541473,
      "rewards/format_reward": 0.1770833358168602,
      "step": 3
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3274.7423095703125,
      "epoch": 0.013745704467353952,
      "grad_norm": 0.08107814937829971,
      "kl": 0.0006198883056640625,
      "learning_rate": 8e-08,
      "loss": -0.0191,
      "num_tokens": 5507208.0,
      "reward": -0.11736843098333338,
      "reward_std": 0.5767310410737991,
      "rewards/cosine_scaled_reward": -0.1550383809953928,
      "rewards/format_reward": 0.19270833395421505,
      "step": 4
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3351.6198120117188,
      "epoch": 0.01718213058419244,
      "grad_norm": 0.0697300136089325,
      "kl": 0.000614166259765625,
      "learning_rate": 1e-07,
      "loss": -0.0236,
      "num_tokens": 6873604.0,
      "reward": -0.17346507962793112,
      "reward_std": 0.5954280346632004,
      "rewards/cosine_scaled_reward": -0.1687637884169817,
      "rewards/format_reward": 0.1640625037252903,
      "step": 5
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3481.205810546875,
      "epoch": 0.020618556701030927,
      "grad_norm": 0.07721535861492157,
      "kl": 0.0006694793701171875,
      "learning_rate": 1.2e-07,
      "loss": -0.0123,
      "num_tokens": 8293145.0,
      "reward": -0.26027682796120644,
      "reward_std": 0.589268833398819,
      "rewards/cosine_scaled_reward": -0.19784674793481827,
      "rewards/format_reward": 0.13541666977107525,
      "step": 6
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3353.908935546875,
      "epoch": 0.024054982817869417,
      "grad_norm": 0.08021606504917145,
      "kl": 0.00066375732421875,
      "learning_rate": 1.4e-07,
      "loss": -0.0066,
      "num_tokens": 9663540.0,
      "reward": -0.11294916458427906,
      "reward_std": 0.5502363964915276,
      "rewards/cosine_scaled_reward": -0.15413082763552666,
      "rewards/format_reward": 0.19531250186264515,
      "step": 7
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3286.4375610351562,
      "epoch": 0.027491408934707903,
      "grad_norm": 0.08099574595689774,
      "kl": 0.0006456375122070312,
      "learning_rate": 1.6e-07,
      "loss": -0.0197,
      "num_tokens": 11007492.0,
      "reward": -0.1736841667443514,
      "reward_std": 0.6177337318658829,
      "rewards/cosine_scaled_reward": -0.19231082685291767,
      "rewards/format_reward": 0.21093750186264515,
      "step": 8
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3281.7579345703125,
      "epoch": 0.030927835051546393,
      "grad_norm": 0.07414574921131134,
      "kl": 0.0006361007690429688,
      "learning_rate": 1.8e-07,
      "loss": -0.0083,
      "num_tokens": 12354957.0,
      "reward": -0.022597413510084152,
      "reward_std": 0.5933112800121307,
      "rewards/cosine_scaled_reward": -0.12718411907553673,
      "rewards/format_reward": 0.2317708283662796,
      "step": 9
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3356.3021850585938,
      "epoch": 0.03436426116838488,
      "grad_norm": 0.07732414454221725,
      "kl": 0.0006494522094726562,
      "learning_rate": 2e-07,
      "loss": -0.0166,
      "num_tokens": 13727747.0,
      "reward": -0.15971257165074348,
      "reward_std": 0.6636292636394501,
      "rewards/cosine_scaled_reward": -0.17751253210008144,
      "rewards/format_reward": 0.1953124962747097,
      "step": 10
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3344.6823120117188,
      "epoch": 0.037800687285223365,
      "grad_norm": 0.07448139786720276,
      "kl": 0.0006856918334960938,
      "learning_rate": 2.1999999999999998e-07,
      "loss": -0.0224,
      "num_tokens": 15100095.0,
      "reward": -0.2629380598664284,
      "reward_std": 0.5202662125229836,
      "rewards/cosine_scaled_reward": -0.2148023582994938,
      "rewards/format_reward": 0.16666666977107525,
      "step": 11
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3299.7682495117188,
      "epoch": 0.041237113402061855,
      "grad_norm": 0.0820096954703331,
      "kl": 0.0006608963012695312,
      "learning_rate": 2.4e-07,
      "loss": -0.0151,
      "num_tokens": 16449742.0,
      "reward": -0.1303296772239264,
      "reward_std": 0.5848766267299652,
      "rewards/cosine_scaled_reward": -0.1654252614825964,
      "rewards/format_reward": 0.2005208320915699,
      "step": 12
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3410.3333740234375,
      "epoch": 0.044673539518900345,
      "grad_norm": 0.07228722423315048,
      "kl": 0.0006933212280273438,
      "learning_rate": 2.6e-07,
      "loss": -0.0197,
      "num_tokens": 17843298.0,
      "reward": -0.22578875720500946,
      "reward_std": 0.5726261362433434,
      "rewards/cosine_scaled_reward": -0.18711312860250473,
      "rewards/format_reward": 0.1484375037252903,
      "step": 13
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3346.8073120117188,
      "epoch": 0.048109965635738834,
      "grad_norm": 0.07395637035369873,
      "kl": 0.0006265640258789062,
      "learning_rate": 2.8e-07,
      "loss": -0.0217,
      "num_tokens": 19217242.0,
      "reward": -0.12510624434798956,
      "reward_std": 0.6174614131450653,
      "rewards/cosine_scaled_reward": -0.16281353868544102,
      "rewards/format_reward": 0.2005208283662796,
      "step": 14
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3459.5391845703125,
      "epoch": 0.05154639175257732,
      "grad_norm": 0.07217426598072052,
      "kl": 0.0006237030029296875,
      "learning_rate": 3e-07,
      "loss": -0.009,
      "num_tokens": 20625409.0,
      "reward": -0.2986975237727165,
      "reward_std": 0.5028247013688087,
      "rewards/cosine_scaled_reward": -0.20403625443577766,
      "rewards/format_reward": 0.10937500139698386,
      "step": 15
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3453.9298095703125,
      "epoch": 0.054982817869415807,
      "grad_norm": 0.07679347693920135,
      "kl": 0.0006990432739257812,
      "learning_rate": 3.2e-07,
      "loss": -0.0042,
      "num_tokens": 22037578.0,
      "reward": -0.25092077255249023,
      "reward_std": 0.5149872973561287,
      "rewards/cosine_scaled_reward": -0.18014787510037422,
      "rewards/format_reward": 0.109375,
      "step": 16
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3302.6407470703125,
      "epoch": 0.058419243986254296,
      "grad_norm": 0.08445706218481064,
      "kl": 0.00072479248046875,
      "learning_rate": 3.4000000000000003e-07,
      "loss": -0.0126,
      "num_tokens": 23384524.0,
      "reward": -0.15077882632613182,
      "reward_std": 0.5705722346901894,
      "rewards/cosine_scaled_reward": -0.17044148780405521,
      "rewards/format_reward": 0.19010416232049465,
      "step": 17
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3237.401123046875,
      "epoch": 0.061855670103092786,
      "grad_norm": 0.07715122401714325,
      "kl": 0.000637054443359375,
      "learning_rate": 3.6e-07,
      "loss": -0.0111,
      "num_tokens": 24714692.0,
      "reward": 0.02988712175283581,
      "reward_std": 0.6443284898996353,
      "rewards/cosine_scaled_reward": -0.10875435825437307,
      "rewards/format_reward": 0.2473958320915699,
      "step": 18
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3372.8281860351562,
      "epoch": 0.06529209621993128,
      "grad_norm": 0.08387548476457596,
      "kl": 0.000675201416015625,
      "learning_rate": 3.7999999999999996e-07,
      "loss": -0.0098,
      "num_tokens": 26092178.0,
      "reward": -0.1666894219815731,
      "reward_std": 0.5761949121952057,
      "rewards/cosine_scaled_reward": -0.1588655449450016,
      "rewards/format_reward": 0.15104166697710752,
      "step": 19
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3322.8881225585938,
      "epoch": 0.06872852233676977,
      "grad_norm": 0.08063298463821411,
      "kl": 0.0006866455078125,
      "learning_rate": 4e-07,
      "loss": -0.0153,
      "num_tokens": 27456091.0,
      "reward": -0.23877026326954365,
      "reward_std": 0.4924458712339401,
      "rewards/cosine_scaled_reward": -0.1896976288408041,
      "rewards/format_reward": 0.14062499906867743,
      "step": 20
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3381.4323120117188,
      "epoch": 0.07216494845360824,
      "grad_norm": 0.07568925619125366,
      "kl": 0.000701904296875,
      "learning_rate": 4.1999999999999995e-07,
      "loss": -0.0125,
      "num_tokens": 28838129.0,
      "reward": -0.11927792057394981,
      "reward_std": 0.5444926768541336,
      "rewards/cosine_scaled_reward": -0.1585972849279642,
      "rewards/format_reward": 0.19791666604578495,
      "step": 21
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3260.4401245117188,
      "epoch": 0.07560137457044673,
      "grad_norm": 0.08385315537452698,
      "kl": 0.0006837844848632812,
      "learning_rate": 4.3999999999999997e-07,
      "loss": -0.0108,
      "num_tokens": 30171120.0,
      "reward": -0.10095808655023575,
      "reward_std": 0.587816633284092,
      "rewards/cosine_scaled_reward": -0.15985404793173075,
      "rewards/format_reward": 0.21875,
      "step": 22
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3373.6173095703125,
      "epoch": 0.07903780068728522,
      "grad_norm": 0.08223242312669754,
      "kl": 0.0006856918334960938,
      "learning_rate": 4.6e-07,
      "loss": -0.0174,
      "num_tokens": 31549449.0,
      "reward": -0.25981973111629486,
      "reward_std": 0.5918650180101395,
      "rewards/cosine_scaled_reward": -0.21194110810756683,
      "rewards/format_reward": 0.16406250186264515,
      "step": 23
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 2977.151123046875,
      "epoch": 0.08247422680412371,
      "grad_norm": 0.09303563088178635,
      "kl": 0.0006265640258789062,
      "learning_rate": 4.8e-07,
      "loss": -0.0219,
      "num_tokens": 32781049.0,
      "reward": 0.14391778409481049,
      "reward_std": 0.6896847039461136,
      "rewards/cosine_scaled_reward": -0.1090306956321001,
      "rewards/format_reward": 0.3619791641831398,
      "step": 24
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3351.9610595703125,
      "epoch": 0.0859106529209622,
      "grad_norm": 0.08072555065155029,
      "kl": 0.000690460205078125,
      "learning_rate": 5e-07,
      "loss": -0.0183,
      "num_tokens": 34155622.0,
      "reward": -0.14797978568822145,
      "reward_std": 0.5801117792725563,
      "rewards/cosine_scaled_reward": -0.1703440584242344,
      "rewards/format_reward": 0.1927083320915699,
      "step": 25
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3287.3984985351562,
      "epoch": 0.08934707903780069,
      "grad_norm": 0.0794869065284729,
      "kl": 0.0006580352783203125,
      "learning_rate": 5.2e-07,
      "loss": -0.0171,
      "num_tokens": 35505685.0,
      "reward": -0.08633977361023426,
      "reward_std": 0.5518800467252731,
      "rewards/cosine_scaled_reward": -0.15905530750751495,
      "rewards/format_reward": 0.2317708320915699,
      "step": 26
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3477.8203125,
      "epoch": 0.09278350515463918,
      "grad_norm": 0.07011042535305023,
      "kl": 0.0006265640258789062,
      "learning_rate": 5.4e-07,
      "loss": -0.0108,
      "num_tokens": 36918598.0,
      "reward": -0.2969396822154522,
      "reward_std": 0.5096745043992996,
      "rewards/cosine_scaled_reward": -0.19534482806921005,
      "rewards/format_reward": 0.09375,
      "step": 27
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3348.5912475585938,
      "epoch": 0.09621993127147767,
      "grad_norm": 0.0749465823173523,
      "kl": 0.000682830810546875,
      "learning_rate": 5.6e-07,
      "loss": -0.0155,
      "num_tokens": 38287383.0,
      "reward": -0.15528945997357368,
      "reward_std": 0.6058137118816376,
      "rewards/cosine_scaled_reward": -0.1753009781241417,
      "rewards/format_reward": 0.1953125,
      "step": 28
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3372.822998046875,
      "epoch": 0.09965635738831616,
      "grad_norm": 0.07214676588773727,
      "kl": 0.0006723403930664062,
      "learning_rate": 5.8e-07,
      "loss": -0.0182,
      "num_tokens": 39663997.0,
      "reward": -0.17148404195904732,
      "reward_std": 0.6370180249214172,
      "rewards/cosine_scaled_reward": -0.1847003474831581,
      "rewards/format_reward": 0.1979166679084301,
      "step": 29
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3380.385498046875,
      "epoch": 0.10309278350515463,
      "grad_norm": 0.07193299382925034,
      "kl": 0.0006971359252929688,
      "learning_rate": 6e-07,
      "loss": -0.0178,
      "num_tokens": 41042879.0,
      "reward": -0.2691922076046467,
      "reward_std": 0.4672084078192711,
      "rewards/cosine_scaled_reward": -0.2114190198481083,
      "rewards/format_reward": 0.15364583767950535,
      "step": 30
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3371.7891845703125,
      "epoch": 0.10652920962199312,
      "grad_norm": 0.07917183637619019,
      "kl": 0.0007505416870117188,
      "learning_rate": 6.2e-07,
      "loss": -0.0137,
      "num_tokens": 42425156.0,
      "reward": -0.2727040550671518,
      "reward_std": 0.4990428015589714,
      "rewards/cosine_scaled_reward": -0.21057077683508396,
      "rewards/format_reward": 0.1484375,
      "step": 31
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3415.7943725585938,
      "epoch": 0.10996563573883161,
      "grad_norm": 0.07662034034729004,
      "kl": 0.0006952285766601562,
      "learning_rate": 6.4e-07,
      "loss": -0.019,
      "num_tokens": 43823815.0,
      "reward": -0.28142979741096497,
      "reward_std": 0.5432849302887917,
      "rewards/cosine_scaled_reward": -0.21363156288862228,
      "rewards/format_reward": 0.1458333320915699,
      "step": 32
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3361.9037475585938,
      "epoch": 0.1134020618556701,
      "grad_norm": 0.07624170184135437,
      "kl": 0.0006971359252929688,
      "learning_rate": 6.6e-07,
      "loss": -0.0144,
      "num_tokens": 45193236.0,
      "reward": -0.1651035211980343,
      "reward_std": 0.6060075983405113,
      "rewards/cosine_scaled_reward": -0.17239550687372684,
      "rewards/format_reward": 0.1796875037252903,
      "step": 33
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3295.6173095703125,
      "epoch": 0.11683848797250859,
      "grad_norm": 0.07951829582452774,
      "kl": 0.0006999969482421875,
      "learning_rate": 6.800000000000001e-07,
      "loss": -0.0126,
      "num_tokens": 46544973.0,
      "reward": -0.1435023844242096,
      "reward_std": 0.6106936782598495,
      "rewards/cosine_scaled_reward": -0.1889386922121048,
      "rewards/format_reward": 0.23437499813735485,
      "step": 34
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3305.9193115234375,
      "epoch": 0.12027491408934708,
      "grad_norm": 0.08214527368545532,
      "kl": 0.0006923675537109375,
      "learning_rate": 7e-07,
      "loss": -0.0116,
      "num_tokens": 47892434.0,
      "reward": -0.08280465751886368,
      "reward_std": 0.6393849849700928,
      "rewards/cosine_scaled_reward": -0.1559856589883566,
      "rewards/format_reward": 0.2291666641831398,
      "step": 35
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3307.6328735351562,
      "epoch": 0.12371134020618557,
      "grad_norm": 0.0808284804224968,
      "kl": 0.000701904296875,
      "learning_rate": 7.2e-07,
      "loss": -0.0228,
      "num_tokens": 49246847.0,
      "reward": -0.16467856615781784,
      "reward_std": 0.639389768242836,
      "rewards/cosine_scaled_reward": -0.18129761889576912,
      "rewards/format_reward": 0.1979166716337204,
      "step": 36
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3419.182373046875,
      "epoch": 0.12714776632302405,
      "grad_norm": 0.07309407740831375,
      "kl": 0.000705718994140625,
      "learning_rate": 7.4e-07,
      "loss": -0.0113,
      "num_tokens": 50640711.0,
      "reward": -0.16528937965631485,
      "reward_std": 0.6092499941587448,
      "rewards/cosine_scaled_reward": -0.16858218982815742,
      "rewards/format_reward": 0.17187499813735485,
      "step": 37
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3183.1563720703125,
      "epoch": 0.13058419243986255,
      "grad_norm": 0.0878312960267067,
      "kl": 0.000705718994140625,
      "learning_rate": 7.599999999999999e-07,
      "loss": -0.017,
      "num_tokens": 51947181.0,
      "reward": -0.08579694479703903,
      "reward_std": 0.5650007948279381,
      "rewards/cosine_scaled_reward": -0.17571097612380981,
      "rewards/format_reward": 0.265625,
      "step": 38
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3337.8516235351562,
      "epoch": 0.13402061855670103,
      "grad_norm": 0.07768604904413223,
      "kl": 0.0007076263427734375,
      "learning_rate": 7.799999999999999e-07,
      "loss": -0.0154,
      "num_tokens": 53317764.0,
      "reward": -0.20015084743499756,
      "reward_std": 0.5532346814870834,
      "rewards/cosine_scaled_reward": -0.18080458976328373,
      "rewards/format_reward": 0.16145833395421505,
      "step": 39
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3270.3021240234375,
      "epoch": 0.13745704467353953,
      "grad_norm": 0.07652192562818527,
      "kl": 0.0006961822509765625,
      "learning_rate": 8e-07,
      "loss": -0.023,
      "num_tokens": 54659654.0,
      "reward": -0.056294072419404984,
      "reward_std": 0.5959418416023254,
      "rewards/cosine_scaled_reward": -0.1466366145759821,
      "rewards/format_reward": 0.2369791679084301,
      "step": 40
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3348.822998046875,
      "epoch": 0.140893470790378,
      "grad_norm": 0.07810583710670471,
      "kl": 0.0006856918334960938,
      "learning_rate": 8.199999999999999e-07,
      "loss": -0.0102,
      "num_tokens": 56025612.0,
      "reward": -0.19650722108781338,
      "reward_std": 0.5950964242219925,
      "rewards/cosine_scaled_reward": -0.18419111147522926,
      "rewards/format_reward": 0.17187499813735485,
      "step": 41
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3268.635498046875,
      "epoch": 0.14432989690721648,
      "grad_norm": 0.07616201788187027,
      "kl": 0.0006771087646484375,
      "learning_rate": 8.399999999999999e-07,
      "loss": -0.0138,
      "num_tokens": 57364348.0,
      "reward": 0.024999878369271755,
      "reward_std": 0.635323241353035,
      "rewards/cosine_scaled_reward": -0.11380214802920818,
      "rewards/format_reward": 0.2526041679084301,
      "step": 42
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3218.4974975585938,
      "epoch": 0.14776632302405499,
      "grad_norm": 0.0802433118224144,
      "kl": 0.0007371902465820312,
      "learning_rate": 8.599999999999999e-07,
      "loss": -0.021,
      "num_tokens": 58678671.0,
      "reward": -0.09253586642444134,
      "reward_std": 0.595914788544178,
      "rewards/cosine_scaled_reward": -0.1699658501893282,
      "rewards/format_reward": 0.2473958283662796,
      "step": 43
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3297.1121215820312,
      "epoch": 0.15120274914089346,
      "grad_norm": 0.0863797515630722,
      "kl": 0.0007600784301757812,
      "learning_rate": 8.799999999999999e-07,
      "loss": -0.0196,
      "num_tokens": 60033334.0,
      "reward": -0.04163103736937046,
      "reward_std": 0.5838516503572464,
      "rewards/cosine_scaled_reward": -0.1275863479822874,
      "rewards/format_reward": 0.2135416641831398,
      "step": 44
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3309.9922485351562,
      "epoch": 0.15463917525773196,
      "grad_norm": 0.08027694374322891,
      "kl": 0.0007181167602539062,
      "learning_rate": 9e-07,
      "loss": -0.0224,
      "num_tokens": 61382983.0,
      "reward": -0.10378132946789265,
      "reward_std": 0.6359454840421677,
      "rewards/cosine_scaled_reward": -0.15996357426047325,
      "rewards/format_reward": 0.2161458320915699,
      "step": 45
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3202.8724975585938,
      "epoch": 0.15807560137457044,
      "grad_norm": 0.0834212377667427,
      "kl": 0.000774383544921875,
      "learning_rate": 9.2e-07,
      "loss": -0.0124,
      "num_tokens": 62689362.0,
      "reward": -0.0990382581949234,
      "reward_std": 0.5990613698959351,
      "rewards/cosine_scaled_reward": -0.16670663096010685,
      "rewards/format_reward": 0.23437499720603228,
      "step": 46
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3431.0208740234375,
      "epoch": 0.16151202749140894,
      "grad_norm": 0.07041551172733307,
      "kl": 0.0007162094116210938,
      "learning_rate": 9.399999999999999e-07,
      "loss": -0.0228,
      "num_tokens": 64087406.0,
      "reward": -0.2570202387869358,
      "reward_std": 0.5948278307914734,
      "rewards/cosine_scaled_reward": -0.20142677798867226,
      "rewards/format_reward": 0.1458333320915699,
      "step": 47
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3309.7761840820312,
      "epoch": 0.16494845360824742,
      "grad_norm": 0.07270783185958862,
      "kl": 0.0007419586181640625,
      "learning_rate": 9.6e-07,
      "loss": -0.0212,
      "num_tokens": 65438982.0,
      "reward": -0.06799130514264107,
      "reward_std": 0.6631468534469604,
      "rewards/cosine_scaled_reward": -0.13165189698338509,
      "rewards/format_reward": 0.1953125037252903,
      "step": 48
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3239.0000610351562,
      "epoch": 0.16838487972508592,
      "grad_norm": 0.07522521167993546,
      "kl": 0.0007476806640625,
      "learning_rate": 9.8e-07,
      "loss": -0.0196,
      "num_tokens": 66769818.0,
      "reward": 0.0649011842906475,
      "reward_std": 0.6760600805282593,
      "rewards/cosine_scaled_reward": -0.09645565785467625,
      "rewards/format_reward": 0.2578125,
      "step": 49
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 3254.1094360351562,
      "epoch": 0.1718213058419244,
      "grad_norm": 0.07710978388786316,
      "kl": 0.000732421875,
      "learning_rate": 1e-06,
      "loss": -0.0147,
      "num_tokens": 68100300.0,
      "reward": -0.058488317765295506,
      "reward_std": 0.6240686923265457,
      "rewards/cosine_scaled_reward": -0.14512957073748112,
      "rewards/format_reward": 0.2317708358168602,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}