{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.674418604651162,
  "eval_steps": 500,
  "global_step": 260,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.37209302325581395,
      "grad_norm": 0.9763292670249939,
      "learning_rate": 1.9230769230769234e-06,
      "logits/chosen": 1.7089074850082397,
      "logits/rejected": 1.7024719715118408,
      "logps/chosen": -80.31088256835938,
      "logps/rejected": -91.45267486572266,
      "loss": 0.692,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": -0.0024384786374866962,
      "rewards/margins": -0.0009521525353193283,
      "rewards/rejected": -0.001486325403675437,
      "step": 10
    },
    {
      "epoch": 0.7441860465116279,
      "grad_norm": 0.5258393883705139,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": 1.8638836145401,
      "logits/rejected": 1.8883317708969116,
      "logps/chosen": -81.79865264892578,
      "logps/rejected": -76.99979400634766,
      "loss": 0.6928,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.00844244472682476,
      "rewards/margins": 0.0048658037558197975,
      "rewards/rejected": 0.0035766414366662502,
      "step": 20
    },
    {
      "epoch": 1.1162790697674418,
      "grad_norm": 0.5254527926445007,
      "learning_rate": 4.996395926410354e-06,
      "logits/chosen": 1.7035324573516846,
      "logits/rejected": 1.731885552406311,
      "logps/chosen": -80.90572357177734,
      "logps/rejected": -82.523193359375,
      "loss": 0.6945,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.003143525216728449,
      "rewards/margins": -0.0011914735659956932,
      "rewards/rejected": -0.0019520517671480775,
      "step": 30
    },
    {
      "epoch": 1.4883720930232558,
      "grad_norm": 0.5585340261459351,
      "learning_rate": 4.955969343539162e-06,
      "logits/chosen": 1.7126038074493408,
      "logits/rejected": 1.7729876041412354,
      "logps/chosen": -82.4004898071289,
      "logps/rejected": -86.97776794433594,
      "loss": 0.6942,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -0.005664472468197346,
      "rewards/margins": -0.014465728774666786,
      "rewards/rejected": 0.00880125630646944,
      "step": 40
    },
    {
      "epoch": 1.8604651162790697,
      "grad_norm": 0.46961840987205505,
      "learning_rate": 4.8713411048678635e-06,
      "logits/chosen": 1.7948715686798096,
      "logits/rejected": 1.836954116821289,
      "logps/chosen": -87.7071304321289,
      "logps/rejected": -75.44202423095703,
      "loss": 0.693,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.0024648040998727083,
      "rewards/margins": -0.004807753954082727,
      "rewards/rejected": 0.007272557821124792,
      "step": 50
    },
    {
      "epoch": 2.2325581395348837,
      "grad_norm": 0.48951366543769836,
      "learning_rate": 4.744034319097536e-06,
      "logits/chosen": 1.8686463832855225,
      "logits/rejected": 1.831539511680603,
      "logps/chosen": -97.39242553710938,
      "logps/rejected": -83.82582092285156,
      "loss": 0.6931,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.006232857704162598,
      "rewards/margins": 0.006834377534687519,
      "rewards/rejected": -0.0006015209364704788,
      "step": 60
    },
    {
      "epoch": 2.604651162790698,
      "grad_norm": 0.5472654700279236,
      "learning_rate": 4.5763402081200295e-06,
      "logits/chosen": 1.7030988931655884,
      "logits/rejected": 1.7544777393341064,
      "logps/chosen": -76.77324676513672,
      "logps/rejected": -74.05345153808594,
      "loss": 0.6906,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.005271740257740021,
      "rewards/margins": -0.0019098047632724047,
      "rewards/rejected": -0.0033619359601289034,
      "step": 70
    },
    {
      "epoch": 2.9767441860465116,
      "grad_norm": 0.5428044199943542,
      "learning_rate": 4.3712768704277535e-06,
      "logits/chosen": 1.7908856868743896,
      "logits/rejected": 1.8412494659423828,
      "logps/chosen": -81.26551818847656,
      "logps/rejected": -90.65868377685547,
      "loss": 0.6903,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.0007023714715614915,
      "rewards/margins": -0.004734506830573082,
      "rewards/rejected": 0.004032135009765625,
      "step": 80
    },
    {
      "epoch": 3.3488372093023258,
      "grad_norm": 0.5832298994064331,
      "learning_rate": 4.1325349624589625e-06,
      "logits/chosen": 1.6744792461395264,
      "logits/rejected": 1.7848085165023804,
      "logps/chosen": -69.33172607421875,
      "logps/rejected": -89.09760284423828,
      "loss": 0.6887,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.001944942632690072,
      "rewards/margins": 0.011368460953235626,
      "rewards/rejected": -0.009423518553376198,
      "step": 90
    },
    {
      "epoch": 3.7209302325581395,
      "grad_norm": 0.5550350546836853,
      "learning_rate": 3.8644112754862614e-06,
      "logits/chosen": 1.8829429149627686,
      "logits/rejected": 1.8508192300796509,
      "logps/chosen": -87.32453918457031,
      "logps/rejected": -90.84036254882812,
      "loss": 0.6894,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.0012000806163996458,
      "rewards/margins": 0.0026218085549771786,
      "rewards/rejected": -0.0038218882400542498,
      "step": 100
    },
    {
      "epoch": 4.093023255813954,
      "grad_norm": 3.977888584136963,
      "learning_rate": 3.5717314035076355e-06,
      "logits/chosen": 1.787570595741272,
      "logits/rejected": 1.7879459857940674,
      "logps/chosen": -99.02583312988281,
      "logps/rejected": -94.5812759399414,
      "loss": 0.6865,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.006580929271876812,
      "rewards/margins": 0.021825337782502174,
      "rewards/rejected": -0.015244407579302788,
      "step": 110
    },
    {
      "epoch": 4.465116279069767,
      "grad_norm": 0.6172447204589844,
      "learning_rate": 3.2597628939356174e-06,
      "logits/chosen": 1.7528693675994873,
      "logits/rejected": 1.7908378839492798,
      "logps/chosen": -79.02194213867188,
      "logps/rejected": -88.16065216064453,
      "loss": 0.689,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.010873542167246342,
      "rewards/margins": 0.001626663259230554,
      "rewards/rejected": -0.012500204145908356,
      "step": 120
    },
    {
      "epoch": 4.837209302325581,
      "grad_norm": 0.7959902286529541,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": 1.8172928094863892,
      "logits/rejected": 1.713372826576233,
      "logps/chosen": -104.71771240234375,
      "logps/rejected": -74.14459228515625,
      "loss": 0.6807,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.01367110200226307,
      "rewards/margins": 0.0376802422106266,
      "rewards/rejected": -0.024009134620428085,
      "step": 130
    },
    {
      "epoch": 5.209302325581396,
      "grad_norm": 0.7148152589797974,
      "learning_rate": 2.6006648502735384e-06,
      "logits/chosen": 1.8063843250274658,
      "logits/rejected": 1.7726043462753296,
      "logps/chosen": -94.35012817382812,
      "logps/rejected": -74.70433044433594,
      "loss": 0.679,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.0016125829424709082,
      "rewards/margins": 0.048104941844940186,
      "rewards/rejected": -0.04649236053228378,
      "step": 140
    },
    {
      "epoch": 5.5813953488372094,
      "grad_norm": 0.7632599472999573,
      "learning_rate": 2.265397526492052e-06,
      "logits/chosen": 1.7107969522476196,
      "logits/rejected": 1.7916028499603271,
      "logps/chosen": -82.15348815917969,
      "logps/rejected": -72.45692443847656,
      "loss": 0.6803,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.010645274072885513,
      "rewards/margins": 0.05290994048118591,
      "rewards/rejected": -0.0422646626830101,
      "step": 150
    },
    {
      "epoch": 5.953488372093023,
      "grad_norm": 0.7417441606521606,
      "learning_rate": 1.934352493925695e-06,
      "logits/chosen": 1.805847406387329,
      "logits/rejected": 1.853757619857788,
      "logps/chosen": -80.99852752685547,
      "logps/rejected": -85.37675476074219,
      "loss": 0.6804,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.018115444108843803,
      "rewards/margins": 0.026529336348176003,
      "rewards/rejected": -0.044644780457019806,
      "step": 160
    },
    {
      "epoch": 6.325581395348837,
      "grad_norm": 0.6947072744369507,
      "learning_rate": 1.613487782393661e-06,
      "logits/chosen": 1.78365159034729,
      "logits/rejected": 1.7505042552947998,
      "logps/chosen": -79.20752716064453,
      "logps/rejected": -76.4256362915039,
      "loss": 0.6756,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.028582002967596054,
      "rewards/margins": 0.0369187593460083,
      "rewards/rejected": -0.06550076603889465,
      "step": 170
    },
    {
      "epoch": 6.6976744186046515,
      "grad_norm": 0.708878755569458,
      "learning_rate": 1.3085781999467303e-06,
      "logits/chosen": 1.8475749492645264,
      "logits/rejected": 1.8639358282089233,
      "logps/chosen": -86.50167083740234,
      "logps/rejected": -86.65702819824219,
      "loss": 0.6744,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.020427193492650986,
      "rewards/margins": 0.03602122887969017,
      "rewards/rejected": -0.056448422372341156,
      "step": 180
    },
    {
      "epoch": 7.069767441860465,
      "grad_norm": 0.7321914434432983,
      "learning_rate": 1.0251113999421936e-06,
      "logits/chosen": 1.7573623657226562,
      "logits/rejected": 1.8096891641616821,
      "logps/chosen": -73.01332092285156,
      "logps/rejected": -84.01178741455078,
      "loss": 0.6703,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.03601118177175522,
      "rewards/margins": 0.02739904262125492,
      "rewards/rejected": -0.06341022253036499,
      "step": 190
    },
    {
      "epoch": 7.441860465116279,
      "grad_norm": 0.7363404035568237,
      "learning_rate": 7.681891162260016e-07,
      "logits/chosen": 1.7942943572998047,
      "logits/rejected": 1.8757835626602173,
      "logps/chosen": -71.05104064941406,
      "logps/rejected": -97.1144790649414,
      "loss": 0.6705,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.04499458894133568,
      "rewards/margins": 0.023196522146463394,
      "rewards/rejected": -0.06819111108779907,
      "step": 200
    },
    {
      "epoch": 7.813953488372093,
      "grad_norm": 0.751647412776947,
      "learning_rate": 5.424353439559446e-07,
      "logits/chosen": 1.8643348217010498,
      "logits/rejected": 1.852317452430725,
      "logps/chosen": -88.37493896484375,
      "logps/rejected": -86.77766418457031,
      "loss": 0.675,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.017291061580181122,
      "rewards/margins": 0.036708246916532516,
      "rewards/rejected": -0.053999315947294235,
      "step": 210
    },
    {
      "epoch": 8.186046511627907,
      "grad_norm": 0.7266362309455872,
      "learning_rate": 3.51913118594458e-07,
      "logits/chosen": 1.7909473180770874,
      "logits/rejected": 1.8149019479751587,
      "logps/chosen": -87.76082611083984,
      "logps/rejected": -94.9640884399414,
      "loss": 0.6657,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.016025662422180176,
      "rewards/margins": 0.05901428312063217,
      "rewards/rejected": -0.07503994554281235,
      "step": 220
    },
    {
      "epoch": 8.55813953488372,
      "grad_norm": 0.7438246011734009,
      "learning_rate": 2.0005139085293945e-07,
      "logits/chosen": 1.805499792098999,
      "logits/rejected": 1.8353807926177979,
      "logps/chosen": -86.86074829101562,
      "logps/rejected": -97.83507537841797,
      "loss": 0.6736,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.034867025911808014,
      "rewards/margins": 0.06159989908337593,
      "rewards/rejected": -0.09646693617105484,
      "step": 230
    },
    {
      "epoch": 8.930232558139535,
      "grad_norm": 0.726691484451294,
      "learning_rate": 8.958331366609424e-08,
      "logits/chosen": 1.8834251165390015,
      "logits/rejected": 1.7817695140838623,
      "logps/chosen": -92.15116119384766,
      "logps/rejected": -71.86451721191406,
      "loss": 0.6683,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.0010798743460327387,
      "rewards/margins": 0.08708261698484421,
      "rewards/rejected": -0.08816249668598175,
      "step": 240
    },
    {
      "epoch": 9.30232558139535,
      "grad_norm": 0.8213956952095032,
      "learning_rate": 2.2497051885228825e-08,
      "logits/chosen": 1.5768067836761475,
      "logits/rejected": 1.6853668689727783,
      "logps/chosen": -83.7536849975586,
      "logps/rejected": -89.20189666748047,
      "loss": 0.6717,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.05014566332101822,
      "rewards/margins": 0.031247947365045547,
      "rewards/rejected": -0.08139361441135406,
      "step": 250
    },
    {
      "epoch": 9.674418604651162,
      "grad_norm": 2.622415781021118,
      "learning_rate": 0.0,
      "logits/chosen": 1.8887628316879272,
      "logits/rejected": 1.8104219436645508,
      "logps/chosen": -98.27021789550781,
      "logps/rejected": -68.67156219482422,
      "loss": 0.6705,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.009171323850750923,
      "rewards/margins": 0.09182025492191315,
      "rewards/rejected": -0.10099156945943832,
      "step": 260
    },
    {
      "epoch": 9.674418604651162,
      "step": 260,
      "total_flos": 8.856435014909297e+17,
      "train_loss": 0.6819301917002751,
      "train_runtime": 1908.5849,
      "train_samples_per_second": 8.996,
      "train_steps_per_second": 0.136
    }
  ],
  "logging_steps": 10,
  "max_steps": 260,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.856435014909297e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}