{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 1600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06259780907668232,
"grad_norm": 85.62175750732422,
"kl": 0.0,
"learning_rate": 1.25e-08,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -239.94375,
"logps/rejected": -234.8265625,
"loss": 0.5,
"rewards/chosen": 0.0,
"rewards/margins": 1009254.4,
"rewards/rejected": -1009254.4,
"step": 10
},
{
"epoch": 0.12519561815336464,
"grad_norm": 81.53575134277344,
"kl": 0.010937499813735485,
"learning_rate": 4.0625e-08,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -240.3453125,
"logps/rejected": -231.4453125,
"loss": 0.493,
"rewards/chosen": 0.014176416397094726,
"rewards/margins": 0.04230022430419922,
"rewards/rejected": -0.028123807907104493,
"step": 20
},
{
"epoch": 0.18779342723004694,
"grad_norm": 85.28985595703125,
"kl": 0.0,
"learning_rate": 7.187499999999999e-08,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -234.5515625,
"logps/rejected": -235.8296875,
"loss": 0.3955,
"rewards/chosen": 0.2135101318359375,
"rewards/margins": 0.6504745483398438,
"rewards/rejected": -0.43696441650390627,
"step": 30
},
{
"epoch": 0.25039123630672927,
"grad_norm": 44.50237274169922,
"kl": 0.0,
"learning_rate": 1.0312499999999999e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -222.625,
"logps/rejected": -259.0625,
"loss": 0.1458,
"rewards/chosen": 1.1271026611328125,
"rewards/margins": 3.556680297851562,
"rewards/rejected": -2.42957763671875,
"step": 40
},
{
"epoch": 0.3129890453834116,
"grad_norm": 2.836519479751587,
"kl": 2.9095702171325684,
"learning_rate": 1.3437499999999998e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -191.846875,
"logps/rejected": -280.8359375,
"loss": 0.0271,
"rewards/chosen": 3.7464111328125,
"rewards/margins": 7.50875244140625,
"rewards/rejected": -3.76234130859375,
"step": 50
},
{
"epoch": 0.3755868544600939,
"grad_norm": 2.1135339736938477,
"kl": 0.03535156324505806,
"learning_rate": 1.6562499999999998e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -171.25234375,
"logps/rejected": -330.5875,
"loss": 0.0014,
"rewards/chosen": 4.8279296875,
"rewards/margins": 13.102929687500001,
"rewards/rejected": -8.275,
"step": 60
},
{
"epoch": 0.4381846635367762,
"grad_norm": 0.09101714193820953,
"kl": 0.05312500149011612,
"learning_rate": 1.96875e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -150.29921875,
"logps/rejected": -336.365625,
"loss": 0.0002,
"rewards/chosen": 7.8068359375,
"rewards/margins": 988194.9068359375,
"rewards/rejected": -988187.1,
"step": 70
},
{
"epoch": 0.5007824726134585,
"grad_norm": 0.0,
"kl": 0.22666016221046448,
"learning_rate": 2.2812499999999999e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -136.48515625,
"logps/rejected": -344.921875,
"loss": 0.0,
"rewards/chosen": 8.19384765625,
"rewards/margins": 17.39794921875,
"rewards/rejected": -9.2041015625,
"step": 80
},
{
"epoch": 0.5633802816901409,
"grad_norm": 0.0,
"kl": 0.87158203125,
"learning_rate": 2.59375e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -132.1171875,
"logps/rejected": -342.521875,
"loss": 0.0,
"rewards/chosen": 8.72099609375,
"rewards/margins": 17.72890625,
"rewards/rejected": -9.00791015625,
"step": 90
},
{
"epoch": 0.6259780907668232,
"grad_norm": 0.0,
"kl": 0.71630859375,
"learning_rate": 2.90625e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -126.82890625,
"logps/rejected": -356.28125,
"loss": 0.0006,
"rewards/chosen": 7.728125,
"rewards/margins": 18.240527343750003,
"rewards/rejected": -10.51240234375,
"step": 100
},
{
"epoch": 0.6885758998435054,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.21875e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.55546875,
"logps/rejected": -369.45,
"loss": 0.0,
"rewards/chosen": 9.27529296875,
"rewards/margins": 19.19375,
"rewards/rejected": -9.91845703125,
"step": 110
},
{
"epoch": 0.7511737089201878,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.53125e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -133.61484375,
"logps/rejected": -367.2953125,
"loss": 0.0003,
"rewards/chosen": 9.3970703125,
"rewards/margins": 19.797265625,
"rewards/rejected": -10.4001953125,
"step": 120
},
{
"epoch": 0.8137715179968701,
"grad_norm": 0.0063149575144052505,
"kl": 0.0,
"learning_rate": 3.84375e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -134.00234375,
"logps/rejected": -374.975,
"loss": 0.0,
"rewards/chosen": 8.2287109375,
"rewards/margins": 19.3267578125,
"rewards/rejected": -11.098046875,
"step": 130
},
{
"epoch": 0.8763693270735524,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.15625e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -127.0015625,
"logps/rejected": -371.39375,
"loss": 0.0,
"rewards/chosen": 7.44541015625,
"rewards/margins": 18.87958984375,
"rewards/rejected": -11.4341796875,
"step": 140
},
{
"epoch": 0.9389671361502347,
"grad_norm": 0.0,
"kl": 0.01083984412252903,
"learning_rate": 4.46875e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -127.55625,
"logps/rejected": -372.4828125,
"loss": 0.0002,
"rewards/chosen": 8.4076171875,
"rewards/margins": 988269.9076171875,
"rewards/rejected": -988261.5,
"step": 150
},
{
"epoch": 1.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.78125e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -132.6778846153846,
"logps/rejected": -396.50961538461536,
"loss": 0.0,
"rewards/chosen": 8.459835737179487,
"rewards/margins": 22.047275641025642,
"rewards/rejected": -13.587439903846153,
"step": 160
},
{
"epoch": 1.0625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.999946454160323e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -132.94140625,
"logps/rejected": -403.84375,
"loss": 0.0001,
"rewards/chosen": 8.1732421875,
"rewards/margins": 1014222.9732421875,
"rewards/rejected": -1014214.8,
"step": 170
},
{
"epoch": 1.1251956181533647,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.998994591929265e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -133.13125,
"logps/rejected": -397.328125,
"loss": 0.0,
"rewards/chosen": 8.85146484375,
"rewards/margins": 21.0630859375,
"rewards/rejected": -12.21162109375,
"step": 180
},
{
"epoch": 1.187793427230047,
"grad_norm": 0.006036366336047649,
"kl": 0.0,
"learning_rate": 4.996853343617542e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -127.7609375,
"logps/rejected": -387.0625,
"loss": 0.0,
"rewards/chosen": 8.6265625,
"rewards/margins": 21.03515625,
"rewards/rejected": -12.40859375,
"step": 190
},
{
"epoch": 1.2503912363067293,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.99352372834338e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -126.28828125,
"logps/rejected": -385.565625,
"loss": 0.0,
"rewards/chosen": 8.459375,
"rewards/margins": 21.9951171875,
"rewards/rejected": -13.5357421875,
"step": 200
},
{
"epoch": 1.3129890453834117,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.989007330823317e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.2,
"logps/rejected": -385.896875,
"loss": 0.0,
"rewards/chosen": 9.2541015625,
"rewards/margins": 21.25703125,
"rewards/rejected": -12.0029296875,
"step": 210
},
{
"epoch": 1.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.983306300617969e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -126.19453125,
"logps/rejected": -383.58125,
"loss": 0.0,
"rewards/chosen": 7.83056640625,
"rewards/margins": 20.52666015625,
"rewards/rejected": -12.69609375,
"step": 220
},
{
"epoch": 1.4381846635367763,
"grad_norm": 0.005455248989164829,
"kl": 0.0,
"learning_rate": 4.976423351108942e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -127.978125,
"logps/rejected": -376.996875,
"loss": 0.0,
"rewards/chosen": 9.66123046875,
"rewards/margins": 1034109.5612304688,
"rewards/rejected": -1034099.9,
"step": 230
},
{
"epoch": 1.5007824726134587,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.968361758207427e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.70078125,
"logps/rejected": -380.584375,
"loss": 0.0,
"rewards/chosen": 9.24931640625,
"rewards/margins": 21.27431640625,
"rewards/rejected": -12.025,
"step": 240
},
{
"epoch": 1.563380281690141,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.95912535879503e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -127.3453125,
"logps/rejected": -375.78125,
"loss": 0.0,
"rewards/chosen": 9.12412109375,
"rewards/margins": 20.69892578125,
"rewards/rejected": -11.5748046875,
"step": 250
},
{
"epoch": 1.6259780907668233,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.948718548897628e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.1171875,
"logps/rejected": -387.85625,
"loss": 0.0001,
"rewards/chosen": 7.9748046875,
"rewards/margins": 21.2263671875,
"rewards/rejected": -13.2515625,
"step": 260
},
{
"epoch": 1.6885758998435054,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.937146281593102e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -116.15859375,
"logps/rejected": -400.45625,
"loss": 0.0,
"rewards/chosen": 10.01826171875,
"rewards/margins": 22.246484375,
"rewards/rejected": -12.22822265625,
"step": 270
},
{
"epoch": 1.7511737089201878,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.924414064653938e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.99453125,
"logps/rejected": -401.19375,
"loss": 0.0,
"rewards/chosen": 10.244140625,
"rewards/margins": 23.22998046875,
"rewards/rejected": -12.98583984375,
"step": 280
},
{
"epoch": 1.81377151799687,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.910527957925823e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.4328125,
"logps/rejected": -408.809375,
"loss": 0.0,
"rewards/chosen": 8.8796875,
"rewards/margins": 22.677929687499997,
"rewards/rejected": -13.7982421875,
"step": 290
},
{
"epoch": 1.8763693270735524,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.895494570443491e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.15859375,
"logps/rejected": -409.34375,
"loss": 0.0,
"rewards/chosen": 7.855859375,
"rewards/margins": 22.48984375,
"rewards/rejected": -14.633984375,
"step": 300
},
{
"epoch": 1.9389671361502347,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.879321057285179e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.64921875,
"logps/rejected": -404.428125,
"loss": 0.0,
"rewards/chosen": 8.778125,
"rewards/margins": 1034176.078125,
"rewards/rejected": -1034167.3,
"step": 310
},
{
"epoch": 2.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.862015116167195e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.1474358974359,
"logps/rejected": -408.4935897435897,
"loss": 0.0,
"rewards/chosen": 9.043770032051283,
"rewards/margins": 23.60827323717949,
"rewards/rejected": -14.564503205128204,
"step": 320
},
{
"epoch": 2.0625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.843584983780224e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.25234375,
"logps/rejected": -412.665625,
"loss": 0.0,
"rewards/chosen": 8.83603515625,
"rewards/margins": 1053636.4360351562,
"rewards/rejected": -1053627.6,
"step": 330
},
{
"epoch": 2.1251956181533647,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.824039431869111e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.78828125,
"logps/rejected": -411.65625,
"loss": 0.0,
"rewards/chosen": 9.63583984375,
"rewards/margins": 22.90498046875,
"rewards/rejected": -13.269140625,
"step": 340
},
{
"epoch": 2.187793427230047,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.803387763057981e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -120.65703125,
"logps/rejected": -408.965625,
"loss": 0.0,
"rewards/chosen": 9.16875,
"rewards/margins": 23.2859375,
"rewards/rejected": -14.1171875,
"step": 350
},
{
"epoch": 2.2503912363067293,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.781639806422699e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.665625,
"logps/rejected": -411.8375,
"loss": 0.0,
"rewards/chosen": 8.803125,
"rewards/margins": 24.63515625,
"rewards/rejected": -15.83203125,
"step": 360
},
{
"epoch": 2.3129890453834117,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.7588059128127547e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.1765625,
"logps/rejected": -414.2,
"loss": 0.0,
"rewards/chosen": 9.49833984375,
"rewards/margins": 23.75087890625,
"rewards/rejected": -14.2525390625,
"step": 370
},
{
"epoch": 2.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.7348969499248305e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.36171875,
"logps/rejected": -412.475,
"loss": 0.0,
"rewards/chosen": 7.957421875,
"rewards/margins": 23.0701171875,
"rewards/rejected": -15.1126953125,
"step": 380
},
{
"epoch": 2.4381846635367763,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.709924297130354e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -126.88203125,
"logps/rejected": -407.1,
"loss": 0.0,
"rewards/chosen": 9.75537109375,
"rewards/margins": 1040690.1553710938,
"rewards/rejected": -1040680.4,
"step": 390
},
{
"epoch": 2.5007824726134587,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.683899840059542e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.88125,
"logps/rejected": -411.190625,
"loss": 0.0,
"rewards/chosen": 9.3162109375,
"rewards/margins": 23.785351562499997,
"rewards/rejected": -14.469140625,
"step": 400
},
{
"epoch": 2.563380281690141,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.6568359649444796e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -126.928125,
"logps/rejected": -406.478125,
"loss": 0.0,
"rewards/chosen": 9.1625,
"rewards/margins": 23.119140625,
"rewards/rejected": -13.956640625,
"step": 410
},
{
"epoch": 2.6259780907668233,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.628745552723947e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.96328125,
"logps/rejected": -414.665625,
"loss": 0.0,
"rewards/chosen": 7.9828125,
"rewards/margins": 23.58603515625,
"rewards/rejected": -15.60322265625,
"step": 420
},
{
"epoch": 2.6885758998435056,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.5996419729127904e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -115.996875,
"logps/rejected": -415.746875,
"loss": 0.0,
"rewards/chosen": 10.02958984375,
"rewards/margins": 23.37451171875,
"rewards/rejected": -13.344921875,
"step": 430
},
{
"epoch": 2.751173708920188,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.5695390772387553e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.025,
"logps/rejected": -413.50625,
"loss": 0.0,
"rewards/chosen": 10.2400390625,
"rewards/margins": 24.1744140625,
"rewards/rejected": -13.934375,
"step": 440
},
{
"epoch": 2.8137715179968703,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.538451193049814e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.5609375,
"logps/rejected": -418.05625,
"loss": 0.0,
"rewards/chosen": 8.86904296875,
"rewards/margins": 23.40283203125,
"rewards/rejected": -14.5337890625,
"step": 450
},
{
"epoch": 2.8763693270735526,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.5063931164951276e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.359375,
"logps/rejected": -417.6875,
"loss": 0.0,
"rewards/chosen": 7.84296875,
"rewards/margins": 23.1740234375,
"rewards/rejected": -15.3310546875,
"step": 460
},
{
"epoch": 2.938967136150235,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.4733801054828746e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.8359375,
"logps/rejected": -412.2,
"loss": 0.0,
"rewards/chosen": 8.7650390625,
"rewards/margins": 1040735.6650390625,
"rewards/rejected": -1040726.9,
"step": 470
},
{
"epoch": 3.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.439427872418321e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.36378205128206,
"logps/rejected": -415.18589743589746,
"loss": 0.0,
"rewards/chosen": 9.025540865384615,
"rewards/margins": 24.14092548076923,
"rewards/rejected": -15.115384615384615,
"step": 480
},
{
"epoch": 3.0625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.4045525767255566e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.5,
"logps/rejected": -418.9375,
"loss": 0.0,
"rewards/chosen": 8.81640625,
"rewards/margins": 1060202.71640625,
"rewards/rejected": -1060193.9,
"step": 490
},
{
"epoch": 3.1251956181533647,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.3687708171564917e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.021875,
"logps/rejected": -417.4625,
"loss": 0.0,
"rewards/chosen": 9.61826171875,
"rewards/margins": 23.310839843750003,
"rewards/rejected": -13.692578125,
"step": 500
},
{
"epoch": 3.187793427230047,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.332099623890748e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -120.8671875,
"logps/rejected": -414.53125,
"loss": 0.0,
"rewards/chosen": 9.15126953125,
"rewards/margins": 23.70791015625,
"rewards/rejected": -14.556640625,
"step": 510
},
{
"epoch": 3.2503912363067293,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.2945564504302156e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.8828125,
"logps/rejected": -417.375,
"loss": 0.0,
"rewards/chosen": 8.7865234375,
"rewards/margins": 25.0986328125,
"rewards/rejected": -16.312109375,
"step": 520
},
{
"epoch": 3.3129890453834117,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.2561591652921293e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.38203125,
"logps/rejected": -419.7125,
"loss": 0.0,
"rewards/chosen": 9.4814453125,
"rewards/margins": 24.1669921875,
"rewards/rejected": -14.685546875,
"step": 530
},
{
"epoch": 3.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.216926043504625e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.5890625,
"logps/rejected": -417.740625,
"loss": 0.0,
"rewards/chosen": 7.94150390625,
"rewards/margins": 23.496386718750003,
"rewards/rejected": -15.5548828125,
"step": 540
},
{
"epoch": 3.4381846635367763,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.176875757908814e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -127.1328125,
"logps/rejected": -412.325,
"loss": 0.0,
"rewards/chosen": 9.73388671875,
"rewards/margins": 1040696.2338867188,
"rewards/rejected": -1040686.5,
"step": 550
},
{
"epoch": 3.5007824726134587,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.1360273702715257e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.1,
"logps/rejected": -416.346875,
"loss": 0.0,
"rewards/chosen": 9.2978515625,
"rewards/margins": 24.17734375,
"rewards/rejected": -14.8794921875,
"step": 560
},
{
"epoch": 3.563380281690141,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.094400322212933e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -127.18671875,
"logps/rejected": -411.440625,
"loss": 0.0,
"rewards/chosen": 9.14140625,
"rewards/margins": 23.482421875,
"rewards/rejected": -14.341015625,
"step": 570
},
{
"epoch": 3.6259780907668233,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.0520144259533985e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.20546875,
"logps/rejected": -419.9625,
"loss": 0.0,
"rewards/chosen": 7.965234375,
"rewards/margins": 24.023144531249997,
"rewards/rejected": -16.05791015625,
"step": 580
},
{
"epoch": 3.6885758998435056,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.0088898548839285e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -116.21875,
"logps/rejected": -420.90625,
"loss": 0.0,
"rewards/chosen": 10.00966796875,
"rewards/margins": 23.736035156249997,
"rewards/rejected": -13.7263671875,
"step": 590
},
{
"epoch": 3.751173708920188,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.9650471339647345e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.21328125,
"logps/rejected": -418.575,
"loss": 0.0,
"rewards/chosen": 10.2236328125,
"rewards/margins": 24.5478515625,
"rewards/rejected": -14.32421875,
"step": 600
},
{
"epoch": 3.8137715179968703,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.9205071299564595e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.75859375,
"logps/rejected": -422.78125,
"loss": 0.0,
"rewards/chosen": 8.85390625,
"rewards/margins": 23.763671875,
"rewards/rejected": -14.909765625,
"step": 610
},
{
"epoch": 3.8763693270735526,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.8752910414887335e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.5296875,
"logps/rejected": -422.45,
"loss": 0.0,
"rewards/chosen": 7.8318359375,
"rewards/margins": 23.55859375,
"rewards/rejected": -15.7267578125,
"step": 620
},
{
"epoch": 3.938967136150235,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.829420388970771e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.9984375,
"logps/rejected": -416.628125,
"loss": 0.0,
"rewards/chosen": 8.75244140625,
"rewards/margins": 1040739.1524414063,
"rewards/rejected": -1040730.4,
"step": 630
},
{
"epoch": 4.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.7829170043488255e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.5576923076923,
"logps/rejected": -419.1378205128205,
"loss": 0.0,
"rewards/chosen": 9.010016025641026,
"rewards/margins": 24.447716346153847,
"rewards/rejected": -15.437700320512821,
"step": 640
},
{
"epoch": 4.062597809076682,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.7358030207153614e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.68515625,
"logps/rejected": -422.7875,
"loss": 0.0,
"rewards/chosen": 8.80205078125,
"rewards/margins": 1066764.202050781,
"rewards/rejected": -1066755.4,
"step": 650
},
{
"epoch": 4.125195618153365,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.688100861774904e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.20078125,
"logps/rejected": -421.165625,
"loss": 0.0,
"rewards/chosen": 9.6025390625,
"rewards/margins": 23.566015625,
"rewards/rejected": -13.9634765625,
"step": 660
},
{
"epoch": 4.187793427230047,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.639833231171568e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.03671875,
"logps/rejected": -418.0125,
"loss": 0.0,
"rewards/chosen": 9.1375,
"rewards/margins": 23.967968749999997,
"rewards/rejected": -14.83046875,
"step": 670
},
{
"epoch": 4.250391236306729,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.5910231016833546e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.05,
"logps/rejected": -420.934375,
"loss": 0.0,
"rewards/chosen": 8.77421875,
"rewards/margins": 25.39765625,
"rewards/rejected": -16.6234375,
"step": 680
},
{
"epoch": 4.312989045383412,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.541693704288354e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.553125,
"logps/rejected": -423.28125,
"loss": 0.0,
"rewards/chosen": 9.4671875,
"rewards/margins": 24.4375,
"rewards/rejected": -14.9703125,
"step": 690
},
{
"epoch": 4.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.4918685171080525e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.7484375,
"logps/rejected": -421.309375,
"loss": 0.0,
"rewards/chosen": 7.93095703125,
"rewards/margins": 23.78427734375,
"rewards/rejected": -15.8533203125,
"step": 700
},
{
"epoch": 4.438184663536776,
"grad_norm": 0.005550738889724016,
"kl": 0.0,
"learning_rate": 3.441571254233027e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -127.2765625,
"logps/rejected": -415.965625,
"loss": 0.0,
"rewards/chosen": 9.7220703125,
"rewards/margins": 1047253.7220703125,
"rewards/rejected": -1047244.0,
"step": 710
},
{
"epoch": 4.500782472613459,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.390825854436314e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.0,
"logps/rejected": -419.89375,
"loss": 0.0,
"rewards/chosen": 9.30546875,
"rewards/margins": 24.4697265625,
"rewards/rejected": -15.1642578125,
"step": 720
},
{
"epoch": 4.563380281690141,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.3396564697798556e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -126.87734375,
"logps/rejected": -414.896875,
"loss": 0.0,
"rewards/chosen": 9.16611328125,
"rewards/margins": 23.77490234375,
"rewards/rejected": -14.6087890625,
"step": 730
},
{
"epoch": 4.625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.2880874541194245e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.8171875,
"logps/rejected": -423.13125,
"loss": 0.0,
"rewards/chosen": 7.99150390625,
"rewards/margins": 24.32587890625,
"rewards/rejected": -16.334375,
"step": 740
},
{
"epoch": 4.688575899843506,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.236143351513505e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -115.8296875,
"logps/rejected": -423.95625,
"loss": 0.0,
"rewards/chosen": 10.0427734375,
"rewards/margins": 23.9888671875,
"rewards/rejected": -13.94609375,
"step": 750
},
{
"epoch": 4.751173708920188,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.1838488845416553e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.85625,
"logps/rejected": -421.490625,
"loss": 0.0,
"rewards/chosen": 10.25458984375,
"rewards/margins": 24.801074218750003,
"rewards/rejected": -14.546484375,
"step": 760
},
{
"epoch": 4.81377151799687,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.1312289425378944e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.3703125,
"logps/rejected": -425.434375,
"loss": 0.0,
"rewards/chosen": 8.88447265625,
"rewards/margins": 24.003222656250003,
"rewards/rejected": -15.11875,
"step": 770
},
{
"epoch": 4.876369327073553,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.078308569744732e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.16484375,
"logps/rejected": -424.81875,
"loss": 0.0,
"rewards/chosen": 7.85830078125,
"rewards/margins": 23.783886718749997,
"rewards/rejected": -15.9255859375,
"step": 780
},
{
"epoch": 4.938967136150235,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.025112953393456e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.61015625,
"logps/rejected": -419.08125,
"loss": 0.0,
"rewards/chosen": 8.7822265625,
"rewards/margins": 1047294.5822265625,
"rewards/rejected": -1047285.8,
"step": 790
},
{
"epoch": 5.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.9716674117163883e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.17948717948718,
"logps/rejected": -421.5,
"loss": 0.0,
"rewards/chosen": 9.039563301282051,
"rewards/margins": 24.669571314102562,
"rewards/rejected": -15.630008012820513,
"step": 800
},
{
"epoch": 5.062597809076682,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.917997381896764e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.28984375,
"logps/rejected": -424.934375,
"loss": 0.0,
"rewards/chosen": 8.83330078125,
"rewards/margins": 1079876.9333007813,
"rewards/rejected": -1079868.1,
"step": 810
},
{
"epoch": 5.125195618153365,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.86412840796202e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.81875,
"logps/rejected": -423.1125,
"loss": 0.0,
"rewards/chosen": 9.634375,
"rewards/margins": 23.742578125,
"rewards/rejected": -14.108203125,
"step": 820
},
{
"epoch": 5.187793427230047,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.8100861286262137e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -120.6484375,
"logps/rejected": -420.015625,
"loss": 0.0,
"rewards/chosen": 9.1689453125,
"rewards/margins": 24.1546875,
"rewards/rejected": -14.9857421875,
"step": 830
},
{
"epoch": 5.250391236306729,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.7558962650873896e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.6453125,
"logps/rejected": -423.03125,
"loss": 0.0,
"rewards/chosen": 8.804296875,
"rewards/margins": 25.608593749999997,
"rewards/rejected": -16.804296875,
"step": 840
},
{
"epoch": 5.312989045383412,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.7015846087856793e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.175,
"logps/rejected": -425.09375,
"loss": 0.0,
"rewards/chosen": 9.49833984375,
"rewards/margins": 24.61240234375,
"rewards/rejected": -15.1140625,
"step": 850
},
{
"epoch": 5.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.647177009127972e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -124.3296875,
"logps/rejected": -423.20625,
"loss": 0.0,
"rewards/chosen": 7.95791015625,
"rewards/margins": 23.971386718749997,
"rewards/rejected": -16.0134765625,
"step": 860
},
{
"epoch": 5.438184663536776,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.5926993611850017e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -126.896875,
"logps/rejected": -417.915625,
"loss": 0.0,
"rewards/chosen": 9.7544921875,
"rewards/margins": 1047255.6544921875,
"rewards/rejected": -1047245.9,
"step": 870
},
{
"epoch": 5.500782472613459,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.538177593366686e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.8359375,
"logps/rejected": -421.9625,
"loss": 0.0,
"rewards/chosen": 9.3177734375,
"rewards/margins": 24.648632812499997,
"rewards/rejected": -15.330859375,
"step": 880
},
{
"epoch": 5.563380281690141,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.4836376550816205e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -126.9578125,
"logps/rejected": -417.05,
"loss": 0.0,
"rewards/chosen": 9.15947265625,
"rewards/margins": 23.93642578125,
"rewards/rejected": -14.776953125,
"step": 890
},
{
"epoch": 5.625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.429105504386554e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -122.9390625,
"logps/rejected": -425.440625,
"loss": 0.0,
"rewards/chosen": 7.9828125,
"rewards/margins": 24.51474609375,
"rewards/rejected": -16.53193359375,
"step": 900
},
{
"epoch": 5.688575899843506,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.374607095631766e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -115.99921875,
"logps/rejected": -426.05625,
"loss": 0.0,
"rewards/chosen": 10.0283203125,
"rewards/margins": 24.1314453125,
"rewards/rejected": -14.103125,
"step": 910
},
{
"epoch": 5.751173708920188,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.3201683671082012e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -123.9859375,
"logps/rejected": -423.6125,
"loss": 0.0,
"rewards/chosen": 10.24228515625,
"rewards/margins": 24.95322265625,
"rewards/rejected": -14.7109375,
"step": 920
},
{
"epoch": 5.81377151799687,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.2658152287022447e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -125.4875,
"logps/rejected": -427.41875,
"loss": 0.0,
"rewards/chosen": 8.8740234375,
"rewards/margins": 24.151953125,
"rewards/rejected": -15.2779296875,
"step": 930
},
{
"epoch": 5.876369327073553,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.211573549564021e-07,
"logits/chosen": null,
"logits/rejected": null,
"logps/chosen": -121.2875,
"logps/rejected": -426.796875,
"loss": 0.0,
"rewards/chosen": 7.85087890625,
"rewards/margins": 23.94189453125,
"rewards/rejected": -16.091015625,
"step": 940
},
{
"epoch": 5.938967136150235,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.1574691457950803e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -122.72421875,
"logps/rejected": -421.015625,
"loss": 0.0,
"rewards/chosen": 8.7734375,
"rewards/margins": 1047295.8734375,
"rewards/rejected": -1047287.1,
"step": 950
},
{
"epoch": 6.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.103527768161332e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.29246794871794,
"logps/rejected": -423.1314102564103,
"loss": 0.0,
"rewards/chosen": 9.032451923076923,
"rewards/margins": 24.796674679487182,
"rewards/rejected": -15.764222756410257,
"step": 960
},
{
"epoch": 6.062597809076682,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.0497750898370752e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.3859375,
"logps/rejected": -426.490625,
"loss": 0.0,
"rewards/chosen": 8.82646484375,
"rewards/margins": 1086434.1264648438,
"rewards/rejected": -1086425.3,
"step": 970
},
{
"epoch": 6.125195618153365,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.9962366941859572e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.9078125,
"logps/rejected": -424.55625,
"loss": 0.0,
"rewards/chosen": 9.626953125,
"rewards/margins": 23.8404296875,
"rewards/rejected": -14.2134765625,
"step": 980
},
{
"epoch": 6.187793427230047,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.9429380625846778e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -120.7140625,
"logps/rejected": -421.41875,
"loss": 0.0,
"rewards/chosen": 9.16318359375,
"rewards/margins": 24.25966796875,
"rewards/rejected": -15.096484375,
"step": 990
},
{
"epoch": 6.250391236306729,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.8899045622952334e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.7625,
"logps/rejected": -424.309375,
"loss": 0.0,
"rewards/chosen": 8.795703125,
"rewards/margins": 25.7111328125,
"rewards/rejected": -16.9154296875,
"step": 1000
},
{
"epoch": 6.312989045383412,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.8371614343914796e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.2640625,
"logps/rejected": -426.496875,
"loss": 0.0,
"rewards/chosen": 9.49189453125,
"rewards/margins": 24.71630859375,
"rewards/rejected": -15.2244140625,
"step": 1010
},
{
"epoch": 6.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.7847337817457397e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.4140625,
"logps/rejected": -424.740625,
"loss": 0.0,
"rewards/chosen": 7.9515625,
"rewards/margins": 24.0921875,
"rewards/rejected": -16.140625,
"step": 1020
},
{
"epoch": 6.438184663536776,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.7326465570812e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -126.98671875,
"logps/rejected": -419.478125,
"loss": 0.0,
"rewards/chosen": 9.746484375,
"rewards/margins": 1040703.646484375,
"rewards/rejected": -1040693.9,
"step": 1030
},
{
"epoch": 6.500782472613459,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.6809245510957666e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -122.934375,
"logps/rejected": -423.5,
"loss": 0.0,
"rewards/chosen": 9.31103515625,
"rewards/margins": 24.76357421875,
"rewards/rejected": -15.4525390625,
"step": 1040
},
{
"epoch": 6.563380281690141,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.6295923806630337e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -127.04921875,
"logps/rejected": -418.584375,
"loss": 0.0,
"rewards/chosen": 9.152734375,
"rewards/margins": 24.0482421875,
"rewards/rejected": -14.8955078125,
"step": 1050
},
{
"epoch": 6.625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.5786744771159873e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.0265625,
"logps/rejected": -426.890625,
"loss": 0.0,
"rewards/chosen": 7.976171875,
"rewards/margins": 24.63388671875,
"rewards/rejected": -16.65771484375,
"step": 1060
},
{
"epoch": 6.688575899843506,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.528195074619011e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -116.06484375,
"logps/rejected": -427.428125,
"loss": 0.0,
"rewards/chosen": 10.021875,
"rewards/margins": 24.22421875,
"rewards/rejected": -14.20234375,
"step": 1070
},
{
"epoch": 6.751173708920188,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.4781781986337384e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.059375,
"logps/rejected": -424.915625,
"loss": 0.0,
"rewards/chosen": 10.236328125,
"rewards/margins": 25.044921875,
"rewards/rejected": -14.80859375,
"step": 1080
},
{
"epoch": 6.81377151799687,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.4286476544842324e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.56796875,
"logps/rejected": -428.546875,
"loss": 0.0,
"rewards/chosen": 8.867578125,
"rewards/margins": 24.23515625,
"rewards/rejected": -15.367578125,
"step": 1090
},
{
"epoch": 6.876369327073553,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.3796270160269439e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.3640625,
"logps/rejected": -428.04375,
"loss": 0.0,
"rewards/chosen": 7.8455078125,
"rewards/margins": 24.0421875,
"rewards/rejected": -16.1966796875,
"step": 1100
},
{
"epoch": 6.938967136150235,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.3311396144308296e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -122.78359375,
"logps/rejected": -422.16875,
"loss": 0.0,
"rewards/chosen": 8.76953125,
"rewards/margins": 1047296.56953125,
"rewards/rejected": -1047287.8,
"step": 1110
},
{
"epoch": 7.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.2832085270729797e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.3573717948718,
"logps/rejected": -424.0416666666667,
"loss": 0.0,
"rewards/chosen": 9.02704326923077,
"rewards/margins": 24.865785256410255,
"rewards/rejected": -15.838741987179487,
"step": 1120
},
{
"epoch": 7.062597809076682,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.2358565665550387e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.44765625,
"logps/rejected": -427.471875,
"loss": 0.0,
"rewards/chosen": 8.82080078125,
"rewards/margins": 1079882.9208007813,
"rewards/rejected": -1079874.1,
"step": 1130
},
{
"epoch": 7.125195618153365,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.189106269845638e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.96640625,
"logps/rejected": -425.396875,
"loss": 0.0,
"rewards/chosen": 9.623046875,
"rewards/margins": 23.8982421875,
"rewards/rejected": -14.2751953125,
"step": 1140
},
{
"epoch": 7.187793427230047,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.1429798875540267e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -120.766015625,
"logps/rejected": -422.184375,
"loss": 0.0,
"rewards/chosen": 9.16044921875,
"rewards/margins": 24.31982421875,
"rewards/rejected": -15.159375,
"step": 1150
},
{
"epoch": 7.250391236306729,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.097499373339976e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.8,
"logps/rejected": -425.178125,
"loss": 0.0,
"rewards/chosen": 8.7927734375,
"rewards/margins": 25.78515625,
"rewards/rejected": -16.9923828125,
"step": 1160
},
{
"epoch": 7.312989045383412,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.0526863734650362e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.3171875,
"logps/rejected": -427.35,
"loss": 0.0,
"rewards/chosen": 9.4873046875,
"rewards/margins": 24.777734375,
"rewards/rejected": -15.2904296875,
"step": 1170
},
{
"epoch": 7.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.0085622164900836e-07,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.49453125,
"logps/rejected": -425.421875,
"loss": 0.0,
"rewards/chosen": 7.94658203125,
"rewards/margins": 24.146386718749998,
"rewards/rejected": -16.1998046875,
"step": 1180
},
{
"epoch": 7.438184663536776,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 9.651479031240836e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -127.04609375,
"logps/rejected": -420.171875,
"loss": 0.0,
"rewards/chosen": 9.7416015625,
"rewards/margins": 1047258.2416015625,
"rewards/rejected": -1047248.5,
"step": 1190
},
{
"epoch": 7.500782472613459,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 9.224640962288857e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -122.98828125,
"logps/rejected": -424.15,
"loss": 0.0,
"rewards/chosen": 9.30625,
"rewards/margins": 24.812304687500003,
"rewards/rejected": -15.5060546875,
"step": 1200
},
{
"epoch": 7.563380281690141,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 8.805311109848254e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -127.1328125,
"logps/rejected": -419.2625,
"loss": 0.0,
"rewards/chosen": 9.1458984375,
"rewards/margins": 24.095703125,
"rewards/rejected": -14.9498046875,
"step": 1210
},
{
"epoch": 7.625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 8.393689052217964e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.1015625,
"logps/rejected": -427.63125,
"loss": 0.0,
"rewards/chosen": 7.97099609375,
"rewards/margins": 24.689453125,
"rewards/rejected": -16.71845703125,
"step": 1220
},
{
"epoch": 7.688575899843506,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 7.989970699204021e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -116.11796875,
"logps/rejected": -428.1,
"loss": 0.0,
"rewards/chosen": 10.018359375,
"rewards/margins": 24.27109375,
"rewards/rejected": -14.252734375,
"step": 1230
},
{
"epoch": 7.751173708920188,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 7.594348198877171e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.12109375,
"logps/rejected": -425.6375,
"loss": 0.0,
"rewards/chosen": 10.22890625,
"rewards/margins": 25.094140625,
"rewards/rejected": -14.865234375,
"step": 1240
},
{
"epoch": 7.81377151799687,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 7.207009846120718e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.634375,
"logps/rejected": -429.153125,
"loss": 0.0,
"rewards/chosen": 8.86318359375,
"rewards/margins": 24.279003906249997,
"rewards/rejected": -15.4158203125,
"step": 1250
},
{
"epoch": 7.876369327073553,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 6.8281399930124e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.396875,
"logps/rejected": -428.484375,
"loss": 0.0,
"rewards/chosen": 7.844140625,
"rewards/margins": 24.07578125,
"rewards/rejected": -16.231640625,
"step": 1260
},
{
"epoch": 7.938967136150235,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 6.457918961082773e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -122.82265625,
"logps/rejected": -422.675,
"loss": 0.0,
"rewards/chosen": 8.7658203125,
"rewards/margins": 1047297.3658203125,
"rewards/rejected": -1047288.6,
"step": 1270
},
{
"epoch": 8.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 6.096522955491932e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.38141025641026,
"logps/rejected": -424.55128205128204,
"loss": 0.0,
"rewards/chosen": 9.024839743589743,
"rewards/margins": 24.903846153846153,
"rewards/rejected": -15.87900641025641,
"step": 1280
},
{
"epoch": 8.062597809076681,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 5.7441239811653994e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.496875,
"logps/rejected": -427.85,
"loss": 0.0,
"rewards/chosen": 8.8173828125,
"rewards/margins": 1079884.0173828125,
"rewards/rejected": -1079875.2,
"step": 1290
},
{
"epoch": 8.125195618153365,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 5.4008897609290864e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.99609375,
"logps/rejected": -425.7375,
"loss": 0.0,
"rewards/chosen": 9.62001953125,
"rewards/margins": 23.92080078125,
"rewards/rejected": -14.30078125,
"step": 1300
},
{
"epoch": 8.187793427230046,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 5.066983655682325e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -120.80859375,
"logps/rejected": -422.44375,
"loss": 0.0,
"rewards/chosen": 9.15625,
"rewards/margins": 24.3357421875,
"rewards/rejected": -15.1794921875,
"step": 1310
},
{
"epoch": 8.25039123630673,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.7425645866469104e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.8234375,
"logps/rejected": -425.390625,
"loss": 0.0,
"rewards/chosen": 8.79130859375,
"rewards/margins": 25.80400390625,
"rewards/rejected": -17.0126953125,
"step": 1320
},
{
"epoch": 8.31298904538341,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.427786959729246e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.34609375,
"logps/rejected": -427.59375,
"loss": 0.0,
"rewards/chosen": 9.48466796875,
"rewards/margins": 24.79482421875,
"rewards/rejected": -15.31015625,
"step": 1330
},
{
"epoch": 8.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.122800592031425e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.5328125,
"logps/rejected": -425.7,
"loss": 0.0,
"rewards/chosen": 7.94443359375,
"rewards/margins": 24.164746093749997,
"rewards/rejected": -16.2203125,
"step": 1340
},
{
"epoch": 8.438184663536775,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.827750640546476e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -127.07421875,
"logps/rejected": -420.41875,
"loss": 0.0,
"rewards/chosen": 9.74013671875,
"rewards/margins": 1047258.5401367188,
"rewards/rejected": -1047248.8,
"step": 1350
},
{
"epoch": 8.500782472613459,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.542777533071442e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.025,
"logps/rejected": -424.396875,
"loss": 0.0,
"rewards/chosen": 9.3037109375,
"rewards/margins": 24.82890625,
"rewards/rejected": -15.5251953125,
"step": 1360
},
{
"epoch": 8.56338028169014,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.268016901371406e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -127.13984375,
"logps/rejected": -419.48125,
"loss": 0.0,
"rewards/chosen": 9.1462890625,
"rewards/margins": 24.112109375,
"rewards/rejected": -14.9658203125,
"step": 1370
},
{
"epoch": 8.625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.003599516626065e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.1296875,
"logps/rejected": -427.940625,
"loss": 0.0,
"rewards/chosen": 7.96953125,
"rewards/margins": 24.7130859375,
"rewards/rejected": -16.7435546875,
"step": 1380
},
{
"epoch": 8.688575899843505,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.749651227189756e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -116.1296875,
"logps/rejected": -428.45,
"loss": 0.0,
"rewards/chosen": 10.01630859375,
"rewards/margins": 24.29365234375,
"rewards/rejected": -14.27734375,
"step": 1390
},
{
"epoch": 8.751173708920188,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.5062928986944676e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.1234375,
"logps/rejected": -425.965625,
"loss": 0.0,
"rewards/chosen": 10.230078125,
"rewards/margins": 25.12177734375,
"rewards/rejected": -14.89169921875,
"step": 1400
},
{
"epoch": 8.81377151799687,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.2736403565243034e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.63828125,
"logps/rejected": -429.4375,
"loss": 0.0,
"rewards/chosen": 8.86376953125,
"rewards/margins": 24.30263671875,
"rewards/rejected": -15.4388671875,
"step": 1410
},
{
"epoch": 8.876369327073553,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.0518043306889334e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.3953125,
"logps/rejected": -428.765625,
"loss": 0.0,
"rewards/chosen": 7.84296875,
"rewards/margins": 24.098242187500002,
"rewards/rejected": -16.2552734375,
"step": 1420
},
{
"epoch": 8.938967136150234,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.8408904031220475e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -122.82890625,
"logps/rejected": -422.95,
"loss": 0.0,
"rewards/chosen": 8.76552734375,
"rewards/margins": 1047297.2655273437,
"rewards/rejected": -1047288.5,
"step": 1430
},
{
"epoch": 9.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.640998957430112e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.41185897435898,
"logps/rejected": -424.68910256410254,
"loss": 0.0,
"rewards/chosen": 9.022135416666666,
"rewards/margins": 24.911959134615383,
"rewards/rejected": -15.889823717948717,
"step": 1440
},
{
"epoch": 9.062597809076681,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.4522251311151923e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.50234375,
"logps/rejected": -427.859375,
"loss": 0.0,
"rewards/chosen": 8.8177734375,
"rewards/margins": 1079884.5177734375,
"rewards/rejected": -1079875.7,
"step": 1450
},
{
"epoch": 9.125195618153365,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.2746587702946537e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.99765625,
"logps/rejected": -425.884375,
"loss": 0.0,
"rewards/chosen": 9.62001953125,
"rewards/margins": 23.92978515625,
"rewards/rejected": -14.309765625,
"step": 1460
},
{
"epoch": 9.187793427230046,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.1083843869392873e-08,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -120.8,
"logps/rejected": -422.528125,
"loss": 0.0,
"rewards/chosen": 9.1568359375,
"rewards/margins": 24.341601562500003,
"rewards/rejected": -15.184765625,
"step": 1470
},
{
"epoch": 9.25039123630673,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 9.534811186501817e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.8390625,
"logps/rejected": -425.43125,
"loss": 0.0,
"rewards/chosen": 8.79033203125,
"rewards/margins": 25.80419921875,
"rewards/rejected": -17.0138671875,
"step": 1480
},
{
"epoch": 9.31298904538341,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 8.100226909935059e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.35625,
"logps/rejected": -427.50625,
"loss": 0.0,
"rewards/chosen": 9.48447265625,
"rewards/margins": 24.78779296875,
"rewards/rejected": -15.3033203125,
"step": 1490
},
{
"epoch": 9.375586854460094,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 6.780773824111435e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.53203125,
"logps/rejected": -425.690625,
"loss": 0.0,
"rewards/chosen": 7.9443359375,
"rewards/margins": 24.16484375,
"rewards/rejected": -16.2205078125,
"step": 1500
},
{
"epoch": 9.438184663536775,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 5.57707991723852e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -127.09296875,
"logps/rejected": -420.4125,
"loss": 0.0,
"rewards/chosen": 9.73759765625,
"rewards/margins": 1047258.2375976562,
"rewards/rejected": -1047248.5,
"step": 1510
},
{
"epoch": 9.500782472613459,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.4897180824240435e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.05,
"logps/rejected": -424.384375,
"loss": 0.0,
"rewards/chosen": 9.3013671875,
"rewards/margins": 24.8251953125,
"rewards/rejected": -15.523828125,
"step": 1520
},
{
"epoch": 9.56338028169014,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 3.5192058450101146e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -127.18046875,
"logps/rejected": -419.484375,
"loss": 0.0,
"rewards/chosen": 9.1427734375,
"rewards/margins": 24.1103515625,
"rewards/rejected": -14.967578125,
"step": 1530
},
{
"epoch": 9.625978090766823,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.666005116258946e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -123.12734375,
"logps/rejected": -427.828125,
"loss": 0.0,
"rewards/chosen": 7.96943359375,
"rewards/margins": 24.70830078125,
"rewards/rejected": -16.7388671875,
"step": 1540
},
{
"epoch": 9.688575899843505,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.9305219735082367e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -116.15859375,
"logps/rejected": -428.315625,
"loss": 0.0,
"rewards/chosen": 10.0142578125,
"rewards/margins": 24.28125,
"rewards/rejected": -14.2669921875,
"step": 1550
},
{
"epoch": 9.751173708920188,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.3131064669003322e-09,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -124.1421875,
"logps/rejected": -425.778125,
"loss": 0.0,
"rewards/chosen": 10.228515625,
"rewards/margins": 25.10654296875,
"rewards/rejected": -14.87802734375,
"step": 1560
},
{
"epoch": 9.81377151799687,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 8.140524527772419e-10,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.6515625,
"logps/rejected": -429.36875,
"loss": 0.0,
"rewards/chosen": 8.862890625,
"rewards/margins": 24.294140625,
"rewards/rejected": -15.43125,
"step": 1570
},
{
"epoch": 9.876369327073553,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 4.3359745382104405e-10,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -121.4265625,
"logps/rejected": -428.6375,
"loss": 0.0,
"rewards/chosen": 7.8412109375,
"rewards/margins": 24.085742187500003,
"rewards/rejected": -16.24453125,
"step": 1580
},
{
"epoch": 9.938967136150234,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 1.7192254600578716e-10,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -122.84296875,
"logps/rejected": -422.765625,
"loss": 0.0,
"rewards/chosen": 8.76435546875,
"rewards/margins": 1047297.2643554688,
"rewards/rejected": -1047288.5,
"step": 1590
},
{
"epoch": 10.0,
"grad_norm": 0.0,
"kl": 0.0,
"learning_rate": 2.915227241542806e-11,
"logits/chosen": NaN,
"logits/rejected": NaN,
"logps/chosen": -125.42147435897436,
"logps/rejected": -424.49038461538464,
"loss": 0.0,
"rewards/chosen": 9.022035256410257,
"rewards/margins": 24.897435897435898,
"rewards/rejected": -15.87540064102564,
"step": 1600
}
],
"logging_steps": 10,
"max_steps": 1600,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}