{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 50,
"global_step": 1074,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009324009324009324,
"grad_norm": 269.7618103027344,
"learning_rate": 1.8604651162790698e-07,
"loss": 13.883856201171875,
"step": 5
},
{
"epoch": 0.018648018648018648,
"grad_norm": 256.5934143066406,
"learning_rate": 4.186046511627907e-07,
"loss": 13.883564758300782,
"step": 10
},
{
"epoch": 0.027972027972027972,
"grad_norm": 247.4810028076172,
"learning_rate": 6.511627906976745e-07,
"loss": 13.845947265625,
"step": 15
},
{
"epoch": 0.037296037296037296,
"grad_norm": 243.70272827148438,
"learning_rate": 8.837209302325582e-07,
"loss": 13.705046081542969,
"step": 20
},
{
"epoch": 0.046620046620046623,
"grad_norm": 238.2322235107422,
"learning_rate": 1.116279069767442e-06,
"loss": 13.556683349609376,
"step": 25
},
{
"epoch": 0.055944055944055944,
"grad_norm": 241.71214294433594,
"learning_rate": 1.3488372093023258e-06,
"loss": 13.285650634765625,
"step": 30
},
{
"epoch": 0.06526806526806526,
"grad_norm": 221.19960021972656,
"learning_rate": 1.5813953488372093e-06,
"loss": 13.05435791015625,
"step": 35
},
{
"epoch": 0.07459207459207459,
"grad_norm": 205.08326721191406,
"learning_rate": 1.8139534883720933e-06,
"loss": 12.770874786376954,
"step": 40
},
{
"epoch": 0.08391608391608392,
"grad_norm": 170.5838623046875,
"learning_rate": 2.0465116279069768e-06,
"loss": 12.280790710449219,
"step": 45
},
{
"epoch": 0.09324009324009325,
"grad_norm": 96.90670776367188,
"learning_rate": 2.2790697674418607e-06,
"loss": 11.730733489990234,
"step": 50
},
{
"epoch": 0.09324009324009325,
"eval_com_accuracy": 0.2,
"eval_rew_accuracy": 0.276,
"step": 50
},
{
"epoch": 0.10256410256410256,
"grad_norm": 61.14415740966797,
"learning_rate": 2.5116279069767446e-06,
"loss": 11.25308609008789,
"step": 55
},
{
"epoch": 0.11188811188811189,
"grad_norm": 56.027095794677734,
"learning_rate": 2.744186046511628e-06,
"loss": 11.197045135498048,
"step": 60
},
{
"epoch": 0.12121212121212122,
"grad_norm": 46.54409408569336,
"learning_rate": 2.9767441860465116e-06,
"loss": 10.874276733398437,
"step": 65
},
{
"epoch": 0.13053613053613053,
"grad_norm": 40.62468719482422,
"learning_rate": 3.2093023255813956e-06,
"loss": 10.69852294921875,
"step": 70
},
{
"epoch": 0.13986013986013987,
"grad_norm": 37.44975662231445,
"learning_rate": 3.4418604651162795e-06,
"loss": 10.560739135742187,
"step": 75
},
{
"epoch": 0.14918414918414918,
"grad_norm": 32.19042205810547,
"learning_rate": 3.674418604651163e-06,
"loss": 10.408053588867187,
"step": 80
},
{
"epoch": 0.1585081585081585,
"grad_norm": 27.208942413330078,
"learning_rate": 3.906976744186047e-06,
"loss": 10.108213806152344,
"step": 85
},
{
"epoch": 0.16783216783216784,
"grad_norm": 24.76529312133789,
"learning_rate": 4.1395348837209304e-06,
"loss": 9.929393005371093,
"step": 90
},
{
"epoch": 0.17715617715617715,
"grad_norm": 23.917495727539062,
"learning_rate": 4.372093023255815e-06,
"loss": 9.677659606933593,
"step": 95
},
{
"epoch": 0.1864801864801865,
"grad_norm": 22.98892593383789,
"learning_rate": 4.604651162790698e-06,
"loss": 9.481817626953125,
"step": 100
},
{
"epoch": 0.1864801864801865,
"eval_com_accuracy": 0.176,
"eval_rew_accuracy": 0.264,
"step": 100
},
{
"epoch": 0.1958041958041958,
"grad_norm": 25.177200317382812,
"learning_rate": 4.837209302325582e-06,
"loss": 9.42150650024414,
"step": 105
},
{
"epoch": 0.20512820512820512,
"grad_norm": 18.95603370666504,
"learning_rate": 5.069767441860466e-06,
"loss": 9.267645263671875,
"step": 110
},
{
"epoch": 0.21445221445221446,
"grad_norm": 22.155452728271484,
"learning_rate": 5.302325581395349e-06,
"loss": 9.06854248046875,
"step": 115
},
{
"epoch": 0.22377622377622378,
"grad_norm": 20.26909065246582,
"learning_rate": 5.534883720930233e-06,
"loss": 9.045303344726562,
"step": 120
},
{
"epoch": 0.2331002331002331,
"grad_norm": 26.49457359313965,
"learning_rate": 5.7674418604651175e-06,
"loss": 8.91327362060547,
"step": 125
},
{
"epoch": 0.24242424242424243,
"grad_norm": 19.886178970336914,
"learning_rate": 6e-06,
"loss": 8.950289154052735,
"step": 130
},
{
"epoch": 0.2517482517482518,
"grad_norm": 18.22088050842285,
"learning_rate": 6.2325581395348845e-06,
"loss": 8.710862731933593,
"step": 135
},
{
"epoch": 0.26107226107226106,
"grad_norm": 21.09984016418457,
"learning_rate": 6.465116279069767e-06,
"loss": 8.762718200683594,
"step": 140
},
{
"epoch": 0.2703962703962704,
"grad_norm": 17.96813201904297,
"learning_rate": 6.6976744186046515e-06,
"loss": 8.533232116699219,
"step": 145
},
{
"epoch": 0.27972027972027974,
"grad_norm": 20.52909278869629,
"learning_rate": 6.930232558139536e-06,
"loss": 8.279258728027344,
"step": 150
},
{
"epoch": 0.27972027972027974,
"eval_com_accuracy": 0.152,
"eval_rew_accuracy": 0.252,
"step": 150
},
{
"epoch": 0.289044289044289,
"grad_norm": 19.475040435791016,
"learning_rate": 7.1627906976744185e-06,
"loss": 8.369435882568359,
"step": 155
},
{
"epoch": 0.29836829836829837,
"grad_norm": 16.741973876953125,
"learning_rate": 7.395348837209303e-06,
"loss": 8.280664825439453,
"step": 160
},
{
"epoch": 0.3076923076923077,
"grad_norm": 19.79235076904297,
"learning_rate": 7.627906976744187e-06,
"loss": 8.200827026367188,
"step": 165
},
{
"epoch": 0.317016317016317,
"grad_norm": 32.991554260253906,
"learning_rate": 7.86046511627907e-06,
"loss": 7.973581695556641,
"step": 170
},
{
"epoch": 0.32634032634032634,
"grad_norm": 17.949445724487305,
"learning_rate": 8.093023255813955e-06,
"loss": 8.141377258300782,
"step": 175
},
{
"epoch": 0.3356643356643357,
"grad_norm": 16.19559097290039,
"learning_rate": 8.325581395348837e-06,
"loss": 7.837094116210937,
"step": 180
},
{
"epoch": 0.34498834498834496,
"grad_norm": 23.260622024536133,
"learning_rate": 8.558139534883722e-06,
"loss": 7.763467407226562,
"step": 185
},
{
"epoch": 0.3543123543123543,
"grad_norm": 24.858335494995117,
"learning_rate": 8.790697674418606e-06,
"loss": 7.886874389648438,
"step": 190
},
{
"epoch": 0.36363636363636365,
"grad_norm": 18.59360122680664,
"learning_rate": 9.023255813953489e-06,
"loss": 7.7624969482421875,
"step": 195
},
{
"epoch": 0.372960372960373,
"grad_norm": 18.056495666503906,
"learning_rate": 9.255813953488373e-06,
"loss": 7.4621437072753904,
"step": 200
},
{
"epoch": 0.372960372960373,
"eval_com_accuracy": 0.144,
"eval_rew_accuracy": 0.332,
"step": 200
},
{
"epoch": 0.3822843822843823,
"grad_norm": 16.496261596679688,
"learning_rate": 9.488372093023258e-06,
"loss": 7.526261901855468,
"step": 205
},
{
"epoch": 0.3916083916083916,
"grad_norm": 16.77888298034668,
"learning_rate": 9.72093023255814e-06,
"loss": 7.624967193603515,
"step": 210
},
{
"epoch": 0.40093240093240096,
"grad_norm": 17.096778869628906,
"learning_rate": 9.953488372093025e-06,
"loss": 7.337849426269531,
"step": 215
},
{
"epoch": 0.41025641025641024,
"grad_norm": 18.3544979095459,
"learning_rate": 9.999894343918674e-06,
"loss": 7.354197692871094,
"step": 220
},
{
"epoch": 0.4195804195804196,
"grad_norm": 15.287137985229492,
"learning_rate": 9.999465123741172e-06,
"loss": 7.085916137695312,
"step": 225
},
{
"epoch": 0.4289044289044289,
"grad_norm": 17.03432273864746,
"learning_rate": 9.998705764284132e-06,
"loss": 7.059596252441406,
"step": 230
},
{
"epoch": 0.4382284382284382,
"grad_norm": 15.470928192138672,
"learning_rate": 9.99761631569179e-06,
"loss": 7.118296813964844,
"step": 235
},
{
"epoch": 0.44755244755244755,
"grad_norm": 15.732117652893066,
"learning_rate": 9.9961968499058e-06,
"loss": 6.981436157226563,
"step": 240
},
{
"epoch": 0.4568764568764569,
"grad_norm": 20.92630958557129,
"learning_rate": 9.994447460660473e-06,
"loss": 6.96953125,
"step": 245
},
{
"epoch": 0.4662004662004662,
"grad_norm": 14.848095893859863,
"learning_rate": 9.992368263476585e-06,
"loss": 6.831211090087891,
"step": 250
},
{
"epoch": 0.4662004662004662,
"eval_com_accuracy": 0.132,
"eval_rew_accuracy": 0.312,
"step": 250
},
{
"epoch": 0.4755244755244755,
"grad_norm": 18.59755516052246,
"learning_rate": 9.989959395653756e-06,
"loss": 6.873024749755859,
"step": 255
},
{
"epoch": 0.48484848484848486,
"grad_norm": 15.705364227294922,
"learning_rate": 9.98722101626138e-06,
"loss": 6.633563232421875,
"step": 260
},
{
"epoch": 0.49417249417249415,
"grad_norm": 39.2294807434082,
"learning_rate": 9.984153306128124e-06,
"loss": 6.63948974609375,
"step": 265
},
{
"epoch": 0.5034965034965035,
"grad_norm": 13.209358215332031,
"learning_rate": 9.980756467829977e-06,
"loss": 6.688297271728516,
"step": 270
},
{
"epoch": 0.5128205128205128,
"grad_norm": 14.645393371582031,
"learning_rate": 9.977030725676887e-06,
"loss": 6.4885505676269535,
"step": 275
},
{
"epoch": 0.5221445221445221,
"grad_norm": 17.008819580078125,
"learning_rate": 9.972976325697938e-06,
"loss": 6.78935546875,
"step": 280
},
{
"epoch": 0.5314685314685315,
"grad_norm": 16.029518127441406,
"learning_rate": 9.96859353562511e-06,
"loss": 6.681609344482422,
"step": 285
},
{
"epoch": 0.5407925407925408,
"grad_norm": 13.878348350524902,
"learning_rate": 9.963882644875594e-06,
"loss": 6.528868103027344,
"step": 290
},
{
"epoch": 0.5501165501165501,
"grad_norm": 13.996464729309082,
"learning_rate": 9.958843964532683e-06,
"loss": 6.431344604492187,
"step": 295
},
{
"epoch": 0.5594405594405595,
"grad_norm": 15.72003173828125,
"learning_rate": 9.953477827325229e-06,
"loss": 6.5927978515625,
"step": 300
},
{
"epoch": 0.5594405594405595,
"eval_com_accuracy": 0.148,
"eval_rew_accuracy": 0.368,
"step": 300
},
{
"epoch": 0.5687645687645687,
"grad_norm": 16.416725158691406,
"learning_rate": 9.947784587605678e-06,
"loss": 6.411885833740234,
"step": 305
},
{
"epoch": 0.578088578088578,
"grad_norm": 14.880790710449219,
"learning_rate": 9.941764621326655e-06,
"loss": 6.5298927307128904,
"step": 310
},
{
"epoch": 0.5874125874125874,
"grad_norm": 17.828296661376953,
"learning_rate": 9.935418326016153e-06,
"loss": 6.045618057250977,
"step": 315
},
{
"epoch": 0.5967365967365967,
"grad_norm": 13.30245590209961,
"learning_rate": 9.928746120751275e-06,
"loss": 6.1650840759277346,
"step": 320
},
{
"epoch": 0.6060606060606061,
"grad_norm": 13.527432441711426,
"learning_rate": 9.921748446130564e-06,
"loss": 6.349945831298828,
"step": 325
},
{
"epoch": 0.6153846153846154,
"grad_norm": 14.491817474365234,
"learning_rate": 9.9144257642449e-06,
"loss": 6.232943725585938,
"step": 330
},
{
"epoch": 0.6247086247086248,
"grad_norm": 15.7533597946167,
"learning_rate": 9.906778558647e-06,
"loss": 6.564260864257813,
"step": 335
},
{
"epoch": 0.634032634032634,
"grad_norm": 16.538602828979492,
"learning_rate": 9.898807334319471e-06,
"loss": 6.122506332397461,
"step": 340
},
{
"epoch": 0.6433566433566433,
"grad_norm": 16.459688186645508,
"learning_rate": 9.890512617641474e-06,
"loss": 6.332431030273438,
"step": 345
},
{
"epoch": 0.6526806526806527,
"grad_norm": 14.879058837890625,
"learning_rate": 9.881894956353963e-06,
"loss": 6.118003082275391,
"step": 350
},
{
"epoch": 0.6526806526806527,
"eval_com_accuracy": 0.164,
"eval_rew_accuracy": 0.364,
"step": 350
},
{
"epoch": 0.662004662004662,
"grad_norm": 16.02516746520996,
"learning_rate": 9.87295491952351e-06,
"loss": 6.241584014892578,
"step": 355
},
{
"epoch": 0.6713286713286714,
"grad_norm": 14.9837646484375,
"learning_rate": 9.863693097504733e-06,
"loss": 6.083467864990235,
"step": 360
},
{
"epoch": 0.6806526806526807,
"grad_norm": 14.138517379760742,
"learning_rate": 9.854110101901308e-06,
"loss": 6.143560791015625,
"step": 365
},
{
"epoch": 0.6899766899766899,
"grad_norm": 12.696243286132812,
"learning_rate": 9.844206565525585e-06,
"loss": 6.061418914794922,
"step": 370
},
{
"epoch": 0.6993006993006993,
"grad_norm": 15.22846508026123,
"learning_rate": 9.833983142356792e-06,
"loss": 6.193332672119141,
"step": 375
},
{
"epoch": 0.7086247086247086,
"grad_norm": 15.284857749938965,
"learning_rate": 9.823440507497863e-06,
"loss": 6.0868377685546875,
"step": 380
},
{
"epoch": 0.717948717948718,
"grad_norm": 14.117541313171387,
"learning_rate": 9.812579357130848e-06,
"loss": 6.289948272705078,
"step": 385
},
{
"epoch": 0.7272727272727273,
"grad_norm": 12.648367881774902,
"learning_rate": 9.801400408470943e-06,
"loss": 6.194795227050781,
"step": 390
},
{
"epoch": 0.7365967365967366,
"grad_norm": 12.394021034240723,
"learning_rate": 9.789904399719124e-06,
"loss": 6.031880950927734,
"step": 395
},
{
"epoch": 0.745920745920746,
"grad_norm": 15.566425323486328,
"learning_rate": 9.778092090013416e-06,
"loss": 6.0966644287109375,
"step": 400
},
{
"epoch": 0.745920745920746,
"eval_com_accuracy": 0.184,
"eval_rew_accuracy": 0.364,
"step": 400
},
{
"epoch": 0.7552447552447552,
"grad_norm": 14.885455131530762,
"learning_rate": 9.76596425937874e-06,
"loss": 6.476510620117187,
"step": 405
},
{
"epoch": 0.7645687645687645,
"grad_norm": 16.627117156982422,
"learning_rate": 9.753521708675426e-06,
"loss": 5.9605262756347654,
"step": 410
},
{
"epoch": 0.7738927738927739,
"grad_norm": 14.517923355102539,
"learning_rate": 9.740765259546312e-06,
"loss": 6.1104694366455075,
"step": 415
},
{
"epoch": 0.7832167832167832,
"grad_norm": 12.007637977600098,
"learning_rate": 9.727695754362498e-06,
"loss": 5.964785766601563,
"step": 420
},
{
"epoch": 0.7925407925407926,
"grad_norm": 16.17314338684082,
"learning_rate": 9.714314056167711e-06,
"loss": 5.976337432861328,
"step": 425
},
{
"epoch": 0.8018648018648019,
"grad_norm": 13.246196746826172,
"learning_rate": 9.700621048621322e-06,
"loss": 5.936331939697266,
"step": 430
},
{
"epoch": 0.8111888111888111,
"grad_norm": 15.030324935913086,
"learning_rate": 9.686617635939988e-06,
"loss": 6.209032821655273,
"step": 435
},
{
"epoch": 0.8205128205128205,
"grad_norm": 13.853086471557617,
"learning_rate": 9.672304742837945e-06,
"loss": 5.985638427734375,
"step": 440
},
{
"epoch": 0.8298368298368298,
"grad_norm": 13.534188270568848,
"learning_rate": 9.657683314465948e-06,
"loss": 5.7971649169921875,
"step": 445
},
{
"epoch": 0.8391608391608392,
"grad_norm": 26.1879825592041,
"learning_rate": 9.642754316348846e-06,
"loss": 5.949985504150391,
"step": 450
},
{
"epoch": 0.8391608391608392,
"eval_com_accuracy": 0.176,
"eval_rew_accuracy": 0.34,
"step": 450
},
{
"epoch": 0.8484848484848485,
"grad_norm": 16.848834991455078,
"learning_rate": 9.627518734321837e-06,
"loss": 6.1193115234375,
"step": 455
},
{
"epoch": 0.8578088578088578,
"grad_norm": 30.765796661376953,
"learning_rate": 9.61197757446536e-06,
"loss": 5.74780158996582,
"step": 460
},
{
"epoch": 0.8671328671328671,
"grad_norm": 13.27775764465332,
"learning_rate": 9.596131863038664e-06,
"loss": 5.994970703125,
"step": 465
},
{
"epoch": 0.8764568764568764,
"grad_norm": 14.923792839050293,
"learning_rate": 9.579982646412039e-06,
"loss": 5.810473251342773,
"step": 470
},
{
"epoch": 0.8857808857808858,
"grad_norm": 14.351722717285156,
"learning_rate": 9.563530990997707e-06,
"loss": 6.09692268371582,
"step": 475
},
{
"epoch": 0.8951048951048951,
"grad_norm": 11.83583927154541,
"learning_rate": 9.546777983179421e-06,
"loss": 5.867227935791016,
"step": 480
},
{
"epoch": 0.9044289044289044,
"grad_norm": 19.415884017944336,
"learning_rate": 9.529724729240712e-06,
"loss": 5.834615707397461,
"step": 485
},
{
"epoch": 0.9137529137529138,
"grad_norm": 12.147261619567871,
"learning_rate": 9.512372355291838e-06,
"loss": 6.039286041259766,
"step": 490
},
{
"epoch": 0.9230769230769231,
"grad_norm": 12.706372261047363,
"learning_rate": 9.494722007195427e-06,
"loss": 6.06304702758789,
"step": 495
},
{
"epoch": 0.9324009324009324,
"grad_norm": 12.642041206359863,
"learning_rate": 9.476774850490803e-06,
"loss": 5.9729866027832035,
"step": 500
},
{
"epoch": 0.9324009324009324,
"eval_com_accuracy": 0.204,
"eval_rew_accuracy": 0.328,
"step": 500
},
{
"epoch": 0.9417249417249417,
"grad_norm": 12.66169548034668,
"learning_rate": 9.458532070317021e-06,
"loss": 5.8281097412109375,
"step": 505
},
{
"epoch": 0.951048951048951,
"grad_norm": 13.191854476928711,
"learning_rate": 9.439994871334614e-06,
"loss": 6.183546447753907,
"step": 510
},
{
"epoch": 0.9603729603729604,
"grad_norm": 12.52676010131836,
"learning_rate": 9.421164477646031e-06,
"loss": 5.785601043701172,
"step": 515
},
{
"epoch": 0.9696969696969697,
"grad_norm": 13.225418090820312,
"learning_rate": 9.402042132714817e-06,
"loss": 5.784825134277344,
"step": 520
},
{
"epoch": 0.9790209790209791,
"grad_norm": 12.035611152648926,
"learning_rate": 9.382629099283486e-06,
"loss": 5.9122272491455075,
"step": 525
},
{
"epoch": 0.9883449883449883,
"grad_norm": 12.166351318359375,
"learning_rate": 9.362926659290149e-06,
"loss": 5.790053939819336,
"step": 530
},
{
"epoch": 0.9976689976689976,
"grad_norm": 13.22154426574707,
"learning_rate": 9.342936113783855e-06,
"loss": 5.745006561279297,
"step": 535
},
{
"epoch": 1.0055944055944055,
"grad_norm": 13.12752628326416,
"learning_rate": 9.32265878283868e-06,
"loss": 5.1531005859375,
"step": 540
},
{
"epoch": 1.014918414918415,
"grad_norm": 15.964373588562012,
"learning_rate": 9.302096005466547e-06,
"loss": 5.7917236328125,
"step": 545
},
{
"epoch": 1.0242424242424242,
"grad_norm": 18.454797744750977,
"learning_rate": 9.281249139528816e-06,
"loss": 5.967070007324219,
"step": 550
},
{
"epoch": 1.0242424242424242,
"eval_com_accuracy": 0.212,
"eval_rew_accuracy": 0.352,
"step": 550
},
{
"epoch": 1.0335664335664336,
"grad_norm": 15.802419662475586,
"learning_rate": 9.260119561646614e-06,
"loss": 6.093529129028321,
"step": 555
},
{
"epoch": 1.0428904428904429,
"grad_norm": 21.95537757873535,
"learning_rate": 9.238708667109924e-06,
"loss": 5.489861297607422,
"step": 560
},
{
"epoch": 1.0522144522144523,
"grad_norm": 13.373199462890625,
"learning_rate": 9.217017869785453e-06,
"loss": 5.871435928344726,
"step": 565
},
{
"epoch": 1.0615384615384615,
"grad_norm": 13.36340618133545,
"learning_rate": 9.19504860202327e-06,
"loss": 5.805611038208008,
"step": 570
},
{
"epoch": 1.0708624708624708,
"grad_norm": 14.099553108215332,
"learning_rate": 9.172802314562214e-06,
"loss": 5.600634765625,
"step": 575
},
{
"epoch": 1.0801864801864802,
"grad_norm": 15.012040138244629,
"learning_rate": 9.150280476434098e-06,
"loss": 5.793732452392578,
"step": 580
},
{
"epoch": 1.0895104895104895,
"grad_norm": 15.98738956451416,
"learning_rate": 9.127484574866699e-06,
"loss": 5.8642620086669925,
"step": 585
},
{
"epoch": 1.098834498834499,
"grad_norm": 13.785572052001953,
"learning_rate": 9.104416115185557e-06,
"loss": 5.770995330810547,
"step": 590
},
{
"epoch": 1.1081585081585081,
"grad_norm": 12.145087242126465,
"learning_rate": 9.08107662071456e-06,
"loss": 5.850731277465821,
"step": 595
},
{
"epoch": 1.1174825174825176,
"grad_norm": 13.681862831115723,
"learning_rate": 9.057467632675357e-06,
"loss": 5.782034301757813,
"step": 600
},
{
"epoch": 1.1174825174825176,
"eval_com_accuracy": 0.22,
"eval_rew_accuracy": 0.348,
"step": 600
},
{
"epoch": 1.1268065268065268,
"grad_norm": 13.870027542114258,
"learning_rate": 9.033590710085584e-06,
"loss": 5.723427963256836,
"step": 605
},
{
"epoch": 1.136130536130536,
"grad_norm": 16.20020294189453,
"learning_rate": 9.00944742965592e-06,
"loss": 5.953633117675781,
"step": 610
},
{
"epoch": 1.1454545454545455,
"grad_norm": 15.48993968963623,
"learning_rate": 8.985039385685952e-06,
"loss": 5.809123992919922,
"step": 615
},
{
"epoch": 1.1547785547785547,
"grad_norm": 13.686211585998535,
"learning_rate": 8.960368189958913e-06,
"loss": 5.813122177124024,
"step": 620
},
{
"epoch": 1.1641025641025642,
"grad_norm": 13.733199119567871,
"learning_rate": 8.935435471635238e-06,
"loss": 5.6748512268066404,
"step": 625
},
{
"epoch": 1.1734265734265734,
"grad_norm": 12.450380325317383,
"learning_rate": 8.91024287714499e-06,
"loss": 5.6834667205810545,
"step": 630
},
{
"epoch": 1.1827505827505829,
"grad_norm": 14.111934661865234,
"learning_rate": 8.884792070079128e-06,
"loss": 5.680038833618164,
"step": 635
},
{
"epoch": 1.192074592074592,
"grad_norm": 13.175325393676758,
"learning_rate": 8.859084731079664e-06,
"loss": 5.445759201049805,
"step": 640
},
{
"epoch": 1.2013986013986013,
"grad_norm": 14.951041221618652,
"learning_rate": 8.833122557728667e-06,
"loss": 5.673246765136719,
"step": 645
},
{
"epoch": 1.2107226107226108,
"grad_norm": 12.364238739013672,
"learning_rate": 8.806907264436183e-06,
"loss": 5.234909439086914,
"step": 650
},
{
"epoch": 1.2107226107226108,
"eval_com_accuracy": 0.224,
"eval_rew_accuracy": 0.348,
"step": 650
},
{
"epoch": 1.22004662004662,
"grad_norm": 13.065054893493652,
"learning_rate": 8.780440582327005e-06,
"loss": 5.442026519775391,
"step": 655
},
{
"epoch": 1.2293706293706295,
"grad_norm": 13.872124671936035,
"learning_rate": 8.75372425912637e-06,
"loss": 5.736603546142578,
"step": 660
},
{
"epoch": 1.2386946386946387,
"grad_norm": 13.677570343017578,
"learning_rate": 8.726760059044542e-06,
"loss": 5.951699447631836,
"step": 665
},
{
"epoch": 1.248018648018648,
"grad_norm": 15.023405075073242,
"learning_rate": 8.699549762660318e-06,
"loss": 5.936520004272461,
"step": 670
},
{
"epoch": 1.2573426573426574,
"grad_norm": 12.804817199707031,
"learning_rate": 8.672095166803445e-06,
"loss": 5.778074645996094,
"step": 675
},
{
"epoch": 1.2666666666666666,
"grad_norm": 13.148968696594238,
"learning_rate": 8.644398084435959e-06,
"loss": 5.7417655944824215,
"step": 680
},
{
"epoch": 1.275990675990676,
"grad_norm": 14.131410598754883,
"learning_rate": 8.616460344532483e-06,
"loss": 5.58878173828125,
"step": 685
},
{
"epoch": 1.2853146853146853,
"grad_norm": 14.100456237792969,
"learning_rate": 8.588283791959437e-06,
"loss": 5.546084976196289,
"step": 690
},
{
"epoch": 1.2946386946386945,
"grad_norm": 13.94593620300293,
"learning_rate": 8.559870287353214e-06,
"loss": 6.0555156707763675,
"step": 695
},
{
"epoch": 1.303962703962704,
"grad_norm": 18.369823455810547,
"learning_rate": 8.531221706997316e-06,
"loss": 5.930012512207031,
"step": 700
},
{
"epoch": 1.303962703962704,
"eval_com_accuracy": 0.24,
"eval_rew_accuracy": 0.364,
"step": 700
},
{
"epoch": 1.3132867132867134,
"grad_norm": 14.022618293762207,
"learning_rate": 8.502339942698463e-06,
"loss": 5.831372833251953,
"step": 705
},
{
"epoch": 1.3226107226107227,
"grad_norm": 12.844898223876953,
"learning_rate": 8.473226901661643e-06,
"loss": 5.503532409667969,
"step": 710
},
{
"epoch": 1.3319347319347319,
"grad_norm": 13.991181373596191,
"learning_rate": 8.443884506364192e-06,
"loss": 5.344602966308594,
"step": 715
},
{
"epoch": 1.3412587412587413,
"grad_norm": 13.290801048278809,
"learning_rate": 8.414314694428842e-06,
"loss": 5.7142791748046875,
"step": 720
},
{
"epoch": 1.3505827505827506,
"grad_norm": 13.986117362976074,
"learning_rate": 8.384519418495755e-06,
"loss": 5.509880065917969,
"step": 725
},
{
"epoch": 1.3599067599067598,
"grad_norm": 17.483638763427734,
"learning_rate": 8.354500646093592e-06,
"loss": 5.769047546386719,
"step": 730
},
{
"epoch": 1.3692307692307693,
"grad_norm": 12.383395195007324,
"learning_rate": 8.324260359509594e-06,
"loss": 5.693521881103516,
"step": 735
},
{
"epoch": 1.3785547785547785,
"grad_norm": 12.225541114807129,
"learning_rate": 8.29380055565866e-06,
"loss": 5.618255233764648,
"step": 740
},
{
"epoch": 1.387878787878788,
"grad_norm": 11.737737655639648,
"learning_rate": 8.263123245951504e-06,
"loss": 5.702220916748047,
"step": 745
},
{
"epoch": 1.3972027972027972,
"grad_norm": 14.235527992248535,
"learning_rate": 8.232230456161819e-06,
"loss": 5.744013977050781,
"step": 750
},
{
"epoch": 1.3972027972027972,
"eval_com_accuracy": 0.24,
"eval_rew_accuracy": 0.368,
"step": 750
},
{
"epoch": 1.4065268065268066,
"grad_norm": 12.433022499084473,
"learning_rate": 8.201124226292505e-06,
"loss": 5.6122283935546875,
"step": 755
},
{
"epoch": 1.4158508158508158,
"grad_norm": 13.874029159545898,
"learning_rate": 8.169806610440966e-06,
"loss": 5.530184936523438,
"step": 760
},
{
"epoch": 1.425174825174825,
"grad_norm": 12.640862464904785,
"learning_rate": 8.138279676663458e-06,
"loss": 5.602608489990234,
"step": 765
},
{
"epoch": 1.4344988344988345,
"grad_norm": 13.672120094299316,
"learning_rate": 8.106545506838533e-06,
"loss": 5.971489715576172,
"step": 770
},
{
"epoch": 1.4438228438228438,
"grad_norm": 12.973994255065918,
"learning_rate": 8.074606196529554e-06,
"loss": 5.967403411865234,
"step": 775
},
{
"epoch": 1.4531468531468532,
"grad_norm": 12.31808090209961,
"learning_rate": 8.042463854846325e-06,
"loss": 5.71468505859375,
"step": 780
},
{
"epoch": 1.4624708624708624,
"grad_norm": 12.416245460510254,
"learning_rate": 8.010120604305806e-06,
"loss": 5.836771392822266,
"step": 785
},
{
"epoch": 1.471794871794872,
"grad_norm": 13.020674705505371,
"learning_rate": 7.977578580691963e-06,
"loss": 5.71131591796875,
"step": 790
},
{
"epoch": 1.4811188811188811,
"grad_norm": 13.946771621704102,
"learning_rate": 7.944839932914718e-06,
"loss": 5.564648818969727,
"step": 795
},
{
"epoch": 1.4904428904428904,
"grad_norm": 12.287398338317871,
"learning_rate": 7.91190682286806e-06,
"loss": 5.753592300415039,
"step": 800
},
{
"epoch": 1.4904428904428904,
"eval_com_accuracy": 0.236,
"eval_rew_accuracy": 0.384,
"step": 800
},
{
"epoch": 1.4997668997668998,
"grad_norm": 20.467731475830078,
"learning_rate": 7.878781425287277e-06,
"loss": 5.64927978515625,
"step": 805
},
{
"epoch": 1.509090909090909,
"grad_norm": 13.811100006103516,
"learning_rate": 7.84546592760535e-06,
"loss": 5.415856170654297,
"step": 810
},
{
"epoch": 1.5184149184149183,
"grad_norm": 12.4107666015625,
"learning_rate": 7.811962529808499e-06,
"loss": 5.767454147338867,
"step": 815
},
{
"epoch": 1.5277389277389277,
"grad_norm": 14.148603439331055,
"learning_rate": 7.778273444290921e-06,
"loss": 5.463053131103516,
"step": 820
},
{
"epoch": 1.5370629370629372,
"grad_norm": 13.375722885131836,
"learning_rate": 7.744400895708683e-06,
"loss": 5.625373077392578,
"step": 825
},
{
"epoch": 1.5463869463869464,
"grad_norm": 12.991719245910645,
"learning_rate": 7.710347120832821e-06,
"loss": 5.6271202087402346,
"step": 830
},
{
"epoch": 1.5557109557109556,
"grad_norm": 12.906146049499512,
"learning_rate": 7.676114368401635e-06,
"loss": 5.5160057067871096,
"step": 835
},
{
"epoch": 1.565034965034965,
"grad_norm": 12.344596862792969,
"learning_rate": 7.641704898972194e-06,
"loss": 5.506198120117188,
"step": 840
},
{
"epoch": 1.5743589743589743,
"grad_norm": 13.285745620727539,
"learning_rate": 7.607120984771058e-06,
"loss": 5.7151634216308596,
"step": 845
},
{
"epoch": 1.5836829836829835,
"grad_norm": 12.432035446166992,
"learning_rate": 7.572364909544235e-06,
"loss": 5.852275085449219,
"step": 850
},
{
"epoch": 1.5836829836829835,
"eval_com_accuracy": 0.228,
"eval_rew_accuracy": 0.332,
"step": 850
},
{
"epoch": 1.593006993006993,
"grad_norm": 12.581634521484375,
"learning_rate": 7.537438968406372e-06,
"loss": 5.735688018798828,
"step": 855
},
{
"epoch": 1.6023310023310025,
"grad_norm": 13.161237716674805,
"learning_rate": 7.502345467689202e-06,
"loss": 5.648350524902344,
"step": 860
},
{
"epoch": 1.6116550116550117,
"grad_norm": 12.48270034790039,
"learning_rate": 7.4670867247892346e-06,
"loss": 5.635168075561523,
"step": 865
},
{
"epoch": 1.620979020979021,
"grad_norm": 13.835477828979492,
"learning_rate": 7.431665068014737e-06,
"loss": 5.548571014404297,
"step": 870
},
{
"epoch": 1.6303030303030304,
"grad_norm": 12.403617858886719,
"learning_rate": 7.396082836431981e-06,
"loss": 5.7486827850341795,
"step": 875
},
{
"epoch": 1.6396270396270396,
"grad_norm": 13.730851173400879,
"learning_rate": 7.3603423797107845e-06,
"loss": 5.288154602050781,
"step": 880
},
{
"epoch": 1.6489510489510488,
"grad_norm": 14.688888549804688,
"learning_rate": 7.324446057969346e-06,
"loss": 5.648311614990234,
"step": 885
},
{
"epoch": 1.6582750582750583,
"grad_norm": 13.311734199523926,
"learning_rate": 7.288396241618401e-06,
"loss": 5.636775207519531,
"step": 890
},
{
"epoch": 1.6675990675990677,
"grad_norm": 12.776606559753418,
"learning_rate": 7.252195311204689e-06,
"loss": 5.666929626464844,
"step": 895
},
{
"epoch": 1.676923076923077,
"grad_norm": 14.130080223083496,
"learning_rate": 7.215845657253755e-06,
"loss": 5.534748840332031,
"step": 900
},
{
"epoch": 1.676923076923077,
"eval_com_accuracy": 0.264,
"eval_rew_accuracy": 0.364,
"step": 900
},
{
"epoch": 1.6862470862470862,
"grad_norm": 14.159326553344727,
"learning_rate": 7.1793496801120885e-06,
"loss": 5.652375793457031,
"step": 905
},
{
"epoch": 1.6955710955710956,
"grad_norm": 13.113655090332031,
"learning_rate": 7.1427097897886225e-06,
"loss": 5.383832168579102,
"step": 910
},
{
"epoch": 1.7048951048951049,
"grad_norm": 14.203001022338867,
"learning_rate": 7.105928405795584e-06,
"loss": 5.6499076843261715,
"step": 915
},
{
"epoch": 1.714219114219114,
"grad_norm": 12.1084623336792,
"learning_rate": 7.069007956988718e-06,
"loss": 5.560025787353515,
"step": 920
},
{
"epoch": 1.7235431235431236,
"grad_norm": 13.819426536560059,
"learning_rate": 7.031950881406913e-06,
"loss": 5.535141372680664,
"step": 925
},
{
"epoch": 1.732867132867133,
"grad_norm": 17.341110229492188,
"learning_rate": 6.994759626111189e-06,
"loss": 5.697111511230469,
"step": 930
},
{
"epoch": 1.7421911421911422,
"grad_norm": 16.404672622680664,
"learning_rate": 6.957436647023117e-06,
"loss": 5.4155632019042965,
"step": 935
},
{
"epoch": 1.7515151515151515,
"grad_norm": 13.115015029907227,
"learning_rate": 6.919984408762632e-06,
"loss": 5.758349990844726,
"step": 940
},
{
"epoch": 1.760839160839161,
"grad_norm": 13.850481986999512,
"learning_rate": 6.882405384485294e-06,
"loss": 5.69927864074707,
"step": 945
},
{
"epoch": 1.7701631701631702,
"grad_norm": 12.921819686889648,
"learning_rate": 6.844702055718964e-06,
"loss": 5.631483459472657,
"step": 950
},
{
"epoch": 1.7701631701631702,
"eval_com_accuracy": 0.26,
"eval_rew_accuracy": 0.352,
"step": 950
},
{
"epoch": 1.7794871794871794,
"grad_norm": 13.401888847351074,
"learning_rate": 6.806876912199945e-06,
"loss": 5.824296569824218,
"step": 955
},
{
"epoch": 1.7888111888111888,
"grad_norm": 13.481993675231934,
"learning_rate": 6.768932451708557e-06,
"loss": 5.752721786499023,
"step": 960
},
{
"epoch": 1.7981351981351983,
"grad_norm": 14.022424697875977,
"learning_rate": 6.730871179904218e-06,
"loss": 5.453369522094727,
"step": 965
},
{
"epoch": 1.8074592074592073,
"grad_norm": 13.432122230529785,
"learning_rate": 6.692695610159966e-06,
"loss": 5.664297103881836,
"step": 970
},
{
"epoch": 1.8167832167832167,
"grad_norm": 14.292946815490723,
"learning_rate": 6.6544082633964955e-06,
"loss": 5.576699066162109,
"step": 975
},
{
"epoch": 1.8261072261072262,
"grad_norm": 14.256867408752441,
"learning_rate": 6.6160116679156874e-06,
"loss": 5.551004791259766,
"step": 980
},
{
"epoch": 1.8354312354312354,
"grad_norm": 14.11919116973877,
"learning_rate": 6.577508359233653e-06,
"loss": 5.580959320068359,
"step": 985
},
{
"epoch": 1.8447552447552447,
"grad_norm": 14.006019592285156,
"learning_rate": 6.538900879913301e-06,
"loss": 5.443265533447265,
"step": 990
},
{
"epoch": 1.8540792540792541,
"grad_norm": 11.976597785949707,
"learning_rate": 6.500191779396439e-06,
"loss": 5.410481643676758,
"step": 995
},
{
"epoch": 1.8634032634032636,
"grad_norm": 14.439790725708008,
"learning_rate": 6.461383613835427e-06,
"loss": 5.3311511993408205,
"step": 1000
},
{
"epoch": 1.8634032634032636,
"eval_com_accuracy": 0.24,
"eval_rew_accuracy": 0.356,
"step": 1000
},
{
"epoch": 1.8727272727272726,
"grad_norm": 15.29392147064209,
"learning_rate": 6.4224789459243705e-06,
"loss": 5.477576446533203,
"step": 1005
},
{
"epoch": 1.882051282051282,
"grad_norm": 14.032716751098633,
"learning_rate": 6.383480344729903e-06,
"loss": 5.458187103271484,
"step": 1010
},
{
"epoch": 1.8913752913752915,
"grad_norm": 11.861150741577148,
"learning_rate": 6.344390385521534e-06,
"loss": 5.806562423706055,
"step": 1015
},
{
"epoch": 1.9006993006993007,
"grad_norm": 14.015438079833984,
"learning_rate": 6.305211649601595e-06,
"loss": 5.2158203125,
"step": 1020
},
{
"epoch": 1.91002331002331,
"grad_norm": 15.672555923461914,
"learning_rate": 6.265946724134782e-06,
"loss": 5.469319915771484,
"step": 1025
},
{
"epoch": 1.9193473193473194,
"grad_norm": 13.891338348388672,
"learning_rate": 6.226598201977299e-06,
"loss": 5.495826721191406,
"step": 1030
},
{
"epoch": 1.9286713286713286,
"grad_norm": 12.441986083984375,
"learning_rate": 6.187168681505666e-06,
"loss": 5.466391372680664,
"step": 1035
},
{
"epoch": 1.9379953379953379,
"grad_norm": 13.905447006225586,
"learning_rate": 6.1476607664451105e-06,
"loss": 5.558423233032227,
"step": 1040
},
{
"epoch": 1.9473193473193473,
"grad_norm": 14.817802429199219,
"learning_rate": 6.1080770656976444e-06,
"loss": 5.50462646484375,
"step": 1045
},
{
"epoch": 1.9566433566433568,
"grad_norm": 14.293425559997559,
"learning_rate": 6.068420193169779e-06,
"loss": 5.567475128173828,
"step": 1050
},
{
"epoch": 1.9566433566433568,
"eval_com_accuracy": 0.26,
"eval_rew_accuracy": 0.384,
"step": 1050
},
{
"epoch": 1.965967365967366,
"grad_norm": 13.285664558410645,
"learning_rate": 6.0286927675999205e-06,
"loss": 5.3831031799316404,
"step": 1055
},
{
"epoch": 1.9752913752913752,
"grad_norm": 12.634699821472168,
"learning_rate": 5.98889741238544e-06,
"loss": 5.4307403564453125,
"step": 1060
},
{
"epoch": 1.9846153846153847,
"grad_norm": 13.824172019958496,
"learning_rate": 5.949036755409432e-06,
"loss": 5.559865570068359,
"step": 1065
},
{
"epoch": 1.993939393939394,
"grad_norm": 14.028116226196289,
"learning_rate": 5.909113428867195e-06,
"loss": 5.506551361083984,
"step": 1070
}
],
"logging_steps": 5,
"max_steps": 2148,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}