{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 733,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0068212824010914054,
"grad_norm": 1.8736196584589864,
"learning_rate": 5.405405405405406e-06,
"loss": 0.8523,
"num_tokens": 3759146.0,
"step": 5
},
{
"epoch": 0.013642564802182811,
"grad_norm": 1.6529107120671427,
"learning_rate": 1.2162162162162164e-05,
"loss": 0.8046,
"num_tokens": 7668808.0,
"step": 10
},
{
"epoch": 0.020463847203274217,
"grad_norm": 0.6852619736798029,
"learning_rate": 1.891891891891892e-05,
"loss": 0.7184,
"num_tokens": 11368873.0,
"step": 15
},
{
"epoch": 0.027285129604365622,
"grad_norm": 0.555559605510255,
"learning_rate": 2.5675675675675675e-05,
"loss": 0.677,
"num_tokens": 15118063.0,
"step": 20
},
{
"epoch": 0.034106412005457026,
"grad_norm": 0.48702272354609055,
"learning_rate": 3.2432432432432436e-05,
"loss": 0.6545,
"num_tokens": 18906839.0,
"step": 25
},
{
"epoch": 0.040927694406548434,
"grad_norm": 0.4176988340052698,
"learning_rate": 3.918918918918919e-05,
"loss": 0.6322,
"num_tokens": 22641755.0,
"step": 30
},
{
"epoch": 0.047748976807639835,
"grad_norm": 0.37783784655178565,
"learning_rate": 4.594594594594595e-05,
"loss": 0.6277,
"num_tokens": 26636629.0,
"step": 35
},
{
"epoch": 0.054570259208731244,
"grad_norm": 0.393448087972434,
"learning_rate": 4.999908316574644e-05,
"loss": 0.5972,
"num_tokens": 30417967.0,
"step": 40
},
{
"epoch": 0.061391541609822645,
"grad_norm": 0.3835295310987194,
"learning_rate": 4.998876963847189e-05,
"loss": 0.6003,
"num_tokens": 34231333.0,
"step": 45
},
{
"epoch": 0.06821282401091405,
"grad_norm": 0.41917239714742505,
"learning_rate": 4.996700181165029e-05,
"loss": 0.6002,
"num_tokens": 37961424.0,
"step": 50
},
{
"epoch": 0.07503410641200546,
"grad_norm": 0.3928746940447866,
"learning_rate": 4.993379077238036e-05,
"loss": 0.6007,
"num_tokens": 41826860.0,
"step": 55
},
{
"epoch": 0.08185538881309687,
"grad_norm": 0.43820216184922917,
"learning_rate": 4.9889153436180295e-05,
"loss": 0.5885,
"num_tokens": 45543403.0,
"step": 60
},
{
"epoch": 0.08867667121418826,
"grad_norm": 0.4224534396688633,
"learning_rate": 4.983311253837213e-05,
"loss": 0.5925,
"num_tokens": 49369486.0,
"step": 65
},
{
"epoch": 0.09549795361527967,
"grad_norm": 0.390751600397789,
"learning_rate": 4.9765696622501846e-05,
"loss": 0.5819,
"num_tokens": 53010874.0,
"step": 70
},
{
"epoch": 0.10231923601637108,
"grad_norm": 0.3848639134961841,
"learning_rate": 4.968694002580118e-05,
"loss": 0.584,
"num_tokens": 56909889.0,
"step": 75
},
{
"epoch": 0.10914051841746249,
"grad_norm": 0.43739436315321556,
"learning_rate": 4.959688286169851e-05,
"loss": 0.5675,
"num_tokens": 60650570.0,
"step": 80
},
{
"epoch": 0.11596180081855388,
"grad_norm": 0.39935343601617596,
"learning_rate": 4.9495570999387685e-05,
"loss": 0.5611,
"num_tokens": 64564660.0,
"step": 85
},
{
"epoch": 0.12278308321964529,
"grad_norm": 0.40999891421986956,
"learning_rate": 4.9383056040465276e-05,
"loss": 0.5793,
"num_tokens": 68426882.0,
"step": 90
},
{
"epoch": 0.1296043656207367,
"grad_norm": 0.46162490091934516,
"learning_rate": 4.925939529264815e-05,
"loss": 0.5753,
"num_tokens": 72252819.0,
"step": 95
},
{
"epoch": 0.1364256480218281,
"grad_norm": 0.4477769389895218,
"learning_rate": 4.9124651740584684e-05,
"loss": 0.5614,
"num_tokens": 76160914.0,
"step": 100
},
{
"epoch": 0.1432469304229195,
"grad_norm": 0.4339569135687328,
"learning_rate": 4.897889401377447e-05,
"loss": 0.5631,
"num_tokens": 80152955.0,
"step": 105
},
{
"epoch": 0.15006821282401092,
"grad_norm": 0.34267757496690937,
"learning_rate": 4.882219635161306e-05,
"loss": 0.5668,
"num_tokens": 83901702.0,
"step": 110
},
{
"epoch": 0.15688949522510232,
"grad_norm": 0.44395176859224605,
"learning_rate": 4.865463856557922e-05,
"loss": 0.5655,
"num_tokens": 87691018.0,
"step": 115
},
{
"epoch": 0.16371077762619374,
"grad_norm": 0.3737049486376839,
"learning_rate": 4.847630599858426e-05,
"loss": 0.5547,
"num_tokens": 91542428.0,
"step": 120
},
{
"epoch": 0.17053206002728513,
"grad_norm": 0.35047365389316976,
"learning_rate": 4.8287289481503954e-05,
"loss": 0.5615,
"num_tokens": 95438170.0,
"step": 125
},
{
"epoch": 0.17735334242837653,
"grad_norm": 0.3783670154461764,
"learning_rate": 4.8087685286915276e-05,
"loss": 0.557,
"num_tokens": 99383692.0,
"step": 130
},
{
"epoch": 0.18417462482946795,
"grad_norm": 0.4052171512788836,
"learning_rate": 4.787759508006147e-05,
"loss": 0.5567,
"num_tokens": 103223537.0,
"step": 135
},
{
"epoch": 0.19099590723055934,
"grad_norm": 0.4326450003437268,
"learning_rate": 4.765712586707048e-05,
"loss": 0.5694,
"num_tokens": 106901687.0,
"step": 140
},
{
"epoch": 0.19781718963165076,
"grad_norm": 0.3804058177869605,
"learning_rate": 4.7426389940453065e-05,
"loss": 0.5419,
"num_tokens": 110840758.0,
"step": 145
},
{
"epoch": 0.20463847203274216,
"grad_norm": 0.41687827590545184,
"learning_rate": 4.718550482190837e-05,
"loss": 0.5579,
"num_tokens": 114521504.0,
"step": 150
},
{
"epoch": 0.21145975443383355,
"grad_norm": 0.3403903362087962,
"learning_rate": 4.6934593202466127e-05,
"loss": 0.5425,
"num_tokens": 118445759.0,
"step": 155
},
{
"epoch": 0.21828103683492497,
"grad_norm": 0.3447498543333958,
"learning_rate": 4.6673782879995896e-05,
"loss": 0.5511,
"num_tokens": 122311693.0,
"step": 160
},
{
"epoch": 0.22510231923601637,
"grad_norm": 0.3388163263998386,
"learning_rate": 4.640320669411526e-05,
"loss": 0.5539,
"num_tokens": 126094524.0,
"step": 165
},
{
"epoch": 0.23192360163710776,
"grad_norm": 0.3526421465715504,
"learning_rate": 4.612300245853004e-05,
"loss": 0.5472,
"num_tokens": 129971056.0,
"step": 170
},
{
"epoch": 0.23874488403819918,
"grad_norm": 0.4416764127220365,
"learning_rate": 4.5833312890841085e-05,
"loss": 0.5619,
"num_tokens": 133765982.0,
"step": 175
},
{
"epoch": 0.24556616643929058,
"grad_norm": 0.38893427340230086,
"learning_rate": 4.553428553985329e-05,
"loss": 0.5416,
"num_tokens": 137522281.0,
"step": 180
},
{
"epoch": 0.252387448840382,
"grad_norm": 0.38857406762975316,
"learning_rate": 4.522607271042399e-05,
"loss": 0.5366,
"num_tokens": 141196084.0,
"step": 185
},
{
"epoch": 0.2592087312414734,
"grad_norm": 0.44916319979429337,
"learning_rate": 4.490883138588882e-05,
"loss": 0.548,
"num_tokens": 145136704.0,
"step": 190
},
{
"epoch": 0.2660300136425648,
"grad_norm": 0.34655455324240514,
"learning_rate": 4.458272314810479e-05,
"loss": 0.5358,
"num_tokens": 148940122.0,
"step": 195
},
{
"epoch": 0.2728512960436562,
"grad_norm": 0.3092620382836066,
"learning_rate": 4.4247914095151086e-05,
"loss": 0.5457,
"num_tokens": 152809678.0,
"step": 200
},
{
"epoch": 0.27967257844474763,
"grad_norm": 0.36535596373173046,
"learning_rate": 4.390457475672966e-05,
"loss": 0.5393,
"num_tokens": 156683573.0,
"step": 205
},
{
"epoch": 0.286493860845839,
"grad_norm": 0.31649340256694225,
"learning_rate": 4.35528800073086e-05,
"loss": 0.5407,
"num_tokens": 160433326.0,
"step": 210
},
{
"epoch": 0.2933151432469304,
"grad_norm": 0.3499332988489543,
"learning_rate": 4.31930089770526e-05,
"loss": 0.5442,
"num_tokens": 164374316.0,
"step": 215
},
{
"epoch": 0.30013642564802184,
"grad_norm": 0.3456966931593394,
"learning_rate": 4.282514496058582e-05,
"loss": 0.5237,
"num_tokens": 168223299.0,
"step": 220
},
{
"epoch": 0.3069577080491132,
"grad_norm": 0.28620134376914713,
"learning_rate": 4.24494753236337e-05,
"loss": 0.5364,
"num_tokens": 172132000.0,
"step": 225
},
{
"epoch": 0.31377899045020463,
"grad_norm": 0.3136055325702659,
"learning_rate": 4.2066191407591125e-05,
"loss": 0.532,
"num_tokens": 176086331.0,
"step": 230
},
{
"epoch": 0.32060027285129605,
"grad_norm": 0.2904507410041757,
"learning_rate": 4.1675488432065785e-05,
"loss": 0.5242,
"num_tokens": 179917640.0,
"step": 235
},
{
"epoch": 0.3274215552523875,
"grad_norm": 0.2838475235439828,
"learning_rate": 4.127756539544609e-05,
"loss": 0.5369,
"num_tokens": 183746129.0,
"step": 240
},
{
"epoch": 0.33424283765347884,
"grad_norm": 0.36567602581459485,
"learning_rate": 4.087262497354452e-05,
"loss": 0.5453,
"num_tokens": 187699370.0,
"step": 245
},
{
"epoch": 0.34106412005457026,
"grad_norm": 0.31227442368392383,
"learning_rate": 4.046087341636789e-05,
"loss": 0.5279,
"num_tokens": 191512142.0,
"step": 250
},
{
"epoch": 0.3478854024556617,
"grad_norm": 0.31326507615271154,
"learning_rate": 4.0042520443067176e-05,
"loss": 0.5292,
"num_tokens": 195367749.0,
"step": 255
},
{
"epoch": 0.35470668485675305,
"grad_norm": 0.3188648715010267,
"learning_rate": 3.961777913512035e-05,
"loss": 0.5182,
"num_tokens": 199215371.0,
"step": 260
},
{
"epoch": 0.3615279672578445,
"grad_norm": 0.30482784830831816,
"learning_rate": 3.9186865827802724e-05,
"loss": 0.5377,
"num_tokens": 202903048.0,
"step": 265
},
{
"epoch": 0.3683492496589359,
"grad_norm": 0.3201126000984806,
"learning_rate": 3.875e-05,
"loss": 0.5265,
"num_tokens": 206761213.0,
"step": 270
},
{
"epoch": 0.37517053206002726,
"grad_norm": 0.2841596390937697,
"learning_rate": 3.830740416242014e-05,
"loss": 0.5224,
"num_tokens": 210585632.0,
"step": 275
},
{
"epoch": 0.3819918144611187,
"grad_norm": 0.30685330617751183,
"learning_rate": 3.7859303744261064e-05,
"loss": 0.5281,
"num_tokens": 214261738.0,
"step": 280
},
{
"epoch": 0.3888130968622101,
"grad_norm": 0.2721241538271607,
"learning_rate": 3.740592697839185e-05,
"loss": 0.5329,
"num_tokens": 218144024.0,
"step": 285
},
{
"epoch": 0.3956343792633015,
"grad_norm": 0.26649843675777884,
"learning_rate": 3.694750478510596e-05,
"loss": 0.5285,
"num_tokens": 222057295.0,
"step": 290
},
{
"epoch": 0.4024556616643929,
"grad_norm": 0.26129699269023865,
"learning_rate": 3.648427065450555e-05,
"loss": 0.5197,
"num_tokens": 225828573.0,
"step": 295
},
{
"epoch": 0.4092769440654843,
"grad_norm": 0.2871832538824481,
"learning_rate": 3.601646052757707e-05,
"loss": 0.519,
"num_tokens": 229710487.0,
"step": 300
},
{
"epoch": 0.41609822646657574,
"grad_norm": 0.3059717183527635,
"learning_rate": 3.55443126760184e-05,
"loss": 0.5343,
"num_tokens": 233617525.0,
"step": 305
},
{
"epoch": 0.4229195088676671,
"grad_norm": 0.35168664075568773,
"learning_rate": 3.506806758087894e-05,
"loss": 0.532,
"num_tokens": 237394471.0,
"step": 310
},
{
"epoch": 0.4297407912687585,
"grad_norm": 0.3148243829183939,
"learning_rate": 3.458796781007437e-05,
"loss": 0.5266,
"num_tokens": 241114261.0,
"step": 315
},
{
"epoch": 0.43656207366984995,
"grad_norm": 0.2813339053896127,
"learning_rate": 3.410425789483854e-05,
"loss": 0.527,
"num_tokens": 244967987.0,
"step": 320
},
{
"epoch": 0.4433833560709413,
"grad_norm": 0.28200102184901027,
"learning_rate": 3.3617184205175304e-05,
"loss": 0.5334,
"num_tokens": 248751095.0,
"step": 325
},
{
"epoch": 0.45020463847203274,
"grad_norm": 0.3313470015451347,
"learning_rate": 3.312699482437392e-05,
"loss": 0.5206,
"num_tokens": 252607265.0,
"step": 330
},
{
"epoch": 0.45702592087312416,
"grad_norm": 0.29055424057520324,
"learning_rate": 3.263393942265168e-05,
"loss": 0.5273,
"num_tokens": 256417909.0,
"step": 335
},
{
"epoch": 0.4638472032742155,
"grad_norm": 0.257263653555583,
"learning_rate": 3.213826912998838e-05,
"loss": 0.5199,
"num_tokens": 260456429.0,
"step": 340
},
{
"epoch": 0.47066848567530695,
"grad_norm": 0.26867157928326685,
"learning_rate": 3.164023640821719e-05,
"loss": 0.5131,
"num_tokens": 264287905.0,
"step": 345
},
{
"epoch": 0.47748976807639837,
"grad_norm": 0.27131278139557347,
"learning_rate": 3.114009492243721e-05,
"loss": 0.5215,
"num_tokens": 268098790.0,
"step": 350
},
{
"epoch": 0.4843110504774898,
"grad_norm": 0.2819904094269511,
"learning_rate": 3.063809941181321e-05,
"loss": 0.5312,
"num_tokens": 271974065.0,
"step": 355
},
{
"epoch": 0.49113233287858116,
"grad_norm": 0.28006282312881625,
"learning_rate": 3.0134505559828203e-05,
"loss": 0.5349,
"num_tokens": 275852045.0,
"step": 360
},
{
"epoch": 0.4979536152796726,
"grad_norm": 0.2868754861670018,
"learning_rate": 2.9629569864055125e-05,
"loss": 0.5129,
"num_tokens": 279504484.0,
"step": 365
},
{
"epoch": 0.504774897680764,
"grad_norm": 0.26731895610018197,
"learning_rate": 2.9123549505513868e-05,
"loss": 0.5149,
"num_tokens": 283461546.0,
"step": 370
},
{
"epoch": 0.5115961800818554,
"grad_norm": 0.2591582306380616,
"learning_rate": 2.8616702217680134e-05,
"loss": 0.5229,
"num_tokens": 287371918.0,
"step": 375
},
{
"epoch": 0.5184174624829468,
"grad_norm": 0.2716517829278145,
"learning_rate": 2.810928615521303e-05,
"loss": 0.5096,
"num_tokens": 291057738.0,
"step": 380
},
{
"epoch": 0.5252387448840382,
"grad_norm": 0.28085010872584193,
"learning_rate": 2.7601559762468022e-05,
"loss": 0.5188,
"num_tokens": 294881963.0,
"step": 385
},
{
"epoch": 0.5320600272851296,
"grad_norm": 0.27819826322879626,
"learning_rate": 2.7093781641862387e-05,
"loss": 0.5213,
"num_tokens": 298677895.0,
"step": 390
},
{
"epoch": 0.538881309686221,
"grad_norm": 0.27094783517982796,
"learning_rate": 2.658621042216021e-05,
"loss": 0.5056,
"num_tokens": 302387985.0,
"step": 395
},
{
"epoch": 0.5457025920873124,
"grad_norm": 0.28214922968621825,
"learning_rate": 2.6079104626743845e-05,
"loss": 0.5273,
"num_tokens": 306130593.0,
"step": 400
},
{
"epoch": 0.5525238744884038,
"grad_norm": 0.28152116381505404,
"learning_rate": 2.5572722541939113e-05,
"loss": 0.525,
"num_tokens": 309952008.0,
"step": 405
},
{
"epoch": 0.5593451568894953,
"grad_norm": 0.3046227632539219,
"learning_rate": 2.5067322085461315e-05,
"loss": 0.5104,
"num_tokens": 313725489.0,
"step": 410
},
{
"epoch": 0.5661664392905866,
"grad_norm": 0.32073667981593174,
"learning_rate": 2.4563160675048846e-05,
"loss": 0.5155,
"num_tokens": 317525148.0,
"step": 415
},
{
"epoch": 0.572987721691678,
"grad_norm": 0.27274661004934003,
"learning_rate": 2.406049509735156e-05,
"loss": 0.5153,
"num_tokens": 321410101.0,
"step": 420
},
{
"epoch": 0.5798090040927695,
"grad_norm": 0.2825565593171515,
"learning_rate": 2.355958137714056e-05,
"loss": 0.5106,
"num_tokens": 325102278.0,
"step": 425
},
{
"epoch": 0.5866302864938608,
"grad_norm": 0.277076656365198,
"learning_rate": 2.3060674646906004e-05,
"loss": 0.5154,
"num_tokens": 328831071.0,
"step": 430
},
{
"epoch": 0.5934515688949522,
"grad_norm": 0.2998479094213919,
"learning_rate": 2.2564029016909416e-05,
"loss": 0.5048,
"num_tokens": 332767044.0,
"step": 435
},
{
"epoch": 0.6002728512960437,
"grad_norm": 0.26336063955648953,
"learning_rate": 2.2069897445756627e-05,
"loss": 0.5027,
"num_tokens": 336595638.0,
"step": 440
},
{
"epoch": 0.607094133697135,
"grad_norm": 0.26194060961360477,
"learning_rate": 2.1578531611557322e-05,
"loss": 0.5157,
"num_tokens": 340358925.0,
"step": 445
},
{
"epoch": 0.6139154160982264,
"grad_norm": 0.23559986218790224,
"learning_rate": 2.109018178373675e-05,
"loss": 0.5145,
"num_tokens": 344239058.0,
"step": 450
},
{
"epoch": 0.6207366984993179,
"grad_norm": 0.25524111639931907,
"learning_rate": 2.0605096695564973e-05,
"loss": 0.518,
"num_tokens": 348080585.0,
"step": 455
},
{
"epoch": 0.6275579809004093,
"grad_norm": 0.24278501086019294,
"learning_rate": 2.0123523417468466e-05,
"loss": 0.5113,
"num_tokens": 351817695.0,
"step": 460
},
{
"epoch": 0.6343792633015006,
"grad_norm": 0.2435871006604587,
"learning_rate": 1.9645707231188742e-05,
"loss": 0.5057,
"num_tokens": 355639183.0,
"step": 465
},
{
"epoch": 0.6412005457025921,
"grad_norm": 0.2360014706850625,
"learning_rate": 1.9171891504851925e-05,
"loss": 0.5244,
"num_tokens": 359437581.0,
"step": 470
},
{
"epoch": 0.6480218281036835,
"grad_norm": 0.25004817565493664,
"learning_rate": 1.8702317569013094e-05,
"loss": 0.5003,
"num_tokens": 363189983.0,
"step": 475
},
{
"epoch": 0.654843110504775,
"grad_norm": 0.23130113042849926,
"learning_rate": 1.8237224593738327e-05,
"loss": 0.5026,
"num_tokens": 366863209.0,
"step": 480
},
{
"epoch": 0.6616643929058663,
"grad_norm": 0.22782568728458572,
"learning_rate": 1.7776849466787223e-05,
"loss": 0.517,
"num_tokens": 370725860.0,
"step": 485
},
{
"epoch": 0.6684856753069577,
"grad_norm": 0.23427370273749204,
"learning_rate": 1.7321426672957896e-05,
"loss": 0.5024,
"num_tokens": 374566515.0,
"step": 490
},
{
"epoch": 0.6753069577080492,
"grad_norm": 0.25775802217340466,
"learning_rate": 1.6871188174655787e-05,
"loss": 0.4956,
"num_tokens": 378330489.0,
"step": 495
},
{
"epoch": 0.6821282401091405,
"grad_norm": 0.23837674599354872,
"learning_rate": 1.6426363293747334e-05,
"loss": 0.4999,
"num_tokens": 382103468.0,
"step": 500
},
{
"epoch": 0.6889495225102319,
"grad_norm": 0.2411231563063646,
"learning_rate": 1.598717859475846e-05,
"loss": 0.5085,
"num_tokens": 385837297.0,
"step": 505
},
{
"epoch": 0.6957708049113234,
"grad_norm": 0.2362897778396916,
"learning_rate": 1.5553857769477553e-05,
"loss": 0.5054,
"num_tokens": 389586680.0,
"step": 510
},
{
"epoch": 0.7025920873124147,
"grad_norm": 0.2307857419590509,
"learning_rate": 1.5126621523021518e-05,
"loss": 0.5099,
"num_tokens": 393428760.0,
"step": 515
},
{
"epoch": 0.7094133697135061,
"grad_norm": 0.26352221429023026,
"learning_rate": 1.4705687461423209e-05,
"loss": 0.5221,
"num_tokens": 397158700.0,
"step": 520
},
{
"epoch": 0.7162346521145976,
"grad_norm": 0.24408140997201158,
"learning_rate": 1.4291269980797139e-05,
"loss": 0.5064,
"num_tokens": 400923938.0,
"step": 525
},
{
"epoch": 0.723055934515689,
"grad_norm": 0.22835774391918423,
"learning_rate": 1.3883580158140291e-05,
"loss": 0.5004,
"num_tokens": 404685655.0,
"step": 530
},
{
"epoch": 0.7298772169167803,
"grad_norm": 0.2191285949950328,
"learning_rate": 1.3482825643823293e-05,
"loss": 0.5061,
"num_tokens": 408582799.0,
"step": 535
},
{
"epoch": 0.7366984993178718,
"grad_norm": 0.2240008162655879,
"learning_rate": 1.3089210555827086e-05,
"loss": 0.5117,
"num_tokens": 412386009.0,
"step": 540
},
{
"epoch": 0.7435197817189632,
"grad_norm": 0.21935213301280262,
"learning_rate": 1.270293537577855e-05,
"loss": 0.5147,
"num_tokens": 416372039.0,
"step": 545
},
{
"epoch": 0.7503410641200545,
"grad_norm": 0.21858983600847137,
"learning_rate": 1.232419684683844e-05,
"loss": 0.4993,
"num_tokens": 420051975.0,
"step": 550
},
{
"epoch": 0.757162346521146,
"grad_norm": 0.22829836461822706,
"learning_rate": 1.1953187873493303e-05,
"loss": 0.4999,
"num_tokens": 423685709.0,
"step": 555
},
{
"epoch": 0.7639836289222374,
"grad_norm": 0.23054591425701906,
"learning_rate": 1.1590097423302684e-05,
"loss": 0.4959,
"num_tokens": 427405904.0,
"step": 560
},
{
"epoch": 0.7708049113233287,
"grad_norm": 0.2216772096074348,
"learning_rate": 1.1235110430651421e-05,
"loss": 0.4959,
"num_tokens": 431288378.0,
"step": 565
},
{
"epoch": 0.7776261937244202,
"grad_norm": 0.23870079264998784,
"learning_rate": 1.0888407702556284e-05,
"loss": 0.4997,
"num_tokens": 435077995.0,
"step": 570
},
{
"epoch": 0.7844474761255116,
"grad_norm": 0.24262844204918257,
"learning_rate": 1.0550165826574766e-05,
"loss": 0.4996,
"num_tokens": 439006864.0,
"step": 575
},
{
"epoch": 0.791268758526603,
"grad_norm": 0.2366728392543131,
"learning_rate": 1.0220557080862985e-05,
"loss": 0.5148,
"num_tokens": 443045688.0,
"step": 580
},
{
"epoch": 0.7980900409276944,
"grad_norm": 0.2198918697757138,
"learning_rate": 9.899749346428556e-06,
"loss": 0.502,
"num_tokens": 446852018.0,
"step": 585
},
{
"epoch": 0.8049113233287858,
"grad_norm": 0.21089772519725644,
"learning_rate": 9.587906021623016e-06,
"loss": 0.516,
"num_tokens": 450687287.0,
"step": 590
},
{
"epoch": 0.8117326057298773,
"grad_norm": 0.2383902991369814,
"learning_rate": 9.28518593891749e-06,
"loss": 0.5012,
"num_tokens": 454483896.0,
"step": 595
},
{
"epoch": 0.8185538881309686,
"grad_norm": 0.22718614205669174,
"learning_rate": 8.99174328400385e-06,
"loss": 0.4995,
"num_tokens": 458325861.0,
"step": 600
},
{
"epoch": 0.82537517053206,
"grad_norm": 0.2323934518666854,
"learning_rate": 8.707727517262697e-06,
"loss": 0.5048,
"num_tokens": 462055868.0,
"step": 605
},
{
"epoch": 0.8321964529331515,
"grad_norm": 0.21182601524961708,
"learning_rate": 8.433283297638053e-06,
"loss": 0.4992,
"num_tokens": 465973876.0,
"step": 610
},
{
"epoch": 0.8390177353342428,
"grad_norm": 0.2198811526373794,
"learning_rate": 8.168550408957632e-06,
"loss": 0.4969,
"num_tokens": 469791498.0,
"step": 615
},
{
"epoch": 0.8458390177353342,
"grad_norm": 0.2307239967299105,
"learning_rate": 7.91366368873613e-06,
"loss": 0.4944,
"num_tokens": 473570581.0,
"step": 620
},
{
"epoch": 0.8526603001364257,
"grad_norm": 0.23063987241314526,
"learning_rate": 7.66875295949791e-06,
"loss": 0.5104,
"num_tokens": 477401353.0,
"step": 625
},
{
"epoch": 0.859481582537517,
"grad_norm": 0.20695295936947347,
"learning_rate": 7.4339429626539e-06,
"loss": 0.5101,
"num_tokens": 481348892.0,
"step": 630
},
{
"epoch": 0.8663028649386084,
"grad_norm": 0.23596918357029553,
"learning_rate": 7.2093532949665715e-06,
"loss": 0.508,
"num_tokens": 485171910.0,
"step": 635
},
{
"epoch": 0.8731241473396999,
"grad_norm": 0.21024153872167606,
"learning_rate": 6.995098347635173e-06,
"loss": 0.4932,
"num_tokens": 489059548.0,
"step": 640
},
{
"epoch": 0.8799454297407913,
"grad_norm": 0.21056945755021317,
"learning_rate": 6.791287248032431e-06,
"loss": 0.4965,
"num_tokens": 492893029.0,
"step": 645
},
{
"epoch": 0.8867667121418826,
"grad_norm": 0.21467631429891465,
"learning_rate": 6.598023804122194e-06,
"loss": 0.5038,
"num_tokens": 496905674.0,
"step": 650
},
{
"epoch": 0.8935879945429741,
"grad_norm": 0.20241715910866598,
"learning_rate": 6.415406451586528e-06,
"loss": 0.4993,
"num_tokens": 500864542.0,
"step": 655
},
{
"epoch": 0.9004092769440655,
"grad_norm": 0.20445885305790695,
"learning_rate": 6.243528203689025e-06,
"loss": 0.5031,
"num_tokens": 504810366.0,
"step": 660
},
{
"epoch": 0.9072305593451568,
"grad_norm": 0.22038996995818003,
"learning_rate": 6.0824766039e-06,
"loss": 0.4993,
"num_tokens": 508607232.0,
"step": 665
},
{
"epoch": 0.9140518417462483,
"grad_norm": 0.21041886366306528,
"learning_rate": 5.932333681307571e-06,
"loss": 0.506,
"num_tokens": 512474084.0,
"step": 670
},
{
"epoch": 0.9208731241473397,
"grad_norm": 0.20677802146077925,
"learning_rate": 5.793175908837471e-06,
"loss": 0.4966,
"num_tokens": 516216104.0,
"step": 675
},
{
"epoch": 0.927694406548431,
"grad_norm": 0.2045811787418825,
"learning_rate": 5.665074164302742e-06,
"loss": 0.5063,
"num_tokens": 519966345.0,
"step": 680
},
{
"epoch": 0.9345156889495225,
"grad_norm": 0.19469354472759123,
"learning_rate": 5.548093694303275e-06,
"loss": 0.4918,
"num_tokens": 523793837.0,
"step": 685
},
{
"epoch": 0.9413369713506139,
"grad_norm": 0.2180629067462523,
"learning_rate": 5.442294080993446e-06,
"loss": 0.5058,
"num_tokens": 527666864.0,
"step": 690
},
{
"epoch": 0.9481582537517054,
"grad_norm": 0.2049465869798585,
"learning_rate": 5.347729211734919e-06,
"loss": 0.5033,
"num_tokens": 531466359.0,
"step": 695
},
{
"epoch": 0.9549795361527967,
"grad_norm": 0.2009098561304282,
"learning_rate": 5.264447251649954e-06,
"loss": 0.5057,
"num_tokens": 535253217.0,
"step": 700
},
{
"epoch": 0.9618008185538881,
"grad_norm": 0.20669161442470152,
"learning_rate": 5.192490619089267e-06,
"loss": 0.4898,
"num_tokens": 539137436.0,
"step": 705
},
{
"epoch": 0.9686221009549796,
"grad_norm": 0.2077236609901708,
"learning_rate": 5.1318959640269095e-06,
"loss": 0.5003,
"num_tokens": 542824098.0,
"step": 710
},
{
"epoch": 0.975443383356071,
"grad_norm": 0.21273317950464898,
"learning_rate": 5.082694149393189e-06,
"loss": 0.5113,
"num_tokens": 546578047.0,
"step": 715
},
{
"epoch": 0.9822646657571623,
"grad_norm": 0.20615733476617562,
"learning_rate": 5.044910235355121e-06,
"loss": 0.4973,
"num_tokens": 550377811.0,
"step": 720
},
{
"epoch": 0.9890859481582538,
"grad_norm": 0.1926376982161975,
"learning_rate": 5.0185634665524255e-06,
"loss": 0.4932,
"num_tokens": 554224024.0,
"step": 725
},
{
"epoch": 0.9959072305593452,
"grad_norm": 0.19127148561259888,
"learning_rate": 5.003667262295572e-06,
"loss": 0.5014,
"num_tokens": 558001366.0,
"step": 730
},
{
"epoch": 1.0,
"num_tokens": 560311272.0,
"step": 733,
"total_flos": 1147472759488512.0,
"train_loss": 0.5343828282609968,
"train_runtime": 5522.9556,
"train_samples_per_second": 16.972,
"train_steps_per_second": 0.133
}
],
"logging_steps": 5,
"max_steps": 733,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1147472759488512.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}