{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.978274363749224,
"eval_steps": 30,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.024829298572315334,
"grad_norm": 8.391741752624512,
"learning_rate": 1.487603305785124e-05,
"loss": 2.6023,
"step": 10
},
{
"epoch": 0.04965859714463067,
"grad_norm": 0.7645617723464966,
"learning_rate": 3.1404958677685955e-05,
"loss": 1.0126,
"step": 20
},
{
"epoch": 0.074487895716946,
"grad_norm": 0.7797226905822754,
"learning_rate": 4.793388429752066e-05,
"loss": 0.6236,
"step": 30
},
{
"epoch": 0.074487895716946,
"eval_loss": 0.5439823865890503,
"eval_runtime": 41.1617,
"eval_samples_per_second": 4.13,
"eval_steps_per_second": 2.065,
"step": 30
},
{
"epoch": 0.09931719428926133,
"grad_norm": 0.5489462614059448,
"learning_rate": 6.446280991735537e-05,
"loss": 0.4543,
"step": 40
},
{
"epoch": 0.12414649286157665,
"grad_norm": 0.7057201266288757,
"learning_rate": 8.099173553719009e-05,
"loss": 0.369,
"step": 50
},
{
"epoch": 0.148975791433892,
"grad_norm": 0.44007009267807007,
"learning_rate": 9.75206611570248e-05,
"loss": 0.2855,
"step": 60
},
{
"epoch": 0.148975791433892,
"eval_loss": 0.28305545449256897,
"eval_runtime": 40.404,
"eval_samples_per_second": 4.208,
"eval_steps_per_second": 2.104,
"step": 60
},
{
"epoch": 0.17380509000620734,
"grad_norm": 0.5000414848327637,
"learning_rate": 0.0001140495867768595,
"loss": 0.2506,
"step": 70
},
{
"epoch": 0.19863438857852267,
"grad_norm": 0.6662213802337646,
"learning_rate": 0.00013057851239669423,
"loss": 0.2262,
"step": 80
},
{
"epoch": 0.22346368715083798,
"grad_norm": 0.4713539481163025,
"learning_rate": 0.00014710743801652894,
"loss": 0.1983,
"step": 90
},
{
"epoch": 0.22346368715083798,
"eval_loss": 0.2015635371208191,
"eval_runtime": 40.5434,
"eval_samples_per_second": 4.193,
"eval_steps_per_second": 2.097,
"step": 90
},
{
"epoch": 0.2482929857231533,
"grad_norm": 0.3806518316268921,
"learning_rate": 0.00016363636363636366,
"loss": 0.1905,
"step": 100
},
{
"epoch": 0.27312228429546864,
"grad_norm": 0.537176251411438,
"learning_rate": 0.00018016528925619835,
"loss": 0.1681,
"step": 110
},
{
"epoch": 0.297951582867784,
"grad_norm": 0.39260727167129517,
"learning_rate": 0.0001966942148760331,
"loss": 0.1619,
"step": 120
},
{
"epoch": 0.297951582867784,
"eval_loss": 0.1580844670534134,
"eval_runtime": 40.5961,
"eval_samples_per_second": 4.188,
"eval_steps_per_second": 2.094,
"step": 120
},
{
"epoch": 0.3227808814400993,
"grad_norm": 0.2614082396030426,
"learning_rate": 0.00019997332081116373,
"loss": 0.156,
"step": 130
},
{
"epoch": 0.34761018001241467,
"grad_norm": 0.4994271993637085,
"learning_rate": 0.00019986496100395275,
"loss": 0.1591,
"step": 140
},
{
"epoch": 0.37243947858473,
"grad_norm": 0.2901883125305176,
"learning_rate": 0.000199673343399533,
"loss": 0.1536,
"step": 150
},
{
"epoch": 0.37243947858473,
"eval_loss": 0.15050268173217773,
"eval_runtime": 40.6229,
"eval_samples_per_second": 4.185,
"eval_steps_per_second": 2.092,
"step": 150
},
{
"epoch": 0.39726877715704534,
"grad_norm": 0.4040389657020569,
"learning_rate": 0.00019939862775022893,
"loss": 0.1614,
"step": 160
},
{
"epoch": 0.42209807572936064,
"grad_norm": 0.3563652038574219,
"learning_rate": 0.0001990410430875205,
"loss": 0.1489,
"step": 170
},
{
"epoch": 0.44692737430167595,
"grad_norm": 0.21592873334884644,
"learning_rate": 0.00019860088753109896,
"loss": 0.1491,
"step": 180
},
{
"epoch": 0.44692737430167595,
"eval_loss": 0.14503230154514313,
"eval_runtime": 40.5377,
"eval_samples_per_second": 4.194,
"eval_steps_per_second": 2.097,
"step": 180
},
{
"epoch": 0.4717566728739913,
"grad_norm": 0.293130487203598,
"learning_rate": 0.00019807852804032305,
"loss": 0.1388,
"step": 190
},
{
"epoch": 0.4965859714463066,
"grad_norm": 0.5099315643310547,
"learning_rate": 0.00019747440010828383,
"loss": 0.1398,
"step": 200
},
{
"epoch": 0.521415270018622,
"grad_norm": 0.3809162974357605,
"learning_rate": 0.00019678900739873226,
"loss": 0.1484,
"step": 210
},
{
"epoch": 0.521415270018622,
"eval_loss": 0.1399187594652176,
"eval_runtime": 40.3775,
"eval_samples_per_second": 4.21,
"eval_steps_per_second": 2.105,
"step": 210
},
{
"epoch": 0.5462445685909373,
"grad_norm": 0.2565288245677948,
"learning_rate": 0.000196022921326173,
"loss": 0.1406,
"step": 220
},
{
"epoch": 0.5710738671632526,
"grad_norm": 0.25355908274650574,
"learning_rate": 0.00019517678057947384,
"loss": 0.1436,
"step": 230
},
{
"epoch": 0.595903165735568,
"grad_norm": 0.3126136064529419,
"learning_rate": 0.00019425129058938832,
"loss": 0.1437,
"step": 240
},
{
"epoch": 0.595903165735568,
"eval_loss": 0.13724148273468018,
"eval_runtime": 40.489,
"eval_samples_per_second": 4.199,
"eval_steps_per_second": 2.099,
"step": 240
},
{
"epoch": 0.6207324643078833,
"grad_norm": 0.32518845796585083,
"learning_rate": 0.00019324722294043558,
"loss": 0.1391,
"step": 250
},
{
"epoch": 0.6455617628801986,
"grad_norm": 0.32164257764816284,
"learning_rate": 0.00019216541472762735,
"loss": 0.1386,
"step": 260
},
{
"epoch": 0.6703910614525139,
"grad_norm": 0.37033355236053467,
"learning_rate": 0.0001910067678585786,
"loss": 0.1329,
"step": 270
},
{
"epoch": 0.6703910614525139,
"eval_loss": 0.12901660799980164,
"eval_runtime": 40.5259,
"eval_samples_per_second": 4.195,
"eval_steps_per_second": 2.097,
"step": 270
},
{
"epoch": 0.6952203600248293,
"grad_norm": 0.24198684096336365,
"learning_rate": 0.0001897722483015838,
"loss": 0.1262,
"step": 280
},
{
"epoch": 0.7200496585971446,
"grad_norm": 0.20765037834644318,
"learning_rate": 0.00018846288528028555,
"loss": 0.1281,
"step": 290
},
{
"epoch": 0.74487895716946,
"grad_norm": 0.1669204831123352,
"learning_rate": 0.0001870797704156067,
"loss": 0.1376,
"step": 300
},
{
"epoch": 0.74487895716946,
"eval_loss": 0.1301908791065216,
"eval_runtime": 40.6681,
"eval_samples_per_second": 4.18,
"eval_steps_per_second": 2.09,
"step": 300
},
{
"epoch": 0.7697082557417753,
"grad_norm": 0.17013825476169586,
"learning_rate": 0.00018562405681566216,
"loss": 0.1407,
"step": 310
},
{
"epoch": 0.7945375543140907,
"grad_norm": 0.1282002329826355,
"learning_rate": 0.00018409695811440796,
"loss": 0.1365,
"step": 320
},
{
"epoch": 0.819366852886406,
"grad_norm": 0.12342263758182526,
"learning_rate": 0.00018249974745983023,
"loss": 0.13,
"step": 330
},
{
"epoch": 0.819366852886406,
"eval_loss": 0.1285768747329712,
"eval_runtime": 40.5047,
"eval_samples_per_second": 4.197,
"eval_steps_per_second": 2.099,
"step": 330
},
{
"epoch": 0.8441961514587213,
"grad_norm": 0.14901821315288544,
"learning_rate": 0.00018083375645251684,
"loss": 0.1347,
"step": 340
},
{
"epoch": 0.8690254500310366,
"grad_norm": 0.17922259867191315,
"learning_rate": 0.00017910037403549693,
"loss": 0.1325,
"step": 350
},
{
"epoch": 0.8938547486033519,
"grad_norm": 0.09752878546714783,
"learning_rate": 0.0001773010453362737,
"loss": 0.1272,
"step": 360
},
{
"epoch": 0.8938547486033519,
"eval_loss": 0.12547558546066284,
"eval_runtime": 40.6167,
"eval_samples_per_second": 4.185,
"eval_steps_per_second": 2.093,
"step": 360
},
{
"epoch": 0.9186840471756673,
"grad_norm": 0.17390093207359314,
"learning_rate": 0.0001754372704620164,
"loss": 0.1334,
"step": 370
},
{
"epoch": 0.9435133457479826,
"grad_norm": 0.10675700008869171,
"learning_rate": 0.00017351060324891502,
"loss": 0.1299,
"step": 380
},
{
"epoch": 0.9683426443202979,
"grad_norm": 0.13889861106872559,
"learning_rate": 0.00017152264996674136,
"loss": 0.1366,
"step": 390
},
{
"epoch": 0.9683426443202979,
"eval_loss": 0.12701915204524994,
"eval_runtime": 40.4376,
"eval_samples_per_second": 4.204,
"eval_steps_per_second": 2.102,
"step": 390
},
{
"epoch": 0.9931719428926132,
"grad_norm": 0.24341008067131042,
"learning_rate": 0.00016947506797969562,
"loss": 0.1262,
"step": 400
},
{
"epoch": 1.0173805090006207,
"grad_norm": 0.16011632978916168,
"learning_rate": 0.00016736956436465573,
"loss": 0.1338,
"step": 410
},
{
"epoch": 1.042209807572936,
"grad_norm": 0.13081607222557068,
"learning_rate": 0.00016520789448798087,
"loss": 0.1236,
"step": 420
},
{
"epoch": 1.042209807572936,
"eval_loss": 0.12546835839748383,
"eval_runtime": 40.6293,
"eval_samples_per_second": 4.184,
"eval_steps_per_second": 2.092,
"step": 420
},
{
"epoch": 1.0670391061452513,
"grad_norm": 0.10969800502061844,
"learning_rate": 0.00016299186054205577,
"loss": 0.1263,
"step": 430
},
{
"epoch": 1.0918684047175666,
"grad_norm": 0.16010411083698273,
"learning_rate": 0.00016072331004279614,
"loss": 0.1163,
"step": 440
},
{
"epoch": 1.1166977032898822,
"grad_norm": 0.15039803087711334,
"learning_rate": 0.00015840413428936767,
"loss": 0.1207,
"step": 450
},
{
"epoch": 1.1166977032898822,
"eval_loss": 0.12573280930519104,
"eval_runtime": 40.5321,
"eval_samples_per_second": 4.194,
"eval_steps_per_second": 2.097,
"step": 450
},
{
"epoch": 1.1415270018621975,
"grad_norm": 0.10430776327848434,
"learning_rate": 0.00015603626678740263,
"loss": 0.1277,
"step": 460
},
{
"epoch": 1.1663563004345128,
"grad_norm": 0.11445056647062302,
"learning_rate": 0.000153621681637029,
"loss": 0.1279,
"step": 470
},
{
"epoch": 1.191185599006828,
"grad_norm": 0.08513262867927551,
"learning_rate": 0.00015116239188705556,
"loss": 0.1247,
"step": 480
},
{
"epoch": 1.191185599006828,
"eval_loss": 0.12441173195838928,
"eval_runtime": 40.608,
"eval_samples_per_second": 4.186,
"eval_steps_per_second": 2.093,
"step": 480
},
{
"epoch": 1.2160148975791434,
"grad_norm": 0.13227878510951996,
"learning_rate": 0.00014866044785668563,
"loss": 0.1302,
"step": 490
},
{
"epoch": 1.2408441961514587,
"grad_norm": 0.11930737644433975,
"learning_rate": 0.00014611793542615803,
"loss": 0.1269,
"step": 500
},
{
"epoch": 1.265673494723774,
"grad_norm": 0.08728877454996109,
"learning_rate": 0.00014353697429774084,
"loss": 0.1217,
"step": 510
},
{
"epoch": 1.265673494723774,
"eval_loss": 0.1240459457039833,
"eval_runtime": 40.3865,
"eval_samples_per_second": 4.209,
"eval_steps_per_second": 2.105,
"step": 510
},
{
"epoch": 1.2905027932960893,
"grad_norm": 0.08365845680236816,
"learning_rate": 0.0001409197162285275,
"loss": 0.1222,
"step": 520
},
{
"epoch": 1.3153320918684046,
"grad_norm": 0.07661114633083344,
"learning_rate": 0.000138268343236509,
"loss": 0.1239,
"step": 530
},
{
"epoch": 1.34016139044072,
"grad_norm": 0.10216409713029861,
"learning_rate": 0.00013558506578141682,
"loss": 0.1307,
"step": 540
},
{
"epoch": 1.34016139044072,
"eval_loss": 0.12202484905719757,
"eval_runtime": 40.6661,
"eval_samples_per_second": 4.18,
"eval_steps_per_second": 2.09,
"step": 540
},
{
"epoch": 1.3649906890130354,
"grad_norm": 0.12297932803630829,
"learning_rate": 0.00013287212092185464,
"loss": 0.1162,
"step": 550
},
{
"epoch": 1.3898199875853507,
"grad_norm": 0.1034003421664238,
"learning_rate": 0.00013013177045025374,
"loss": 0.1239,
"step": 560
},
{
"epoch": 1.414649286157666,
"grad_norm": 0.09044180065393448,
"learning_rate": 0.0001273662990072083,
"loss": 0.1267,
"step": 570
},
{
"epoch": 1.414649286157666,
"eval_loss": 0.12448150664567947,
"eval_runtime": 40.6346,
"eval_samples_per_second": 4.184,
"eval_steps_per_second": 2.092,
"step": 570
},
{
"epoch": 1.4394785847299814,
"grad_norm": 0.25621697306632996,
"learning_rate": 0.00012457801217676182,
"loss": 0.1271,
"step": 580
},
{
"epoch": 1.4643078833022967,
"grad_norm": 0.1325441151857376,
"learning_rate": 0.00012176923456423284,
"loss": 0.1201,
"step": 590
},
{
"epoch": 1.489137181874612,
"grad_norm": 0.09877412766218185,
"learning_rate": 0.00011894230785818284,
"loss": 0.1244,
"step": 600
},
{
"epoch": 1.489137181874612,
"eval_loss": 0.12183106690645218,
"eval_runtime": 40.5526,
"eval_samples_per_second": 4.192,
"eval_steps_per_second": 2.096,
"step": 600
},
{
"epoch": 1.5139664804469275,
"grad_norm": 0.0995541661977768,
"learning_rate": 0.00011609958887814129,
"loss": 0.1281,
"step": 610
},
{
"epoch": 1.5387957790192428,
"grad_norm": 0.07273132354021072,
"learning_rate": 0.00011324344760971671,
"loss": 0.1231,
"step": 620
},
{
"epoch": 1.563625077591558,
"grad_norm": 0.0884179100394249,
"learning_rate": 0.00011037626522873019,
"loss": 0.1325,
"step": 630
},
{
"epoch": 1.563625077591558,
"eval_loss": 0.12128450721502304,
"eval_runtime": 40.5099,
"eval_samples_per_second": 4.197,
"eval_steps_per_second": 2.098,
"step": 630
},
{
"epoch": 1.5884543761638734,
"grad_norm": 0.10035305470228195,
"learning_rate": 0.00010750043211602045,
"loss": 0.1225,
"step": 640
},
{
"epoch": 1.6132836747361887,
"grad_norm": 0.10183248668909073,
"learning_rate": 0.00010461834586457398,
"loss": 0.1238,
"step": 650
},
{
"epoch": 1.638112973308504,
"grad_norm": 0.07954993844032288,
"learning_rate": 0.00010173240928064285,
"loss": 0.1122,
"step": 660
},
{
"epoch": 1.638112973308504,
"eval_loss": 0.12219967693090439,
"eval_runtime": 40.5696,
"eval_samples_per_second": 4.19,
"eval_steps_per_second": 2.095,
"step": 660
},
{
"epoch": 1.6629422718808193,
"grad_norm": 0.06452041119337082,
"learning_rate": 9.884502838051595e-05,
"loss": 0.1224,
"step": 670
},
{
"epoch": 1.6877715704531346,
"grad_norm": 0.13734178245067596,
"learning_rate": 9.595861038461398e-05,
"loss": 0.1209,
"step": 680
},
{
"epoch": 1.71260086902545,
"grad_norm": 0.23182320594787598,
"learning_rate": 9.307556171058085e-05,
"loss": 0.1259,
"step": 690
},
{
"epoch": 1.71260086902545,
"eval_loss": 0.12142278999090195,
"eval_runtime": 40.5173,
"eval_samples_per_second": 4.196,
"eval_steps_per_second": 2.098,
"step": 690
},
{
"epoch": 1.7374301675977653,
"grad_norm": 0.07282444834709167,
"learning_rate": 9.019828596704394e-05,
"loss": 0.1277,
"step": 700
},
{
"epoch": 1.7622594661700806,
"grad_norm": 0.09642595052719116,
"learning_rate": 8.732918194971664e-05,
"loss": 0.1195,
"step": 710
},
{
"epoch": 1.7870887647423959,
"grad_norm": 0.08414994180202484,
"learning_rate": 8.447064164151304e-05,
"loss": 0.1318,
"step": 720
},
{
"epoch": 1.7870887647423959,
"eval_loss": 0.12068802118301392,
"eval_runtime": 40.6257,
"eval_samples_per_second": 4.185,
"eval_steps_per_second": 2.092,
"step": 720
},
{
"epoch": 1.8119180633147114,
"grad_norm": 0.10453636199235916,
"learning_rate": 8.162504821834295e-05,
"loss": 0.1218,
"step": 730
},
{
"epoch": 1.8367473618870267,
"grad_norm": 0.07897430658340454,
"learning_rate": 7.879477406224894e-05,
"loss": 0.1242,
"step": 740
},
{
"epoch": 1.861576660459342,
"grad_norm": 0.08946316689252853,
"learning_rate": 7.598217878354237e-05,
"loss": 0.1178,
"step": 750
},
{
"epoch": 1.861576660459342,
"eval_loss": 0.12083352357149124,
"eval_runtime": 40.4985,
"eval_samples_per_second": 4.198,
"eval_steps_per_second": 2.099,
"step": 750
},
{
"epoch": 1.8864059590316573,
"grad_norm": 0.10244396328926086,
"learning_rate": 7.318960725358741e-05,
"loss": 0.1239,
"step": 760
},
{
"epoch": 1.9112352576039728,
"grad_norm": 0.07139507681131363,
"learning_rate": 7.041938764987297e-05,
"loss": 0.1129,
"step": 770
},
{
"epoch": 1.9360645561762881,
"grad_norm": 0.10991553962230682,
"learning_rate": 6.767382951500204e-05,
"loss": 0.1165,
"step": 780
},
{
"epoch": 1.9360645561762881,
"eval_loss": 0.12075280398130417,
"eval_runtime": 40.5036,
"eval_samples_per_second": 4.197,
"eval_steps_per_second": 2.099,
"step": 780
},
{
"epoch": 1.9608938547486034,
"grad_norm": 0.0928761437535286,
"learning_rate": 6.495522183121741e-05,
"loss": 0.1286,
"step": 790
},
{
"epoch": 1.9857231533209188,
"grad_norm": 0.08117903023958206,
"learning_rate": 6.226583111206856e-05,
"loss": 0.1217,
"step": 800
},
{
"epoch": 2.009931719428926,
"grad_norm": 0.08340153843164444,
"learning_rate": 5.960789951281052e-05,
"loss": 0.1138,
"step": 810
},
{
"epoch": 2.009931719428926,
"eval_loss": 0.11938990652561188,
"eval_runtime": 40.4386,
"eval_samples_per_second": 4.204,
"eval_steps_per_second": 2.102,
"step": 810
},
{
"epoch": 2.0347610180012414,
"grad_norm": 0.0680561512708664,
"learning_rate": 5.698364296111056e-05,
"loss": 0.1194,
"step": 820
},
{
"epoch": 2.0595903165735567,
"grad_norm": 0.09459295123815536,
"learning_rate": 5.43952493096211e-05,
"loss": 0.1136,
"step": 830
},
{
"epoch": 2.084419615145872,
"grad_norm": 0.07785169035196304,
"learning_rate": 5.184487651195825e-05,
"loss": 0.12,
"step": 840
},
{
"epoch": 2.084419615145872,
"eval_loss": 0.12003041803836823,
"eval_runtime": 40.5516,
"eval_samples_per_second": 4.192,
"eval_steps_per_second": 2.096,
"step": 840
},
{
"epoch": 2.1092489137181873,
"grad_norm": 0.08185684680938721,
"learning_rate": 4.933465082360807e-05,
"loss": 0.1262,
"step": 850
},
{
"epoch": 2.1340782122905027,
"grad_norm": 0.08443768322467804,
"learning_rate": 4.686666502925908e-05,
"loss": 0.1192,
"step": 860
},
{
"epoch": 2.158907510862818,
"grad_norm": 0.08123073726892471,
"learning_rate": 4.444297669803981e-05,
"loss": 0.1213,
"step": 870
},
{
"epoch": 2.158907510862818,
"eval_loss": 0.1197180226445198,
"eval_runtime": 40.546,
"eval_samples_per_second": 4.193,
"eval_steps_per_second": 2.096,
"step": 870
},
{
"epoch": 2.1837368094351333,
"grad_norm": 0.09900680184364319,
"learning_rate": 4.206560646811545e-05,
"loss": 0.123,
"step": 880
},
{
"epoch": 2.2085661080074486,
"grad_norm": 0.08664524555206299,
"learning_rate": 3.973653636207437e-05,
"loss": 0.11,
"step": 890
},
{
"epoch": 2.2333954065797643,
"grad_norm": 0.0986846536397934,
"learning_rate": 3.745770813450824e-05,
"loss": 0.1175,
"step": 900
},
{
"epoch": 2.2333954065797643,
"eval_loss": 0.11943857371807098,
"eval_runtime": 40.5782,
"eval_samples_per_second": 4.189,
"eval_steps_per_second": 2.095,
"step": 900
},
{
"epoch": 2.2582247051520796,
"grad_norm": 0.07558804750442505,
"learning_rate": 3.523102165316381e-05,
"loss": 0.1148,
"step": 910
},
{
"epoch": 2.283054003724395,
"grad_norm": 0.08511102199554443,
"learning_rate": 3.3058333315016065e-05,
"loss": 0.1157,
"step": 920
},
{
"epoch": 2.3078833022967102,
"grad_norm": 0.10936655849218369,
"learning_rate": 3.094145449858285e-05,
"loss": 0.1202,
"step": 930
},
{
"epoch": 2.3078833022967102,
"eval_loss": 0.11901655793190002,
"eval_runtime": 40.5004,
"eval_samples_per_second": 4.197,
"eval_steps_per_second": 2.099,
"step": 930
},
{
"epoch": 2.3327126008690255,
"grad_norm": 0.07215583324432373,
"learning_rate": 2.8882150053771995e-05,
"loss": 0.1127,
"step": 940
},
{
"epoch": 2.357541899441341,
"grad_norm": 0.08981175720691681,
"learning_rate": 2.688213683051892e-05,
"loss": 0.1232,
"step": 950
},
{
"epoch": 2.382371198013656,
"grad_norm": 0.09188387542963028,
"learning_rate": 2.4943082247442585e-05,
"loss": 0.1122,
"step": 960
},
{
"epoch": 2.382371198013656,
"eval_loss": 0.11975335329771042,
"eval_runtime": 40.5374,
"eval_samples_per_second": 4.194,
"eval_steps_per_second": 2.097,
"step": 960
},
{
"epoch": 2.4072004965859715,
"grad_norm": 0.09929151087999344,
"learning_rate": 2.3066602901712108e-05,
"loss": 0.1224,
"step": 970
},
{
"epoch": 2.4320297951582868,
"grad_norm": 0.08618225902318954,
"learning_rate": 2.1254263221283654e-05,
"loss": 0.1092,
"step": 980
},
{
"epoch": 2.456859093730602,
"grad_norm": 0.09560118615627289,
"learning_rate": 1.950757416063077e-05,
"loss": 0.1139,
"step": 990
},
{
"epoch": 2.456859093730602,
"eval_loss": 0.11890735477209091,
"eval_runtime": 40.6128,
"eval_samples_per_second": 4.186,
"eval_steps_per_second": 2.093,
"step": 990
},
{
"epoch": 2.4816883923029174,
"grad_norm": 0.10124364495277405,
"learning_rate": 1.7827991941056177e-05,
"loss": 0.112,
"step": 1000
},
{
"epoch": 2.5065176908752327,
"grad_norm": 0.0958225354552269,
"learning_rate": 1.621691683663418e-05,
"loss": 0.1224,
"step": 1010
},
{
"epoch": 2.531346989447548,
"grad_norm": 0.08832782506942749,
"learning_rate": 1.4675692006797137e-05,
"loss": 0.1187,
"step": 1020
},
{
"epoch": 2.531346989447548,
"eval_loss": 0.11912033706903458,
"eval_runtime": 40.6481,
"eval_samples_per_second": 4.182,
"eval_steps_per_second": 2.091,
"step": 1020
},
{
"epoch": 2.5561762880198633,
"grad_norm": 0.1004873588681221,
"learning_rate": 1.3205602376538163e-05,
"loss": 0.124,
"step": 1030
},
{
"epoch": 2.5810055865921786,
"grad_norm": 0.08371691405773163,
"learning_rate": 1.1807873565164506e-05,
"loss": 0.1173,
"step": 1040
},
{
"epoch": 2.6058348851644944,
"grad_norm": 0.16757965087890625,
"learning_rate": 1.0483670864493778e-05,
"loss": 0.1201,
"step": 1050
},
{
"epoch": 2.6058348851644944,
"eval_loss": 0.11892388015985489,
"eval_runtime": 40.6187,
"eval_samples_per_second": 4.185,
"eval_steps_per_second": 2.093,
"step": 1050
},
{
"epoch": 2.630664183736809,
"grad_norm": 0.0729515329003334,
"learning_rate": 9.234098267345958e-06,
"loss": 0.1145,
"step": 1060
},
{
"epoch": 2.655493482309125,
"grad_norm": 0.12517458200454712,
"learning_rate": 8.060197547140347e-06,
"loss": 0.1124,
"step": 1070
},
{
"epoch": 2.68032278088144,
"grad_norm": 0.0805535688996315,
"learning_rate": 6.962947389365071e-06,
"loss": 0.1165,
"step": 1080
},
{
"epoch": 2.68032278088144,
"eval_loss": 0.11893562972545624,
"eval_runtime": 40.5518,
"eval_samples_per_second": 4.192,
"eval_steps_per_second": 2.096,
"step": 1080
},
{
"epoch": 2.7051520794537556,
"grad_norm": 0.1218450516462326,
"learning_rate": 5.943262575643238e-06,
"loss": 0.1275,
"step": 1090
},
{
"epoch": 2.729981378026071,
"grad_norm": 0.09670189023017883,
"learning_rate": 5.001993221076162e-06,
"loss": 0.1187,
"step": 1100
},
{
"epoch": 2.754810676598386,
"grad_norm": 0.09584905207157135,
"learning_rate": 4.139924065499035e-06,
"loss": 0.115,
"step": 1110
},
{
"epoch": 2.754810676598386,
"eval_loss": 0.1188565194606781,
"eval_runtime": 40.4097,
"eval_samples_per_second": 4.207,
"eval_steps_per_second": 2.103,
"step": 1110
},
{
"epoch": 2.7796399751707015,
"grad_norm": 0.11602101475000381,
"learning_rate": 3.3577738192404395e-06,
"loss": 0.1132,
"step": 1120
},
{
"epoch": 2.804469273743017,
"grad_norm": 0.09847009181976318,
"learning_rate": 2.656194563930714e-06,
"loss": 0.1158,
"step": 1130
},
{
"epoch": 2.829298572315332,
"grad_norm": 0.0924677848815918,
"learning_rate": 2.035771208859194e-06,
"loss": 0.1156,
"step": 1140
},
{
"epoch": 2.829298572315332,
"eval_loss": 0.11884363740682602,
"eval_runtime": 40.55,
"eval_samples_per_second": 4.192,
"eval_steps_per_second": 2.096,
"step": 1140
},
{
"epoch": 2.8541278708876474,
"grad_norm": 0.10300548374652863,
"learning_rate": 1.49702100333291e-06,
"loss": 0.1124,
"step": 1150
},
{
"epoch": 2.8789571694599627,
"grad_norm": 0.12260911613702774,
"learning_rate": 1.0403931054440374e-06,
"loss": 0.1171,
"step": 1160
},
{
"epoch": 2.903786468032278,
"grad_norm": 0.10273098200559616,
"learning_rate": 6.662682076050031e-07,
"loss": 0.1076,
"step": 1170
},
{
"epoch": 2.903786468032278,
"eval_loss": 0.1188308522105217,
"eval_runtime": 40.5462,
"eval_samples_per_second": 4.193,
"eval_steps_per_second": 2.096,
"step": 1170
},
{
"epoch": 2.9286157666045933,
"grad_norm": 0.10531267523765564,
"learning_rate": 3.7495821916382344e-07,
"loss": 0.1255,
"step": 1180
},
{
"epoch": 2.9534450651769086,
"grad_norm": 0.12192708998918533,
"learning_rate": 1.6670600636403687e-07,
"loss": 0.1037,
"step": 1190
},
{
"epoch": 2.978274363749224,
"grad_norm": 0.09150505810976028,
"learning_rate": 4.168518986628067e-08,
"loss": 0.1113,
"step": 1200
},
{
"epoch": 2.978274363749224,
"eval_loss": 0.11880358308553696,
"eval_runtime": 40.6429,
"eval_samples_per_second": 4.183,
"eval_steps_per_second": 2.091,
"step": 1200
}
],
"logging_steps": 10,
"max_steps": 1209,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.2911362158634496e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}