{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.8874901652242329,
"eval_steps": 30,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02098085496984002,
"grad_norm": 1.8758461475372314,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.2028,
"step": 10
},
{
"epoch": 0.04196170993968004,
"grad_norm": 0.6216842532157898,
"learning_rate": 7.916666666666666e-05,
"loss": 0.6879,
"step": 20
},
{
"epoch": 0.06294256490952006,
"grad_norm": 0.5248464941978455,
"learning_rate": 0.00012083333333333333,
"loss": 0.4469,
"step": 30
},
{
"epoch": 0.06294256490952006,
"eval_loss": 0.3734886944293976,
"eval_runtime": 53.5314,
"eval_samples_per_second": 3.755,
"eval_steps_per_second": 3.755,
"step": 30
},
{
"epoch": 0.08392341987936008,
"grad_norm": 0.580585777759552,
"learning_rate": 0.00016250000000000002,
"loss": 0.3179,
"step": 40
},
{
"epoch": 0.1049042748492001,
"grad_norm": 0.3856310248374939,
"learning_rate": 0.00019999939880837878,
"loss": 0.2501,
"step": 50
},
{
"epoch": 0.12588512981904013,
"grad_norm": 0.39517685770988464,
"learning_rate": 0.00019992726456006157,
"loss": 0.2317,
"step": 60
},
{
"epoch": 0.12588512981904013,
"eval_loss": 0.2062484174966812,
"eval_runtime": 53.2039,
"eval_samples_per_second": 3.778,
"eval_steps_per_second": 3.778,
"step": 60
},
{
"epoch": 0.14686598478888016,
"grad_norm": 0.3303804397583008,
"learning_rate": 0.00019973499136147606,
"loss": 0.1999,
"step": 70
},
{
"epoch": 0.16784683975872017,
"grad_norm": 0.34801360964775085,
"learning_rate": 0.00019942281037576222,
"loss": 0.1834,
"step": 80
},
{
"epoch": 0.1888276947285602,
"grad_norm": 0.1771993190050125,
"learning_rate": 0.00019899109692687278,
"loss": 0.1827,
"step": 90
},
{
"epoch": 0.1888276947285602,
"eval_loss": 0.1743820756673813,
"eval_runtime": 53.1567,
"eval_samples_per_second": 3.781,
"eval_steps_per_second": 3.781,
"step": 90
},
{
"epoch": 0.2098085496984002,
"grad_norm": 0.2773347496986389,
"learning_rate": 0.00019844037004833473,
"loss": 0.1683,
"step": 100
},
{
"epoch": 0.23078940466824024,
"grad_norm": 0.16112257540225983,
"learning_rate": 0.00019777129185923395,
"loss": 0.1697,
"step": 110
},
{
"epoch": 0.25177025963808025,
"grad_norm": 0.26540514826774597,
"learning_rate": 0.00019698466676817348,
"loss": 0.168,
"step": 120
},
{
"epoch": 0.25177025963808025,
"eval_loss": 0.15549546480178833,
"eval_runtime": 53.5518,
"eval_samples_per_second": 3.753,
"eval_steps_per_second": 3.753,
"step": 120
},
{
"epoch": 0.2727511146079203,
"grad_norm": 0.1737111657857895,
"learning_rate": 0.0001960814405061619,
"loss": 0.1524,
"step": 130
},
{
"epoch": 0.2937319695777603,
"grad_norm": 0.23373852670192719,
"learning_rate": 0.000195062698989595,
"loss": 0.1577,
"step": 140
},
{
"epoch": 0.3147128245476003,
"grad_norm": 0.23839105665683746,
"learning_rate": 0.0001939296670146976,
"loss": 0.1562,
"step": 150
},
{
"epoch": 0.3147128245476003,
"eval_loss": 0.14308005571365356,
"eval_runtime": 53.5362,
"eval_samples_per_second": 3.754,
"eval_steps_per_second": 3.754,
"step": 150
},
{
"epoch": 0.33569367951744034,
"grad_norm": 0.22177831828594208,
"learning_rate": 0.00019268370678499533,
"loss": 0.1507,
"step": 160
},
{
"epoch": 0.35667453448728037,
"grad_norm": 0.16764189302921295,
"learning_rate": 0.0001913263162735862,
"loss": 0.15,
"step": 170
},
{
"epoch": 0.3776553894571204,
"grad_norm": 0.17721307277679443,
"learning_rate": 0.00018985912742218165,
"loss": 0.148,
"step": 180
},
{
"epoch": 0.3776553894571204,
"eval_loss": 0.13974663615226746,
"eval_runtime": 53.2066,
"eval_samples_per_second": 3.778,
"eval_steps_per_second": 3.778,
"step": 180
},
{
"epoch": 0.3986362444269604,
"grad_norm": 0.11757932603359222,
"learning_rate": 0.0001882839041790818,
"loss": 0.1482,
"step": 190
},
{
"epoch": 0.4196170993968004,
"grad_norm": 0.17747701704502106,
"learning_rate": 0.00018660254037844388,
"loss": 0.1442,
"step": 200
},
{
"epoch": 0.44059795436664045,
"grad_norm": 0.15337860584259033,
"learning_rate": 0.0001848170574633937,
"loss": 0.1481,
"step": 210
},
{
"epoch": 0.44059795436664045,
"eval_loss": 0.13732697069644928,
"eval_runtime": 52.9478,
"eval_samples_per_second": 3.796,
"eval_steps_per_second": 3.796,
"step": 210
},
{
"epoch": 0.4615788093364805,
"grad_norm": 0.25963345170021057,
"learning_rate": 0.0001829296020557174,
"loss": 0.1557,
"step": 220
},
{
"epoch": 0.48255966430632047,
"grad_norm": 0.12170088291168213,
"learning_rate": 0.0001809424433750555,
"loss": 0.1401,
"step": 230
},
{
"epoch": 0.5035405192761605,
"grad_norm": 0.1923166960477829,
"learning_rate": 0.0001788579705107017,
"loss": 0.1455,
"step": 240
},
{
"epoch": 0.5035405192761605,
"eval_loss": 0.13553670048713684,
"eval_runtime": 53.4546,
"eval_samples_per_second": 3.76,
"eval_steps_per_second": 3.76,
"step": 240
},
{
"epoch": 0.5245213742460005,
"grad_norm": 0.11837635189294815,
"learning_rate": 0.00017667868954928694,
"loss": 0.147,
"step": 250
},
{
"epoch": 0.5455022292158406,
"grad_norm": 0.10800451040267944,
"learning_rate": 0.0001744072205618019,
"loss": 0.1362,
"step": 260
},
{
"epoch": 0.5664830841856806,
"grad_norm": 0.12966878712177277,
"learning_rate": 0.0001720462944535798,
"loss": 0.1509,
"step": 270
},
{
"epoch": 0.5664830841856806,
"eval_loss": 0.13392792642116547,
"eval_runtime": 53.5435,
"eval_samples_per_second": 3.754,
"eval_steps_per_second": 3.754,
"step": 270
},
{
"epoch": 0.5874639391555206,
"grad_norm": 0.11909834295511246,
"learning_rate": 0.00016959874968102735,
"loss": 0.1399,
"step": 280
},
{
"epoch": 0.6084447941253606,
"grad_norm": 0.1039784699678421,
"learning_rate": 0.00016706752883905107,
"loss": 0.141,
"step": 290
},
{
"epoch": 0.6294256490952006,
"grad_norm": 0.11826395988464355,
"learning_rate": 0.0001644556751232812,
"loss": 0.1495,
"step": 300
},
{
"epoch": 0.6294256490952006,
"eval_loss": 0.13278058171272278,
"eval_runtime": 53.7282,
"eval_samples_per_second": 3.741,
"eval_steps_per_second": 3.741,
"step": 300
},
{
"epoch": 0.6504065040650406,
"grad_norm": 0.12009374052286148,
"learning_rate": 0.0001617663286713474,
"loss": 0.1407,
"step": 310
},
{
"epoch": 0.6713873590348807,
"grad_norm": 0.11825403571128845,
"learning_rate": 0.00015900272278760407,
"loss": 0.1387,
"step": 320
},
{
"epoch": 0.6923682140047207,
"grad_norm": 0.11216018348932266,
"learning_rate": 0.00015616818005584554,
"loss": 0.1404,
"step": 330
},
{
"epoch": 0.6923682140047207,
"eval_loss": 0.13230031728744507,
"eval_runtime": 53.7398,
"eval_samples_per_second": 3.74,
"eval_steps_per_second": 3.74,
"step": 330
},
{
"epoch": 0.7133490689745607,
"grad_norm": 0.10475457459688187,
"learning_rate": 0.0001532661083446829,
"loss": 0.1503,
"step": 340
},
{
"epoch": 0.7343299239444008,
"grad_norm": 0.11185269057750702,
"learning_rate": 0.00015029999671038635,
"loss": 0.1321,
"step": 350
},
{
"epoch": 0.7553107789142408,
"grad_norm": 0.0922306478023529,
"learning_rate": 0.00014727341120211867,
"loss": 0.1428,
"step": 360
},
{
"epoch": 0.7553107789142408,
"eval_loss": 0.1310930848121643,
"eval_runtime": 53.6353,
"eval_samples_per_second": 3.748,
"eval_steps_per_second": 3.748,
"step": 360
},
{
"epoch": 0.7762916338840807,
"grad_norm": 0.12543067336082458,
"learning_rate": 0.00014418999057460276,
"loss": 0.1437,
"step": 370
},
{
"epoch": 0.7972724888539208,
"grad_norm": 0.11975102126598358,
"learning_rate": 0.0001410534419133778,
"loss": 0.1357,
"step": 380
},
{
"epoch": 0.8182533438237608,
"grad_norm": 0.09955570101737976,
"learning_rate": 0.00013786753617790404,
"loss": 0.1446,
"step": 390
},
{
"epoch": 0.8182533438237608,
"eval_loss": 0.13151544332504272,
"eval_runtime": 53.6082,
"eval_samples_per_second": 3.749,
"eval_steps_per_second": 3.749,
"step": 390
},
{
"epoch": 0.8392341987936008,
"grad_norm": 0.11849252134561539,
"learning_rate": 0.00013463610366787392,
"loss": 0.1452,
"step": 400
},
{
"epoch": 0.8602150537634409,
"grad_norm": 0.10621926933526993,
"learning_rate": 0.00013136302941818085,
"loss": 0.1411,
"step": 410
},
{
"epoch": 0.8811959087332809,
"grad_norm": 0.09327146410942078,
"learning_rate": 0.00012805224852808163,
"loss": 0.1391,
"step": 420
},
{
"epoch": 0.8811959087332809,
"eval_loss": 0.13071787357330322,
"eval_runtime": 53.9186,
"eval_samples_per_second": 3.728,
"eval_steps_per_second": 3.728,
"step": 420
},
{
"epoch": 0.9021767637031209,
"grad_norm": 0.5398991703987122,
"learning_rate": 0.00012470774143016853,
"loss": 0.1399,
"step": 430
},
{
"epoch": 0.923157618672961,
"grad_norm": 0.18647532165050507,
"learning_rate": 0.00012133352910483838,
"loss": 0.1399,
"step": 440
},
{
"epoch": 0.9441384736428009,
"grad_norm": 0.16210441291332245,
"learning_rate": 0.0001179336682460128,
"loss": 0.1509,
"step": 450
},
{
"epoch": 0.9441384736428009,
"eval_loss": 0.13233241438865662,
"eval_runtime": 53.9437,
"eval_samples_per_second": 3.726,
"eval_steps_per_second": 3.726,
"step": 450
},
{
"epoch": 0.9651193286126409,
"grad_norm": 0.09105144441127777,
"learning_rate": 0.00011451224638392129,
"loss": 0.1428,
"step": 460
},
{
"epoch": 0.986100183582481,
"grad_norm": 0.08386187255382538,
"learning_rate": 0.0001110733769708108,
"loss": 0.14,
"step": 470
},
{
"epoch": 1.006294256490952,
"grad_norm": 0.07673283666372299,
"learning_rate": 0.00010762119443549035,
"loss": 0.142,
"step": 480
},
{
"epoch": 1.006294256490952,
"eval_loss": 0.1293467879295349,
"eval_runtime": 53.6668,
"eval_samples_per_second": 3.745,
"eval_steps_per_second": 3.745,
"step": 480
},
{
"epoch": 1.027275111460792,
"grad_norm": 0.09950114041566849,
"learning_rate": 0.00010415984921265609,
"loss": 0.1365,
"step": 490
},
{
"epoch": 1.0482559664306321,
"grad_norm": 0.08757766336202621,
"learning_rate": 0.00010069350275297337,
"loss": 0.1297,
"step": 500
},
{
"epoch": 1.0692368214004722,
"grad_norm": 0.08058227598667145,
"learning_rate": 9.722632251991444e-05,
"loss": 0.1258,
"step": 510
},
{
"epoch": 1.0692368214004722,
"eval_loss": 0.1290971338748932,
"eval_runtime": 53.9631,
"eval_samples_per_second": 3.725,
"eval_steps_per_second": 3.725,
"step": 510
},
{
"epoch": 1.0902176763703122,
"grad_norm": 0.14287236332893372,
"learning_rate": 9.376247697936719e-05,
"loss": 0.1405,
"step": 520
},
{
"epoch": 1.1111985313401522,
"grad_norm": 0.10073138028383255,
"learning_rate": 9.030613058803881e-05,
"loss": 0.1372,
"step": 530
},
{
"epoch": 1.132179386309992,
"grad_norm": 0.09560772776603699,
"learning_rate": 8.686143878667965e-05,
"loss": 0.1344,
"step": 540
},
{
"epoch": 1.132179386309992,
"eval_loss": 0.1287485510110855,
"eval_runtime": 53.9488,
"eval_samples_per_second": 3.726,
"eval_steps_per_second": 3.726,
"step": 540
},
{
"epoch": 1.153160241279832,
"grad_norm": 0.09153210371732712,
"learning_rate": 8.343254300414628e-05,
"loss": 0.1379,
"step": 550
},
{
"epoch": 1.1741410962496721,
"grad_norm": 0.0932949036359787,
"learning_rate": 8.002356567831103e-05,
"loss": 0.1361,
"step": 560
},
{
"epoch": 1.1951219512195121,
"grad_norm": 0.1091330498456955,
"learning_rate": 7.663860529980317e-05,
"loss": 0.1356,
"step": 570
},
{
"epoch": 1.1951219512195121,
"eval_loss": 0.13008803129196167,
"eval_runtime": 53.6432,
"eval_samples_per_second": 3.747,
"eval_steps_per_second": 3.747,
"step": 570
},
{
"epoch": 1.2161028061893522,
"grad_norm": 0.08824314922094345,
"learning_rate": 7.328173148454151e-05,
"loss": 0.1335,
"step": 580
},
{
"epoch": 1.2370836611591922,
"grad_norm": 0.12611347436904907,
"learning_rate": 6.99569800809816e-05,
"loss": 0.1353,
"step": 590
},
{
"epoch": 1.2580645161290323,
"grad_norm": 0.08815793693065643,
"learning_rate": 6.66683483179608e-05,
"loss": 0.1365,
"step": 600
},
{
"epoch": 1.2580645161290323,
"eval_loss": 0.12925004959106445,
"eval_runtime": 53.6403,
"eval_samples_per_second": 3.747,
"eval_steps_per_second": 3.747,
"step": 600
},
{
"epoch": 1.2790453710988723,
"grad_norm": 0.12618786096572876,
"learning_rate": 6.341978999897365e-05,
"loss": 0.1408,
"step": 610
},
{
"epoch": 1.3000262260687123,
"grad_norm": 0.0881665050983429,
"learning_rate": 6.021521074865678e-05,
"loss": 0.1384,
"step": 620
},
{
"epoch": 1.3210070810385524,
"grad_norm": 0.08780647069215775,
"learning_rate": 5.705846331719675e-05,
"loss": 0.138,
"step": 630
},
{
"epoch": 1.3210070810385524,
"eval_loss": 0.12802067399024963,
"eval_runtime": 53.8068,
"eval_samples_per_second": 3.736,
"eval_steps_per_second": 3.736,
"step": 630
},
{
"epoch": 1.3419879360083924,
"grad_norm": 0.08264625817537308,
"learning_rate": 5.395334294830765e-05,
"loss": 0.1376,
"step": 640
},
{
"epoch": 1.3629687909782324,
"grad_norm": 0.08895017951726913,
"learning_rate": 5.090358281634594e-05,
"loss": 0.1325,
"step": 650
},
{
"epoch": 1.3839496459480725,
"grad_norm": 0.08006913214921951,
"learning_rate": 4.791284953804969e-05,
"loss": 0.1336,
"step": 660
},
{
"epoch": 1.3839496459480725,
"eval_loss": 0.1273050308227539,
"eval_runtime": 53.8944,
"eval_samples_per_second": 3.73,
"eval_steps_per_second": 3.73,
"step": 660
},
{
"epoch": 1.4049305009179123,
"grad_norm": 0.09627630561590195,
"learning_rate": 4.498473876429726e-05,
"loss": 0.1341,
"step": 670
},
{
"epoch": 1.4259113558877523,
"grad_norm": 0.08036801964044571,
"learning_rate": 4.21227708571858e-05,
"loss": 0.1419,
"step": 680
},
{
"epoch": 1.4468922108575923,
"grad_norm": 0.1135723665356636,
"learning_rate": 3.93303866576267e-05,
"loss": 0.13,
"step": 690
},
{
"epoch": 1.4468922108575923,
"eval_loss": 0.12765151262283325,
"eval_runtime": 53.9209,
"eval_samples_per_second": 3.728,
"eval_steps_per_second": 3.728,
"step": 690
},
{
"epoch": 1.4678730658274324,
"grad_norm": 0.07758687436580658,
"learning_rate": 3.6610943348546526e-05,
"loss": 0.1427,
"step": 700
},
{
"epoch": 1.4888539207972724,
"grad_norm": 0.09151093661785126,
"learning_rate": 3.3967710418666984e-05,
"loss": 0.1372,
"step": 710
},
{
"epoch": 1.5098347757671124,
"grad_norm": 0.10960382223129272,
"learning_rate": 3.1403865731716266e-05,
"loss": 0.134,
"step": 720
},
{
"epoch": 1.5098347757671124,
"eval_loss": 0.12656451761722565,
"eval_runtime": 53.601,
"eval_samples_per_second": 3.75,
"eval_steps_per_second": 3.75,
"step": 720
},
{
"epoch": 1.5308156307369525,
"grad_norm": 0.10491708666086197,
"learning_rate": 2.892249170579826e-05,
"loss": 0.1341,
"step": 730
},
{
"epoch": 1.5517964857067925,
"grad_norm": 0.09089571982622147,
"learning_rate": 2.652657160751193e-05,
"loss": 0.1377,
"step": 740
},
{
"epoch": 1.5727773406766326,
"grad_norm": 0.07756100594997406,
"learning_rate": 2.4218985965277675e-05,
"loss": 0.1325,
"step": 750
},
{
"epoch": 1.5727773406766326,
"eval_loss": 0.12670381367206573,
"eval_runtime": 53.6584,
"eval_samples_per_second": 3.746,
"eval_steps_per_second": 3.746,
"step": 750
},
{
"epoch": 1.5937581956464726,
"grad_norm": 0.10038470476865768,
"learning_rate": 2.2002509106181624e-05,
"loss": 0.1275,
"step": 760
},
{
"epoch": 1.6147390506163126,
"grad_norm": 0.09714746475219727,
"learning_rate": 1.9879805820502174e-05,
"loss": 0.138,
"step": 770
},
{
"epoch": 1.6357199055861527,
"grad_norm": 0.10250572115182877,
"learning_rate": 1.785342815792862e-05,
"loss": 0.1337,
"step": 780
},
{
"epoch": 1.6357199055861527,
"eval_loss": 0.12649498879909515,
"eval_runtime": 53.9776,
"eval_samples_per_second": 3.724,
"eval_steps_per_second": 3.724,
"step": 780
},
{
"epoch": 1.6567007605559927,
"grad_norm": 0.10221794247627258,
"learning_rate": 1.5925812359323745e-05,
"loss": 0.138,
"step": 790
},
{
"epoch": 1.6776816155258327,
"grad_norm": 0.08815432339906693,
"learning_rate": 1.4099275927719235e-05,
"loss": 0.1308,
"step": 800
},
{
"epoch": 1.6986624704956728,
"grad_norm": 0.08850682526826859,
"learning_rate": 1.2376014842065265e-05,
"loss": 0.1385,
"step": 810
},
{
"epoch": 1.6986624704956728,
"eval_loss": 0.12599113583564758,
"eval_runtime": 53.6591,
"eval_samples_per_second": 3.746,
"eval_steps_per_second": 3.746,
"step": 810
},
{
"epoch": 1.7196433254655128,
"grad_norm": 0.0861755833029747,
"learning_rate": 1.0758100917083991e-05,
"loss": 0.1321,
"step": 820
},
{
"epoch": 1.7406241804353528,
"grad_norm": 0.09379922598600388,
"learning_rate": 9.247479312401641e-06,
"loss": 0.1288,
"step": 830
},
{
"epoch": 1.7616050354051929,
"grad_norm": 0.11254911869764328,
"learning_rate": 7.845966193952824e-06,
"loss": 0.126,
"step": 840
},
{
"epoch": 1.7616050354051929,
"eval_loss": 0.12584222853183746,
"eval_runtime": 54.0496,
"eval_samples_per_second": 3.719,
"eval_steps_per_second": 3.719,
"step": 840
},
{
"epoch": 1.782585890375033,
"grad_norm": 0.09247393906116486,
"learning_rate": 6.555246550469907e-06,
"loss": 0.1365,
"step": 850
},
{
"epoch": 1.803566745344873,
"grad_norm": 0.11225956678390503,
"learning_rate": 5.376872167681635e-06,
"loss": 0.1325,
"step": 860
},
{
"epoch": 1.824547600314713,
"grad_norm": 0.08813902735710144,
"learning_rate": 4.312259762657145e-06,
"loss": 0.1321,
"step": 870
},
{
"epoch": 1.824547600314713,
"eval_loss": 0.12573501467704773,
"eval_runtime": 53.8714,
"eval_samples_per_second": 3.731,
"eval_steps_per_second": 3.731,
"step": 870
},
{
"epoch": 1.845528455284553,
"grad_norm": 0.09407710283994675,
"learning_rate": 3.3626892805379562e-06,
"loss": 0.1295,
"step": 880
},
{
"epoch": 1.8665093102543928,
"grad_norm": 0.10119906812906265,
"learning_rate": 2.5293023557061644e-06,
"loss": 0.1259,
"step": 890
},
{
"epoch": 1.8874901652242329,
"grad_norm": 0.12075681984424591,
"learning_rate": 1.8131009392384323e-06,
"loss": 0.1338,
"step": 900
},
{
"epoch": 1.8874901652242329,
"eval_loss": 0.12563975155353546,
"eval_runtime": 53.9437,
"eval_samples_per_second": 3.726,
"eval_steps_per_second": 3.726,
"step": 900
}
],
"logging_steps": 10,
"max_steps": 954,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.4996258681421312e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}