{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 38.4642166344294,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15473887814313347,
"grad_norm": 4.871020317077637,
"learning_rate": 1.8e-05,
"loss": 15.2697,
"step": 10
},
{
"epoch": 0.30947775628626695,
"grad_norm": 3.2353532314300537,
"learning_rate": 3.8e-05,
"loss": 14.0214,
"step": 20
},
{
"epoch": 0.46421663442940037,
"grad_norm": 4.503875732421875,
"learning_rate": 5.8e-05,
"loss": 10.7152,
"step": 30
},
{
"epoch": 0.6189555125725339,
"grad_norm": 1.743087649345398,
"learning_rate": 7.800000000000001e-05,
"loss": 6.5351,
"step": 40
},
{
"epoch": 0.7736943907156673,
"grad_norm": 1.3860297203063965,
"learning_rate": 9.8e-05,
"loss": 4.8329,
"step": 50
},
{
"epoch": 0.9284332688588007,
"grad_norm": 1.0991828441619873,
"learning_rate": 0.000118,
"loss": 4.2633,
"step": 60
},
{
"epoch": 1.0773694390715667,
"grad_norm": 0.9841532111167908,
"learning_rate": 0.000138,
"loss": 3.6229,
"step": 70
},
{
"epoch": 1.2321083172147003,
"grad_norm": 0.9759953618049622,
"learning_rate": 0.00015800000000000002,
"loss": 3.5272,
"step": 80
},
{
"epoch": 1.3868471953578336,
"grad_norm": 1.0690470933914185,
"learning_rate": 0.00017800000000000002,
"loss": 3.2966,
"step": 90
},
{
"epoch": 1.5415860735009672,
"grad_norm": 1.2185126543045044,
"learning_rate": 0.00019800000000000002,
"loss": 3.1252,
"step": 100
},
{
"epoch": 1.6963249516441006,
"grad_norm": 1.4583178758621216,
"learning_rate": 0.0001999990241251755,
"loss": 2.9714,
"step": 110
},
{
"epoch": 1.851063829787234,
"grad_norm": 1.4185903072357178,
"learning_rate": 0.00019999565075517342,
"loss": 2.7636,
"step": 120
},
{
"epoch": 2.0,
"grad_norm": 1.201859951019287,
"learning_rate": 0.00019998986792342066,
"loss": 2.4738,
"step": 130
},
{
"epoch": 2.1547388781431334,
"grad_norm": 1.5350439548492432,
"learning_rate": 0.0001999816757692584,
"loss": 2.0558,
"step": 140
},
{
"epoch": 2.3094777562862667,
"grad_norm": 1.5729354619979858,
"learning_rate": 0.00019997107449008203,
"loss": 1.9569,
"step": 150
},
{
"epoch": 2.4642166344294005,
"grad_norm": 1.4811993837356567,
"learning_rate": 0.00019995806434133648,
"loss": 1.9537,
"step": 160
},
{
"epoch": 2.618955512572534,
"grad_norm": 1.7899974584579468,
"learning_rate": 0.00019994264563650988,
"loss": 1.8071,
"step": 170
},
{
"epoch": 2.7736943907156673,
"grad_norm": 1.619870662689209,
"learning_rate": 0.0001999248187471262,
"loss": 1.8032,
"step": 180
},
{
"epoch": 2.9284332688588006,
"grad_norm": 2.305919647216797,
"learning_rate": 0.00019990458410273622,
"loss": 1.672,
"step": 190
},
{
"epoch": 3.0773694390715667,
"grad_norm": 1.7628117799758911,
"learning_rate": 0.0001998819421909072,
"loss": 1.4775,
"step": 200
},
{
"epoch": 3.2321083172147,
"grad_norm": 1.5669045448303223,
"learning_rate": 0.000199856893557211,
"loss": 1.2942,
"step": 210
},
{
"epoch": 3.3868471953578334,
"grad_norm": 1.6973986625671387,
"learning_rate": 0.00019982943880521113,
"loss": 1.2287,
"step": 220
},
{
"epoch": 3.541586073500967,
"grad_norm": 1.7542940378189087,
"learning_rate": 0.0001997995785964481,
"loss": 1.2301,
"step": 230
},
{
"epoch": 3.6963249516441006,
"grad_norm": 1.5044289827346802,
"learning_rate": 0.0001997673136504236,
"loss": 1.1941,
"step": 240
},
{
"epoch": 3.851063829787234,
"grad_norm": 1.5571215152740479,
"learning_rate": 0.0001997326447445829,
"loss": 1.1606,
"step": 250
},
{
"epoch": 4.0,
"grad_norm": 1.3265422582626343,
"learning_rate": 0.00019969557271429638,
"loss": 1.0955,
"step": 260
},
{
"epoch": 4.154738878143133,
"grad_norm": 1.442328929901123,
"learning_rate": 0.00019965609845283927,
"loss": 0.9869,
"step": 270
},
{
"epoch": 4.309477756286267,
"grad_norm": 1.4749681949615479,
"learning_rate": 0.00019961422291137017,
"loss": 0.9421,
"step": 280
},
{
"epoch": 4.4642166344294,
"grad_norm": 1.3018440008163452,
"learning_rate": 0.00019956994709890812,
"loss": 0.9624,
"step": 290
},
{
"epoch": 4.6189555125725335,
"grad_norm": 1.3579668998718262,
"learning_rate": 0.00019952327208230827,
"loss": 0.9667,
"step": 300
},
{
"epoch": 4.773694390715667,
"grad_norm": 1.5000991821289062,
"learning_rate": 0.00019947419898623626,
"loss": 0.9447,
"step": 310
},
{
"epoch": 4.928433268858801,
"grad_norm": 1.312663197517395,
"learning_rate": 0.00019942272899314093,
"loss": 0.9647,
"step": 320
},
{
"epoch": 5.077369439071567,
"grad_norm": 1.447487711906433,
"learning_rate": 0.00019936886334322607,
"loss": 0.8304,
"step": 330
},
{
"epoch": 5.2321083172147,
"grad_norm": 1.2982898950576782,
"learning_rate": 0.00019931260333442034,
"loss": 0.8223,
"step": 340
},
{
"epoch": 5.386847195357833,
"grad_norm": 1.397681713104248,
"learning_rate": 0.0001992539503223461,
"loss": 0.8659,
"step": 350
},
{
"epoch": 5.541586073500967,
"grad_norm": 1.500657320022583,
"learning_rate": 0.0001991929057202867,
"loss": 0.8642,
"step": 360
},
{
"epoch": 5.696324951644101,
"grad_norm": 1.2767484188079834,
"learning_rate": 0.00019912947099915245,
"loss": 0.8468,
"step": 370
},
{
"epoch": 5.851063829787234,
"grad_norm": 1.1882458925247192,
"learning_rate": 0.00019906364768744515,
"loss": 0.8237,
"step": 380
},
{
"epoch": 6.0,
"grad_norm": 1.177846908569336,
"learning_rate": 0.00019899543737122137,
"loss": 0.8083,
"step": 390
},
{
"epoch": 6.154738878143133,
"grad_norm": 1.3184840679168701,
"learning_rate": 0.00019892484169405398,
"loss": 0.7436,
"step": 400
},
{
"epoch": 6.309477756286267,
"grad_norm": 1.4562623500823975,
"learning_rate": 0.0001988518623569929,
"loss": 0.7613,
"step": 410
},
{
"epoch": 6.4642166344294,
"grad_norm": 1.1735275983810425,
"learning_rate": 0.00019877650111852372,
"loss": 0.7749,
"step": 420
},
{
"epoch": 6.6189555125725335,
"grad_norm": 1.1183767318725586,
"learning_rate": 0.0001986987597945257,
"loss": 0.7551,
"step": 430
},
{
"epoch": 6.773694390715667,
"grad_norm": 1.3398388624191284,
"learning_rate": 0.00019861864025822777,
"loss": 0.7592,
"step": 440
},
{
"epoch": 6.928433268858801,
"grad_norm": 1.5443936586380005,
"learning_rate": 0.0001985361444401635,
"loss": 0.755,
"step": 450
},
{
"epoch": 7.077369439071567,
"grad_norm": 1.1259782314300537,
"learning_rate": 0.0001984512743281244,
"loss": 0.6959,
"step": 460
},
{
"epoch": 7.2321083172147,
"grad_norm": 1.2532756328582764,
"learning_rate": 0.0001983640319671125,
"loss": 0.6675,
"step": 470
},
{
"epoch": 7.386847195357833,
"grad_norm": 1.1728101968765259,
"learning_rate": 0.0001982744194592904,
"loss": 0.6659,
"step": 480
},
{
"epoch": 7.541586073500967,
"grad_norm": 1.2080200910568237,
"learning_rate": 0.00019818243896393113,
"loss": 0.6645,
"step": 490
},
{
"epoch": 7.696324951644101,
"grad_norm": 1.2065773010253906,
"learning_rate": 0.00019808809269736597,
"loss": 0.6763,
"step": 500
},
{
"epoch": 7.851063829787234,
"grad_norm": 1.327473521232605,
"learning_rate": 0.000197991382932931,
"loss": 0.7002,
"step": 510
},
{
"epoch": 8.0,
"grad_norm": 1.059760332107544,
"learning_rate": 0.00019789231200091232,
"loss": 0.6841,
"step": 520
},
{
"epoch": 8.154738878143133,
"grad_norm": 1.1210976839065552,
"learning_rate": 0.00019779088228849,
"loss": 0.5626,
"step": 530
},
{
"epoch": 8.309477756286267,
"grad_norm": 1.2302063703536987,
"learning_rate": 0.00019768709623968044,
"loss": 0.5725,
"step": 540
},
{
"epoch": 8.4642166344294,
"grad_norm": 1.220201849937439,
"learning_rate": 0.0001975809563552776,
"loss": 0.6054,
"step": 550
},
{
"epoch": 8.618955512572533,
"grad_norm": 1.1307240724563599,
"learning_rate": 0.00019747246519279257,
"loss": 0.5996,
"step": 560
},
{
"epoch": 8.773694390715667,
"grad_norm": 1.2768648862838745,
"learning_rate": 0.00019736162536639212,
"loss": 0.6139,
"step": 570
},
{
"epoch": 8.9284332688588,
"grad_norm": 1.0077459812164307,
"learning_rate": 0.00019724843954683566,
"loss": 0.5967,
"step": 580
},
{
"epoch": 9.077369439071568,
"grad_norm": 1.5466549396514893,
"learning_rate": 0.0001971329104614108,
"loss": 0.5228,
"step": 590
},
{
"epoch": 9.232108317214701,
"grad_norm": 1.1988252401351929,
"learning_rate": 0.00019701504089386776,
"loss": 0.4919,
"step": 600
},
{
"epoch": 9.386847195357834,
"grad_norm": 1.1134389638900757,
"learning_rate": 0.00019689483368435221,
"loss": 0.4845,
"step": 610
},
{
"epoch": 9.541586073500968,
"grad_norm": 1.1322001218795776,
"learning_rate": 0.00019677229172933687,
"loss": 0.5015,
"step": 620
},
{
"epoch": 9.696324951644101,
"grad_norm": 0.9991782307624817,
"learning_rate": 0.00019664741798155165,
"loss": 0.5082,
"step": 630
},
{
"epoch": 9.851063829787234,
"grad_norm": 1.0379105806350708,
"learning_rate": 0.00019652021544991265,
"loss": 0.5316,
"step": 640
},
{
"epoch": 10.0,
"grad_norm": 0.8915793895721436,
"learning_rate": 0.0001963906871994495,
"loss": 0.5098,
"step": 650
},
{
"epoch": 10.154738878143133,
"grad_norm": 0.9435361623764038,
"learning_rate": 0.00019625883635123161,
"loss": 0.402,
"step": 660
},
{
"epoch": 10.309477756286267,
"grad_norm": 1.3759244680404663,
"learning_rate": 0.00019612466608229289,
"loss": 0.4105,
"step": 670
},
{
"epoch": 10.4642166344294,
"grad_norm": 1.0236597061157227,
"learning_rate": 0.00019598817962555528,
"loss": 0.4189,
"step": 680
},
{
"epoch": 10.618955512572533,
"grad_norm": 1.0466105937957764,
"learning_rate": 0.0001958493802697508,
"loss": 0.4303,
"step": 690
},
{
"epoch": 10.773694390715667,
"grad_norm": 1.0155956745147705,
"learning_rate": 0.00019570827135934225,
"loss": 0.4335,
"step": 700
},
{
"epoch": 10.9284332688588,
"grad_norm": 1.0753840208053589,
"learning_rate": 0.00019556485629444276,
"loss": 0.4433,
"step": 710
},
{
"epoch": 11.077369439071568,
"grad_norm": 1.177145004272461,
"learning_rate": 0.00019541913853073374,
"loss": 0.386,
"step": 720
},
{
"epoch": 11.232108317214701,
"grad_norm": 1.0875651836395264,
"learning_rate": 0.00019527112157938174,
"loss": 0.3408,
"step": 730
},
{
"epoch": 11.386847195357834,
"grad_norm": 1.0649162530899048,
"learning_rate": 0.00019512080900695362,
"loss": 0.3511,
"step": 740
},
{
"epoch": 11.541586073500968,
"grad_norm": 0.9660040736198425,
"learning_rate": 0.0001949682044353309,
"loss": 0.3614,
"step": 750
},
{
"epoch": 11.696324951644101,
"grad_norm": 0.9773010611534119,
"learning_rate": 0.00019481331154162229,
"loss": 0.3708,
"step": 760
},
{
"epoch": 11.851063829787234,
"grad_norm": 1.141906499862671,
"learning_rate": 0.00019465613405807508,
"loss": 0.3787,
"step": 770
},
{
"epoch": 12.0,
"grad_norm": 1.0721315145492554,
"learning_rate": 0.00019449667577198544,
"loss": 0.368,
"step": 780
},
{
"epoch": 12.154738878143133,
"grad_norm": 1.0237089395523071,
"learning_rate": 0.00019433494052560678,
"loss": 0.2921,
"step": 790
},
{
"epoch": 12.309477756286267,
"grad_norm": 1.3155955076217651,
"learning_rate": 0.0001941709322160576,
"loss": 0.2935,
"step": 800
},
{
"epoch": 12.4642166344294,
"grad_norm": 1.04567551612854,
"learning_rate": 0.0001940046547952272,
"loss": 0.3098,
"step": 810
},
{
"epoch": 12.618955512572533,
"grad_norm": 0.8313108682632446,
"learning_rate": 0.00019383611226968066,
"loss": 0.3218,
"step": 820
},
{
"epoch": 12.773694390715667,
"grad_norm": 1.182775616645813,
"learning_rate": 0.00019366530870056238,
"loss": 0.3197,
"step": 830
},
{
"epoch": 12.9284332688588,
"grad_norm": 1.1929161548614502,
"learning_rate": 0.00019349224820349798,
"loss": 0.3334,
"step": 840
},
{
"epoch": 13.077369439071568,
"grad_norm": 1.1046805381774902,
"learning_rate": 0.0001933169349484953,
"loss": 0.2845,
"step": 850
},
{
"epoch": 13.232108317214701,
"grad_norm": 0.9451972246170044,
"learning_rate": 0.0001931393731598439,
"loss": 0.2657,
"step": 860
},
{
"epoch": 13.386847195357834,
"grad_norm": 0.8607316017150879,
"learning_rate": 0.00019295956711601323,
"loss": 0.2738,
"step": 870
},
{
"epoch": 13.541586073500968,
"grad_norm": 1.0280941724777222,
"learning_rate": 0.00019277752114954958,
"loss": 0.2745,
"step": 880
},
{
"epoch": 13.696324951644101,
"grad_norm": 1.003117322921753,
"learning_rate": 0.0001925932396469717,
"loss": 0.28,
"step": 890
},
{
"epoch": 13.851063829787234,
"grad_norm": 0.9840229749679565,
"learning_rate": 0.00019240672704866495,
"loss": 0.2946,
"step": 900
},
{
"epoch": 14.0,
"grad_norm": 0.7164958715438843,
"learning_rate": 0.0001922179878487746,
"loss": 0.2876,
"step": 910
},
{
"epoch": 14.154738878143133,
"grad_norm": 1.0938618183135986,
"learning_rate": 0.0001920270265950972,
"loss": 0.237,
"step": 920
},
{
"epoch": 14.309477756286267,
"grad_norm": 0.742213249206543,
"learning_rate": 0.00019183384788897128,
"loss": 0.2457,
"step": 930
},
{
"epoch": 14.4642166344294,
"grad_norm": 0.7443627715110779,
"learning_rate": 0.0001916384563851663,
"loss": 0.2535,
"step": 940
},
{
"epoch": 14.618955512572533,
"grad_norm": 0.9166159629821777,
"learning_rate": 0.0001914408567917706,
"loss": 0.2538,
"step": 950
},
{
"epoch": 14.773694390715667,
"grad_norm": 0.8441450595855713,
"learning_rate": 0.00019124105387007787,
"loss": 0.2663,
"step": 960
},
{
"epoch": 14.9284332688588,
"grad_norm": 0.8441358804702759,
"learning_rate": 0.00019103905243447245,
"loss": 0.2684,
"step": 970
},
{
"epoch": 15.077369439071568,
"grad_norm": 0.8372294902801514,
"learning_rate": 0.0001908348573523134,
"loss": 0.2342,
"step": 980
},
{
"epoch": 15.232108317214701,
"grad_norm": 0.8525702357292175,
"learning_rate": 0.0001906284735438171,
"loss": 0.2369,
"step": 990
},
{
"epoch": 15.386847195357834,
"grad_norm": 0.7618408799171448,
"learning_rate": 0.00019041990598193874,
"loss": 0.2313,
"step": 1000
},
{
"epoch": 15.541586073500968,
"grad_norm": 0.7583808302879333,
"learning_rate": 0.00019020915969225258,
"loss": 0.2376,
"step": 1010
},
{
"epoch": 15.696324951644101,
"grad_norm": 0.9798568487167358,
"learning_rate": 0.00018999623975283063,
"loss": 0.2438,
"step": 1020
},
{
"epoch": 15.851063829787234,
"grad_norm": 1.0058543682098389,
"learning_rate": 0.0001897811512941206,
"loss": 0.2445,
"step": 1030
},
{
"epoch": 16.0,
"grad_norm": 0.7295846343040466,
"learning_rate": 0.00018956389949882199,
"loss": 0.2399,
"step": 1040
},
{
"epoch": 16.154738878143135,
"grad_norm": 0.7581143975257874,
"learning_rate": 0.0001893444896017614,
"loss": 0.2087,
"step": 1050
},
{
"epoch": 16.309477756286267,
"grad_norm": 0.7100822329521179,
"learning_rate": 0.0001891229268897664,
"loss": 0.2202,
"step": 1060
},
{
"epoch": 16.464216634429402,
"grad_norm": 0.7861729860305786,
"learning_rate": 0.00018889921670153784,
"loss": 0.2222,
"step": 1070
},
{
"epoch": 16.618955512572533,
"grad_norm": 0.5772430300712585,
"learning_rate": 0.00018867336442752165,
"loss": 0.2184,
"step": 1080
},
{
"epoch": 16.77369439071567,
"grad_norm": 0.8656666278839111,
"learning_rate": 0.0001884453755097787,
"loss": 0.225,
"step": 1090
},
{
"epoch": 16.9284332688588,
"grad_norm": 0.8098858594894409,
"learning_rate": 0.00018821525544185366,
"loss": 0.2298,
"step": 1100
},
{
"epoch": 17.077369439071568,
"grad_norm": 0.77182537317276,
"learning_rate": 0.00018798300976864267,
"loss": 0.2072,
"step": 1110
},
{
"epoch": 17.2321083172147,
"grad_norm": 0.7229182720184326,
"learning_rate": 0.00018774864408625986,
"loss": 0.1998,
"step": 1120
},
{
"epoch": 17.386847195357834,
"grad_norm": 0.914027214050293,
"learning_rate": 0.00018751216404190227,
"loss": 0.2061,
"step": 1130
},
{
"epoch": 17.541586073500966,
"grad_norm": 0.6615231037139893,
"learning_rate": 0.00018727357533371392,
"loss": 0.212,
"step": 1140
},
{
"epoch": 17.6963249516441,
"grad_norm": 0.7795395255088806,
"learning_rate": 0.00018703288371064856,
"loss": 0.2189,
"step": 1150
},
{
"epoch": 17.851063829787233,
"grad_norm": 0.593342661857605,
"learning_rate": 0.000186790094972331,
"loss": 0.2138,
"step": 1160
},
{
"epoch": 18.0,
"grad_norm": 0.5554038286209106,
"learning_rate": 0.00018654521496891746,
"loss": 0.2113,
"step": 1170
},
{
"epoch": 18.154738878143135,
"grad_norm": 0.5869312286376953,
"learning_rate": 0.00018629824960095462,
"loss": 0.1905,
"step": 1180
},
{
"epoch": 18.309477756286267,
"grad_norm": 0.6991078853607178,
"learning_rate": 0.00018604920481923732,
"loss": 0.1925,
"step": 1190
},
{
"epoch": 18.464216634429402,
"grad_norm": 0.4971669912338257,
"learning_rate": 0.00018579808662466533,
"loss": 0.1965,
"step": 1200
},
{
"epoch": 18.618955512572533,
"grad_norm": 0.6267327070236206,
"learning_rate": 0.00018554490106809866,
"loss": 0.2043,
"step": 1210
},
{
"epoch": 18.77369439071567,
"grad_norm": 0.7161979079246521,
"learning_rate": 0.00018528965425021184,
"loss": 0.2046,
"step": 1220
},
{
"epoch": 18.9284332688588,
"grad_norm": 0.6604463458061218,
"learning_rate": 0.00018503235232134673,
"loss": 0.2085,
"step": 1230
},
{
"epoch": 19.077369439071568,
"grad_norm": 0.6707203388214111,
"learning_rate": 0.0001847730014813646,
"loss": 0.1895,
"step": 1240
},
{
"epoch": 19.2321083172147,
"grad_norm": 0.9623780846595764,
"learning_rate": 0.00018451160797949644,
"loss": 0.1872,
"step": 1250
},
{
"epoch": 19.386847195357834,
"grad_norm": 0.6840048432350159,
"learning_rate": 0.0001842481781141928,
"loss": 0.1901,
"step": 1260
},
{
"epoch": 19.541586073500966,
"grad_norm": 0.6736961007118225,
"learning_rate": 0.00018398271823297143,
"loss": 0.1957,
"step": 1270
},
{
"epoch": 19.6963249516441,
"grad_norm": 0.7020850777626038,
"learning_rate": 0.00018371523473226491,
"loss": 0.1958,
"step": 1280
},
{
"epoch": 19.851063829787233,
"grad_norm": 0.6539646983146667,
"learning_rate": 0.00018344573405726616,
"loss": 0.1966,
"step": 1290
},
{
"epoch": 20.0,
"grad_norm": 0.5781111717224121,
"learning_rate": 0.00018317422270177334,
"loss": 0.1912,
"step": 1300
},
{
"epoch": 20.154738878143135,
"grad_norm": 0.5996780395507812,
"learning_rate": 0.00018290070720803318,
"loss": 0.1754,
"step": 1310
},
{
"epoch": 20.309477756286267,
"grad_norm": 0.4023078680038452,
"learning_rate": 0.00018262519416658362,
"loss": 0.1788,
"step": 1320
},
{
"epoch": 20.464216634429402,
"grad_norm": 0.5035756826400757,
"learning_rate": 0.00018234769021609464,
"loss": 0.1797,
"step": 1330
},
{
"epoch": 20.618955512572533,
"grad_norm": 0.8370582461357117,
"learning_rate": 0.00018206820204320865,
"loss": 0.189,
"step": 1340
},
{
"epoch": 20.77369439071567,
"grad_norm": 0.5572733879089355,
"learning_rate": 0.00018178673638237917,
"loss": 0.1907,
"step": 1350
},
{
"epoch": 20.9284332688588,
"grad_norm": 0.5380248427391052,
"learning_rate": 0.0001815033000157086,
"loss": 0.1887,
"step": 1360
},
{
"epoch": 21.077369439071568,
"grad_norm": 0.5686184167861938,
"learning_rate": 0.00018121789977278483,
"loss": 0.1724,
"step": 1370
},
{
"epoch": 21.2321083172147,
"grad_norm": 0.7584247589111328,
"learning_rate": 0.00018093054253051665,
"loss": 0.1714,
"step": 1380
},
{
"epoch": 21.386847195357834,
"grad_norm": 0.376000314950943,
"learning_rate": 0.00018064123521296802,
"loss": 0.1731,
"step": 1390
},
{
"epoch": 21.541586073500966,
"grad_norm": 0.4537977874279022,
"learning_rate": 0.0001803499847911913,
"loss": 0.1781,
"step": 1400
},
{
"epoch": 21.6963249516441,
"grad_norm": 0.5281225442886353,
"learning_rate": 0.0001800567982830592,
"loss": 0.1794,
"step": 1410
},
{
"epoch": 21.851063829787233,
"grad_norm": 0.5692486763000488,
"learning_rate": 0.00017976168275309583,
"loss": 0.1822,
"step": 1420
},
{
"epoch": 22.0,
"grad_norm": 0.41789963841438293,
"learning_rate": 0.00017946464531230617,
"loss": 0.1765,
"step": 1430
},
{
"epoch": 22.154738878143135,
"grad_norm": 0.8695152401924133,
"learning_rate": 0.0001791656931180051,
"loss": 0.1621,
"step": 1440
},
{
"epoch": 22.309477756286267,
"grad_norm": 0.5791840553283691,
"learning_rate": 0.00017886483337364466,
"loss": 0.1685,
"step": 1450
},
{
"epoch": 22.464216634429402,
"grad_norm": 0.5616587400436401,
"learning_rate": 0.0001785620733286406,
"loss": 0.1745,
"step": 1460
},
{
"epoch": 22.618955512572533,
"grad_norm": 0.5013086199760437,
"learning_rate": 0.00017825742027819774,
"loss": 0.177,
"step": 1470
},
{
"epoch": 22.77369439071567,
"grad_norm": 0.4899880290031433,
"learning_rate": 0.00017795088156313406,
"loss": 0.1762,
"step": 1480
},
{
"epoch": 22.9284332688588,
"grad_norm": 0.37300869822502136,
"learning_rate": 0.00017764246456970382,
"loss": 0.1786,
"step": 1490
},
{
"epoch": 23.077369439071568,
"grad_norm": 0.637178361415863,
"learning_rate": 0.00017733217672941982,
"loss": 0.1652,
"step": 1500
},
{
"epoch": 23.2321083172147,
"grad_norm": 0.4123108685016632,
"learning_rate": 0.000177020025518874,
"loss": 0.1635,
"step": 1510
},
{
"epoch": 23.386847195357834,
"grad_norm": 0.592748761177063,
"learning_rate": 0.00017670601845955753,
"loss": 0.1675,
"step": 1520
},
{
"epoch": 23.541586073500966,
"grad_norm": 0.8656735420227051,
"learning_rate": 0.00017639016311767948,
"loss": 0.1724,
"step": 1530
},
{
"epoch": 23.6963249516441,
"grad_norm": 0.6082174181938171,
"learning_rate": 0.00017607246710398452,
"loss": 0.1721,
"step": 1540
},
{
"epoch": 23.851063829787233,
"grad_norm": 0.5370825529098511,
"learning_rate": 0.0001757529380735695,
"loss": 0.1811,
"step": 1550
},
{
"epoch": 24.0,
"grad_norm": 0.42122682929039,
"learning_rate": 0.00017543158372569905,
"loss": 0.1715,
"step": 1560
},
{
"epoch": 24.154738878143135,
"grad_norm": 0.5614141821861267,
"learning_rate": 0.00017510841180362006,
"loss": 0.1571,
"step": 1570
},
{
"epoch": 24.309477756286267,
"grad_norm": 0.5628191232681274,
"learning_rate": 0.00017478343009437501,
"loss": 0.1649,
"step": 1580
},
{
"epoch": 24.464216634429402,
"grad_norm": 0.46745532751083374,
"learning_rate": 0.00017445664642861452,
"loss": 0.1684,
"step": 1590
},
{
"epoch": 24.618955512572533,
"grad_norm": 0.6663879752159119,
"learning_rate": 0.00017412806868040846,
"loss": 0.1704,
"step": 1600
},
{
"epoch": 24.77369439071567,
"grad_norm": 0.6331315636634827,
"learning_rate": 0.00017379770476705632,
"loss": 0.1698,
"step": 1610
},
{
"epoch": 24.9284332688588,
"grad_norm": 0.46386483311653137,
"learning_rate": 0.00017346556264889647,
"loss": 0.174,
"step": 1620
},
{
"epoch": 25.077369439071568,
"grad_norm": 0.7119632363319397,
"learning_rate": 0.00017313165032911427,
"loss": 0.1551,
"step": 1630
},
{
"epoch": 25.2321083172147,
"grad_norm": 0.6400099396705627,
"learning_rate": 0.00017279597585354926,
"loss": 0.1555,
"step": 1640
},
{
"epoch": 25.386847195357834,
"grad_norm": 0.5790190100669861,
"learning_rate": 0.00017245854731050137,
"loss": 0.1622,
"step": 1650
},
{
"epoch": 25.541586073500966,
"grad_norm": 0.7252941131591797,
"learning_rate": 0.00017211937283053582,
"loss": 0.1665,
"step": 1660
},
{
"epoch": 25.6963249516441,
"grad_norm": 0.6140386462211609,
"learning_rate": 0.00017177846058628751,
"loss": 0.1729,
"step": 1670
},
{
"epoch": 25.851063829787233,
"grad_norm": 0.6094592213630676,
"learning_rate": 0.00017143581879226378,
"loss": 0.1739,
"step": 1680
},
{
"epoch": 26.0,
"grad_norm": 0.5876917243003845,
"learning_rate": 0.0001710914557046467,
"loss": 0.1662,
"step": 1690
},
{
"epoch": 26.154738878143135,
"grad_norm": 0.3905578553676605,
"learning_rate": 0.00017074537962109403,
"loss": 0.1524,
"step": 1700
},
{
"epoch": 26.309477756286267,
"grad_norm": 0.6748397350311279,
"learning_rate": 0.0001703975988805393,
"loss": 0.1555,
"step": 1710
},
{
"epoch": 26.464216634429402,
"grad_norm": 0.7327801585197449,
"learning_rate": 0.0001700481218629909,
"loss": 0.158,
"step": 1720
},
{
"epoch": 26.618955512572533,
"grad_norm": 0.5541703701019287,
"learning_rate": 0.00016969695698933017,
"loss": 0.1616,
"step": 1730
},
{
"epoch": 26.77369439071567,
"grad_norm": 0.5566617846488953,
"learning_rate": 0.0001693441127211084,
"loss": 0.1679,
"step": 1740
},
{
"epoch": 26.9284332688588,
"grad_norm": 0.5030473470687866,
"learning_rate": 0.00016898959756034306,
"loss": 0.1688,
"step": 1750
},
{
"epoch": 27.077369439071568,
"grad_norm": 0.5635802745819092,
"learning_rate": 0.0001686334200493129,
"loss": 0.152,
"step": 1760
},
{
"epoch": 27.2321083172147,
"grad_norm": 0.6304295659065247,
"learning_rate": 0.000168275588770352,
"loss": 0.1494,
"step": 1770
},
{
"epoch": 27.386847195357834,
"grad_norm": 0.5501868724822998,
"learning_rate": 0.00016791611234564323,
"loss": 0.1567,
"step": 1780
},
{
"epoch": 27.541586073500966,
"grad_norm": 0.4970678389072418,
"learning_rate": 0.0001675549994370103,
"loss": 0.1605,
"step": 1790
},
{
"epoch": 27.6963249516441,
"grad_norm": 0.6317483186721802,
"learning_rate": 0.00016719225874570898,
"loss": 0.1645,
"step": 1800
},
{
"epoch": 27.851063829787233,
"grad_norm": 0.5586022734642029,
"learning_rate": 0.00016682789901221762,
"loss": 0.1631,
"step": 1810
},
{
"epoch": 28.0,
"grad_norm": 0.39034706354141235,
"learning_rate": 0.00016646192901602656,
"loss": 0.1575,
"step": 1820
},
{
"epoch": 28.154738878143135,
"grad_norm": 0.4670889377593994,
"learning_rate": 0.00016609435757542634,
"loss": 0.1466,
"step": 1830
},
{
"epoch": 28.309477756286267,
"grad_norm": 0.36485323309898376,
"learning_rate": 0.00016572519354729546,
"loss": 0.1501,
"step": 1840
},
{
"epoch": 28.464216634429402,
"grad_norm": 0.49343135952949524,
"learning_rate": 0.00016535444582688684,
"loss": 0.1562,
"step": 1850
},
{
"epoch": 28.618955512572533,
"grad_norm": 0.4284234941005707,
"learning_rate": 0.00016498212334761352,
"loss": 0.1593,
"step": 1860
},
{
"epoch": 28.77369439071567,
"grad_norm": 0.3760448396205902,
"learning_rate": 0.00016460823508083344,
"loss": 0.1594,
"step": 1870
},
{
"epoch": 28.9284332688588,
"grad_norm": 0.4843062460422516,
"learning_rate": 0.0001642327900356332,
"loss": 0.159,
"step": 1880
},
{
"epoch": 29.077369439071568,
"grad_norm": 0.361465185880661,
"learning_rate": 0.00016385579725861099,
"loss": 0.1435,
"step": 1890
},
{
"epoch": 29.2321083172147,
"grad_norm": 0.33738720417022705,
"learning_rate": 0.0001634772658336587,
"loss": 0.1446,
"step": 1900
},
{
"epoch": 29.386847195357834,
"grad_norm": 0.47809138894081116,
"learning_rate": 0.0001630972048817429,
"loss": 0.1479,
"step": 1910
},
{
"epoch": 29.541586073500966,
"grad_norm": 0.574562668800354,
"learning_rate": 0.00016271562356068515,
"loss": 0.1531,
"step": 1920
},
{
"epoch": 29.6963249516441,
"grad_norm": 0.3374098539352417,
"learning_rate": 0.00016233253106494132,
"loss": 0.1535,
"step": 1930
},
{
"epoch": 29.851063829787233,
"grad_norm": 0.3124029338359833,
"learning_rate": 0.00016194793662538002,
"loss": 0.1538,
"step": 1940
},
{
"epoch": 30.0,
"grad_norm": 0.19837027788162231,
"learning_rate": 0.00016156184950906027,
"loss": 0.1507,
"step": 1950
},
{
"epoch": 30.154738878143135,
"grad_norm": 0.451399028301239,
"learning_rate": 0.00016117427901900797,
"loss": 0.1397,
"step": 1960
},
{
"epoch": 30.309477756286267,
"grad_norm": 0.5017818212509155,
"learning_rate": 0.0001607852344939921,
"loss": 0.1431,
"step": 1970
},
{
"epoch": 30.464216634429402,
"grad_norm": 0.5677542686462402,
"learning_rate": 0.00016039472530829938,
"loss": 0.1472,
"step": 1980
},
{
"epoch": 30.618955512572533,
"grad_norm": 0.7163184881210327,
"learning_rate": 0.00016000276087150848,
"loss": 0.1523,
"step": 1990
},
{
"epoch": 30.77369439071567,
"grad_norm": 0.4324893355369568,
"learning_rate": 0.00015960935062826333,
"loss": 0.1489,
"step": 2000
},
{
"epoch": 30.9284332688588,
"grad_norm": 0.3189820945262909,
"learning_rate": 0.00015921450405804563,
"loss": 0.1532,
"step": 2010
},
{
"epoch": 31.077369439071568,
"grad_norm": 0.2678401470184326,
"learning_rate": 0.0001588182306749462,
"loss": 0.1406,
"step": 2020
},
{
"epoch": 31.2321083172147,
"grad_norm": 0.2673105001449585,
"learning_rate": 0.00015842054002743593,
"loss": 0.1391,
"step": 2030
},
{
"epoch": 31.386847195357834,
"grad_norm": 0.28863251209259033,
"learning_rate": 0.00015802144169813564,
"loss": 0.1447,
"step": 2040
},
{
"epoch": 31.541586073500966,
"grad_norm": 0.4464472234249115,
"learning_rate": 0.00015762094530358516,
"loss": 0.1463,
"step": 2050
},
{
"epoch": 31.6963249516441,
"grad_norm": 0.4640148878097534,
"learning_rate": 0.00015721906049401166,
"loss": 0.1484,
"step": 2060
},
{
"epoch": 31.851063829787233,
"grad_norm": 0.33794838190078735,
"learning_rate": 0.000156815796953097,
"loss": 0.1528,
"step": 2070
},
{
"epoch": 32.0,
"grad_norm": 0.27132901549339294,
"learning_rate": 0.0001564111643977447,
"loss": 0.1454,
"step": 2080
},
{
"epoch": 32.154738878143135,
"grad_norm": 0.25137776136398315,
"learning_rate": 0.00015600517257784532,
"loss": 0.1345,
"step": 2090
},
{
"epoch": 32.30947775628627,
"grad_norm": 0.40778201818466187,
"learning_rate": 0.00015559783127604203,
"loss": 0.14,
"step": 2100
},
{
"epoch": 32.4642166344294,
"grad_norm": 0.2597282826900482,
"learning_rate": 0.00015518915030749455,
"loss": 0.1396,
"step": 2110
},
{
"epoch": 32.61895551257253,
"grad_norm": 0.2305850237607956,
"learning_rate": 0.0001547791395196428,
"loss": 0.1435,
"step": 2120
},
{
"epoch": 32.77369439071567,
"grad_norm": 0.441969096660614,
"learning_rate": 0.00015436780879196955,
"loss": 0.1458,
"step": 2130
},
{
"epoch": 32.928433268858804,
"grad_norm": 0.313723623752594,
"learning_rate": 0.0001539551680357624,
"loss": 0.1504,
"step": 2140
},
{
"epoch": 33.07736943907157,
"grad_norm": 0.3285030722618103,
"learning_rate": 0.00015354122719387503,
"loss": 0.133,
"step": 2150
},
{
"epoch": 33.2321083172147,
"grad_norm": 0.22142446041107178,
"learning_rate": 0.00015312599624048748,
"loss": 0.1334,
"step": 2160
},
{
"epoch": 33.38684719535783,
"grad_norm": 0.20920687913894653,
"learning_rate": 0.00015270948518086587,
"loss": 0.1403,
"step": 2170
},
{
"epoch": 33.541586073500966,
"grad_norm": 0.246036559343338,
"learning_rate": 0.00015229170405112142,
"loss": 0.1404,
"step": 2180
},
{
"epoch": 33.6963249516441,
"grad_norm": 0.2238244116306305,
"learning_rate": 0.0001518726629179684,
"loss": 0.144,
"step": 2190
},
{
"epoch": 33.851063829787236,
"grad_norm": 0.23082295060157776,
"learning_rate": 0.00015145237187848184,
"loss": 0.1435,
"step": 2200
},
{
"epoch": 34.0,
"grad_norm": 0.17774420976638794,
"learning_rate": 0.00015103084105985405,
"loss": 0.1421,
"step": 2210
},
{
"epoch": 34.154738878143135,
"grad_norm": 0.22231119871139526,
"learning_rate": 0.0001506080806191506,
"loss": 0.1314,
"step": 2220
},
{
"epoch": 34.30947775628627,
"grad_norm": 0.31579744815826416,
"learning_rate": 0.00015018410074306565,
"loss": 0.1357,
"step": 2230
},
{
"epoch": 34.4642166344294,
"grad_norm": 0.27794158458709717,
"learning_rate": 0.00014975891164767643,
"loss": 0.1381,
"step": 2240
},
{
"epoch": 34.61895551257253,
"grad_norm": 0.2816390097141266,
"learning_rate": 0.0001493325235781972,
"loss": 0.1454,
"step": 2250
},
{
"epoch": 34.77369439071567,
"grad_norm": 0.21555857360363007,
"learning_rate": 0.00014890494680873224,
"loss": 0.1413,
"step": 2260
},
{
"epoch": 34.928433268858804,
"grad_norm": 0.20860950648784637,
"learning_rate": 0.00014847619164202834,
"loss": 0.1431,
"step": 2270
},
{
"epoch": 35.07736943907157,
"grad_norm": 0.4063090980052948,
"learning_rate": 0.0001480462684092266,
"loss": 0.1331,
"step": 2280
},
{
"epoch": 35.2321083172147,
"grad_norm": 0.4503425657749176,
"learning_rate": 0.00014761518746961343,
"loss": 0.1334,
"step": 2290
},
{
"epoch": 35.38684719535783,
"grad_norm": 0.2942167818546295,
"learning_rate": 0.00014718295921037092,
"loss": 0.1343,
"step": 2300
},
{
"epoch": 35.541586073500966,
"grad_norm": 0.21005859971046448,
"learning_rate": 0.0001467495940463266,
"loss": 0.1393,
"step": 2310
},
{
"epoch": 35.6963249516441,
"grad_norm": 0.36378660798072815,
"learning_rate": 0.00014631510241970252,
"loss": 0.1405,
"step": 2320
},
{
"epoch": 35.851063829787236,
"grad_norm": 0.35616129636764526,
"learning_rate": 0.00014587949479986362,
"loss": 0.1441,
"step": 2330
},
{
"epoch": 36.0,
"grad_norm": 0.26007241010665894,
"learning_rate": 0.00014544278168306528,
"loss": 0.1395,
"step": 2340
},
{
"epoch": 36.154738878143135,
"grad_norm": 0.26370298862457275,
"learning_rate": 0.00014500497359220077,
"loss": 0.1321,
"step": 2350
},
{
"epoch": 36.30947775628627,
"grad_norm": 0.2448490560054779,
"learning_rate": 0.00014456608107654736,
"loss": 0.1332,
"step": 2360
},
{
"epoch": 36.4642166344294,
"grad_norm": 0.2294849306344986,
"learning_rate": 0.0001441261147115123,
"loss": 0.1379,
"step": 2370
},
{
"epoch": 36.61895551257253,
"grad_norm": 0.3313272297382355,
"learning_rate": 0.00014368508509837797,
"loss": 0.1426,
"step": 2380
},
{
"epoch": 36.77369439071567,
"grad_norm": 0.40856489539146423,
"learning_rate": 0.0001432430028640463,
"loss": 0.1432,
"step": 2390
},
{
"epoch": 36.928433268858804,
"grad_norm": 0.47346943616867065,
"learning_rate": 0.00014279987866078306,
"loss": 0.1432,
"step": 2400
},
{
"epoch": 37.07736943907157,
"grad_norm": 0.2977467477321625,
"learning_rate": 0.00014235572316596072,
"loss": 0.1307,
"step": 2410
},
{
"epoch": 37.2321083172147,
"grad_norm": 0.25430828332901,
"learning_rate": 0.00014191054708180155,
"loss": 0.1319,
"step": 2420
},
{
"epoch": 37.38684719535783,
"grad_norm": 0.2358369529247284,
"learning_rate": 0.00014146436113511958,
"loss": 0.1348,
"step": 2430
},
{
"epoch": 37.541586073500966,
"grad_norm": 0.3575883209705353,
"learning_rate": 0.00014101717607706206,
"loss": 0.135,
"step": 2440
},
{
"epoch": 37.6963249516441,
"grad_norm": 0.40014350414276123,
"learning_rate": 0.00014056900268285063,
"loss": 0.1386,
"step": 2450
},
{
"epoch": 37.851063829787236,
"grad_norm": 0.2617679238319397,
"learning_rate": 0.0001401198517515214,
"loss": 0.1406,
"step": 2460
},
{
"epoch": 38.0,
"grad_norm": 0.3119979202747345,
"learning_rate": 0.00013966973410566502,
"loss": 0.1389,
"step": 2470
},
{
"epoch": 38.154738878143135,
"grad_norm": 0.7342913150787354,
"learning_rate": 0.00013921866059116573,
"loss": 0.1314,
"step": 2480
},
{
"epoch": 38.30947775628627,
"grad_norm": 0.4472424387931824,
"learning_rate": 0.00013876664207694,
"loss": 0.1343,
"step": 2490
},
{
"epoch": 38.4642166344294,
"grad_norm": 0.29962313175201416,
"learning_rate": 0.00013831368945467476,
"loss": 0.1361,
"step": 2500
}
],
"logging_steps": 10,
"max_steps": 6500,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.6034440281665126e+18,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}