{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 812,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012315270935960591,
      "grad_norm": 0.3202645778656006,
      "learning_rate": 2.4390243902439027e-06,
      "loss": 1.0034,
      "step": 1
    },
    {
      "epoch": 0.006157635467980296,
      "grad_norm": 0.34312453866004944,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 0.978,
      "step": 5
    },
    {
      "epoch": 0.012315270935960592,
      "grad_norm": 0.3679831922054291,
      "learning_rate": 2.4390243902439026e-05,
      "loss": 0.9639,
      "step": 10
    },
    {
      "epoch": 0.01847290640394089,
      "grad_norm": 0.12030908465385437,
      "learning_rate": 3.6585365853658535e-05,
      "loss": 0.979,
      "step": 15
    },
    {
      "epoch": 0.024630541871921183,
      "grad_norm": 0.1297861486673355,
      "learning_rate": 4.878048780487805e-05,
      "loss": 0.978,
      "step": 20
    },
    {
      "epoch": 0.03078817733990148,
      "grad_norm": 0.10942083597183228,
      "learning_rate": 6.097560975609756e-05,
      "loss": 0.9532,
      "step": 25
    },
    {
      "epoch": 0.03694581280788178,
      "grad_norm": 0.11847982555627823,
      "learning_rate": 7.317073170731707e-05,
      "loss": 0.8874,
      "step": 30
    },
    {
      "epoch": 0.04310344827586207,
      "grad_norm": 0.10410485416650772,
      "learning_rate": 8.53658536585366e-05,
      "loss": 0.9053,
      "step": 35
    },
    {
      "epoch": 0.04926108374384237,
      "grad_norm": 0.11295271664857864,
      "learning_rate": 9.75609756097561e-05,
      "loss": 0.9043,
      "step": 40
    },
    {
      "epoch": 0.05541871921182266,
      "grad_norm": 0.09398676455020905,
      "learning_rate": 0.00010975609756097563,
      "loss": 0.8799,
      "step": 45
    },
    {
      "epoch": 0.06157635467980296,
      "grad_norm": 0.10218280553817749,
      "learning_rate": 0.00012195121951219512,
      "loss": 0.8744,
      "step": 50
    },
    {
      "epoch": 0.06773399014778325,
      "grad_norm": 0.10606866329908371,
      "learning_rate": 0.00013414634146341464,
      "loss": 0.8531,
      "step": 55
    },
    {
      "epoch": 0.07389162561576355,
      "grad_norm": 0.09138131886720657,
      "learning_rate": 0.00014634146341463414,
      "loss": 0.8552,
      "step": 60
    },
    {
      "epoch": 0.08004926108374384,
      "grad_norm": 0.10347121208906174,
      "learning_rate": 0.00015853658536585366,
      "loss": 0.8607,
      "step": 65
    },
    {
      "epoch": 0.08620689655172414,
      "grad_norm": 0.09866124391555786,
      "learning_rate": 0.0001707317073170732,
      "loss": 0.8585,
      "step": 70
    },
    {
      "epoch": 0.09236453201970443,
      "grad_norm": 0.10218536853790283,
      "learning_rate": 0.0001829268292682927,
      "loss": 0.8677,
      "step": 75
    },
    {
      "epoch": 0.09852216748768473,
      "grad_norm": 0.10015806555747986,
      "learning_rate": 0.0001951219512195122,
      "loss": 0.861,
      "step": 80
    },
    {
      "epoch": 0.10467980295566502,
      "grad_norm": 0.09940984100103378,
      "learning_rate": 0.0001999916658654738,
      "loss": 0.865,
      "step": 85
    },
    {
      "epoch": 0.11083743842364532,
      "grad_norm": 0.09865906834602356,
      "learning_rate": 0.0001999407400739705,
      "loss": 0.8697,
      "step": 90
    },
    {
      "epoch": 0.11699507389162561,
      "grad_norm": 0.10052310675382614,
      "learning_rate": 0.00019984354211555644,
      "loss": 0.8674,
      "step": 95
    },
    {
      "epoch": 0.12315270935960591,
      "grad_norm": 0.09879495203495026,
      "learning_rate": 0.00019970011699250152,
      "loss": 0.8218,
      "step": 100
    },
    {
      "epoch": 0.12931034482758622,
      "grad_norm": 0.10285568237304688,
      "learning_rate": 0.00019951053111006976,
      "loss": 0.8663,
      "step": 105
    },
    {
      "epoch": 0.1354679802955665,
      "grad_norm": 0.10076833516359329,
      "learning_rate": 0.00019927487224577402,
      "loss": 0.8656,
      "step": 110
    },
    {
      "epoch": 0.1416256157635468,
      "grad_norm": 0.10918958485126495,
      "learning_rate": 0.0001989932495087353,
      "loss": 0.8466,
      "step": 115
    },
    {
      "epoch": 0.1477832512315271,
      "grad_norm": 0.10598968714475632,
      "learning_rate": 0.0001986657932891657,
      "loss": 0.8359,
      "step": 120
    },
    {
      "epoch": 0.1539408866995074,
      "grad_norm": 0.10305009037256241,
      "learning_rate": 0.0001982926551979982,
      "loss": 0.8633,
      "step": 125
    },
    {
      "epoch": 0.16009852216748768,
      "grad_norm": 0.1001807376742363,
      "learning_rate": 0.00019787400799669154,
      "loss": 0.8669,
      "step": 130
    },
    {
      "epoch": 0.16625615763546797,
      "grad_norm": 0.11495460569858551,
      "learning_rate": 0.00019741004551724207,
      "loss": 0.8379,
      "step": 135
    },
    {
      "epoch": 0.1724137931034483,
      "grad_norm": 0.09955433756113052,
      "learning_rate": 0.00019690098257244064,
      "loss": 0.847,
      "step": 140
    },
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 0.09870877861976624,
      "learning_rate": 0.00019634705485641488,
      "loss": 0.8419,
      "step": 145
    },
    {
      "epoch": 0.18472906403940886,
      "grad_norm": 0.09604347497224808,
      "learning_rate": 0.00019574851883550395,
      "loss": 0.8751,
      "step": 150
    },
    {
      "epoch": 0.19088669950738915,
      "grad_norm": 0.09326177835464478,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.8586,
      "step": 155
    },
    {
      "epoch": 0.19704433497536947,
      "grad_norm": 0.09584691375494003,
      "learning_rate": 0.00019441875088341997,
      "loss": 0.8613,
      "step": 160
    },
    {
      "epoch": 0.20320197044334976,
      "grad_norm": 0.08351971209049225,
      "learning_rate": 0.00019368813462954316,
      "loss": 0.8533,
      "step": 165
    },
    {
      "epoch": 0.20935960591133004,
      "grad_norm": 0.09796614199876785,
      "learning_rate": 0.00019291414114031743,
      "loss": 0.8551,
      "step": 170
    },
    {
      "epoch": 0.21551724137931033,
      "grad_norm": 0.09448806941509247,
      "learning_rate": 0.00019209712877166349,
      "loss": 0.8531,
      "step": 175
    },
    {
      "epoch": 0.22167487684729065,
      "grad_norm": 0.09499138593673706,
      "learning_rate": 0.00019123747579707275,
      "loss": 0.8679,
      "step": 180
    },
    {
      "epoch": 0.22783251231527094,
      "grad_norm": 0.11031397432088852,
      "learning_rate": 0.00019033558023246844,
      "loss": 0.8238,
      "step": 185
    },
    {
      "epoch": 0.23399014778325122,
      "grad_norm": 0.09239893406629562,
      "learning_rate": 0.0001893918596519257,
      "loss": 0.8438,
      "step": 190
    },
    {
      "epoch": 0.24014778325123154,
      "grad_norm": 0.09333900362253189,
      "learning_rate": 0.00018840675099433636,
      "loss": 0.8437,
      "step": 195
    },
    {
      "epoch": 0.24630541871921183,
      "grad_norm": 0.10700402408838272,
      "learning_rate": 0.00018738071036110808,
      "loss": 0.8108,
      "step": 200
    },
    {
      "epoch": 0.2524630541871921,
      "grad_norm": 0.09482070803642273,
      "learning_rate": 0.00018631421280499116,
      "loss": 0.8326,
      "step": 205
    },
    {
      "epoch": 0.25862068965517243,
      "grad_norm": 0.0902240052819252,
      "learning_rate": 0.00018520775211013093,
      "loss": 0.8637,
      "step": 210
    },
    {
      "epoch": 0.2647783251231527,
      "grad_norm": 0.0930478647351265,
      "learning_rate": 0.00018406184056344782,
      "loss": 0.8424,
      "step": 215
    },
    {
      "epoch": 0.270935960591133,
      "grad_norm": 0.09855949133634567,
      "learning_rate": 0.00018287700871745036,
      "loss": 0.839,
      "step": 220
    },
    {
      "epoch": 0.2770935960591133,
      "grad_norm": 0.10178953409194946,
      "learning_rate": 0.0001816538051445916,
      "loss": 0.8726,
      "step": 225
    },
    {
      "epoch": 0.2832512315270936,
      "grad_norm": 0.09509410709142685,
      "learning_rate": 0.00018039279618328212,
      "loss": 0.8709,
      "step": 230
    },
    {
      "epoch": 0.2894088669950739,
      "grad_norm": 0.09647022932767868,
      "learning_rate": 0.00017909456567567772,
      "loss": 0.8695,
      "step": 235
    },
    {
      "epoch": 0.2955665024630542,
      "grad_norm": 0.0976739376783371,
      "learning_rate": 0.0001777597146973627,
      "loss": 0.8577,
      "step": 240
    },
    {
      "epoch": 0.3017241379310345,
      "grad_norm": 0.09781508892774582,
      "learning_rate": 0.00017638886127905427,
      "loss": 0.8495,
      "step": 245
    },
    {
      "epoch": 0.3078817733990148,
      "grad_norm": 0.09447339177131653,
      "learning_rate": 0.00017498264012045687,
      "loss": 0.8569,
      "step": 250
    },
    {
      "epoch": 0.31403940886699505,
      "grad_norm": 0.09428857266902924,
      "learning_rate": 0.00017354170229639856,
      "loss": 0.8309,
      "step": 255
    },
    {
      "epoch": 0.32019704433497537,
      "grad_norm": 0.09784238040447235,
      "learning_rate": 0.00017206671495538612,
      "loss": 0.88,
      "step": 260
    },
    {
      "epoch": 0.3263546798029557,
      "grad_norm": 0.10478725284337997,
      "learning_rate": 0.0001705583610107178,
      "loss": 0.8466,
      "step": 265
    },
    {
      "epoch": 0.33251231527093594,
      "grad_norm": 0.10171061754226685,
      "learning_rate": 0.0001690173388242972,
      "loss": 0.8407,
      "step": 270
    },
    {
      "epoch": 0.33866995073891626,
      "grad_norm": 0.09914927929639816,
      "learning_rate": 0.00016744436188329456,
      "loss": 0.8362,
      "step": 275
    },
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 0.10386697202920914,
      "learning_rate": 0.0001658401584698049,
      "loss": 0.842,
      "step": 280
    },
    {
      "epoch": 0.35098522167487683,
      "grad_norm": 0.09510869532823563,
      "learning_rate": 0.00016420547132365635,
      "loss": 0.8615,
      "step": 285
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.10018317401409149,
      "learning_rate": 0.00016254105729852464,
      "loss": 0.88,
      "step": 290
    },
    {
      "epoch": 0.3633004926108374,
      "grad_norm": 0.0965588241815567,
      "learning_rate": 0.00016084768701151263,
      "loss": 0.8345,
      "step": 295
    },
    {
      "epoch": 0.3694581280788177,
      "grad_norm": 0.09997261315584183,
      "learning_rate": 0.00015912614448635782,
      "loss": 0.8474,
      "step": 300
    },
    {
      "epoch": 0.37561576354679804,
      "grad_norm": 0.1020544171333313,
      "learning_rate": 0.00015737722679043248,
      "loss": 0.8398,
      "step": 305
    },
    {
      "epoch": 0.3817733990147783,
      "grad_norm": 0.09508492052555084,
      "learning_rate": 0.00015560174366570446,
      "loss": 0.8346,
      "step": 310
    },
    {
      "epoch": 0.3879310344827586,
      "grad_norm": 0.09102219343185425,
      "learning_rate": 0.00015380051715382996,
      "loss": 0.8834,
      "step": 315
    },
    {
      "epoch": 0.39408866995073893,
      "grad_norm": 0.09441574662923813,
      "learning_rate": 0.0001519743812155516,
      "loss": 0.8431,
      "step": 320
    },
    {
      "epoch": 0.4002463054187192,
      "grad_norm": 0.10017410665750504,
      "learning_rate": 0.00015012418134457755,
      "loss": 0.868,
      "step": 325
    },
    {
      "epoch": 0.4064039408866995,
      "grad_norm": 0.09937512874603271,
      "learning_rate": 0.00014825077417612186,
      "loss": 0.8404,
      "step": 330
    },
    {
      "epoch": 0.4125615763546798,
      "grad_norm": 0.1004796102643013,
      "learning_rate": 0.0001463550270902851,
      "loss": 0.8417,
      "step": 335
    },
    {
      "epoch": 0.4187192118226601,
      "grad_norm": 0.09814818203449249,
      "learning_rate": 0.00014443781781046136,
      "loss": 0.8592,
      "step": 340
    },
    {
      "epoch": 0.4248768472906404,
      "grad_norm": 0.09534980356693268,
      "learning_rate": 0.0001425000339969554,
      "loss": 0.8637,
      "step": 345
    },
    {
      "epoch": 0.43103448275862066,
      "grad_norm": 0.09524567425251007,
      "learning_rate": 0.00014054257283599973,
      "loss": 0.8278,
      "step": 350
    },
    {
      "epoch": 0.437192118226601,
      "grad_norm": 0.09492156654596329,
      "learning_rate": 0.0001385663406243607,
      "loss": 0.8371,
      "step": 355
    },
    {
      "epoch": 0.4433497536945813,
      "grad_norm": 0.10173512995243073,
      "learning_rate": 0.00013657225234972695,
      "loss": 0.8592,
      "step": 360
    },
    {
      "epoch": 0.44950738916256155,
      "grad_norm": 0.11124896258115768,
      "learning_rate": 0.00013456123126707334,
      "loss": 0.8543,
      "step": 365
    },
    {
      "epoch": 0.45566502463054187,
      "grad_norm": 0.09352728724479675,
      "learning_rate": 0.00013253420847119803,
      "loss": 0.8592,
      "step": 370
    },
    {
      "epoch": 0.4618226600985222,
      "grad_norm": 0.11169470101594925,
      "learning_rate": 0.0001304921224656289,
      "loss": 0.8233,
      "step": 375
    },
    {
      "epoch": 0.46798029556650245,
      "grad_norm": 0.09410259872674942,
      "learning_rate": 0.0001284359187281004,
      "loss": 0.8702,
      "step": 380
    },
    {
      "epoch": 0.47413793103448276,
      "grad_norm": 0.09482759982347488,
      "learning_rate": 0.00012636654927280073,
      "loss": 0.818,
      "step": 385
    },
    {
      "epoch": 0.4802955665024631,
      "grad_norm": 0.11693406850099564,
      "learning_rate": 0.0001242849722095936,
      "loss": 0.8476,
      "step": 390
    },
    {
      "epoch": 0.48645320197044334,
      "grad_norm": 0.09488484263420105,
      "learning_rate": 0.00012219215130041656,
      "loss": 0.7966,
      "step": 395
    },
    {
      "epoch": 0.49261083743842365,
      "grad_norm": 0.09644895046949387,
      "learning_rate": 0.00012008905551306356,
      "loss": 0.8411,
      "step": 400
    },
    {
      "epoch": 0.4987684729064039,
      "grad_norm": 0.09742914140224457,
      "learning_rate": 0.00011797665857255621,
      "loss": 0.8306,
      "step": 405
    },
    {
      "epoch": 0.5049261083743842,
      "grad_norm": 0.11031042784452438,
      "learning_rate": 0.00011585593851031347,
      "loss": 0.8356,
      "step": 410
    },
    {
      "epoch": 0.5110837438423645,
      "grad_norm": 0.11047101765871048,
      "learning_rate": 0.00011372787721132648,
      "loss": 0.8616,
      "step": 415
    },
    {
      "epoch": 0.5172413793103449,
      "grad_norm": 0.11460606008768082,
      "learning_rate": 0.00011159345995955006,
      "loss": 0.8332,
      "step": 420
    },
    {
      "epoch": 0.5233990147783252,
      "grad_norm": 0.09033536165952682,
      "learning_rate": 0.00010945367498171993,
      "loss": 0.8455,
      "step": 425
    },
    {
      "epoch": 0.5295566502463054,
      "grad_norm": 0.09113427996635437,
      "learning_rate": 0.00010730951298980776,
      "loss": 0.871,
      "step": 430
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 0.09332713484764099,
      "learning_rate": 0.00010516196672232539,
      "loss": 0.8643,
      "step": 435
    },
    {
      "epoch": 0.541871921182266,
      "grad_norm": 0.09998837858438492,
      "learning_rate": 0.00010301203048469083,
      "loss": 0.838,
      "step": 440
    },
    {
      "epoch": 0.5480295566502463,
      "grad_norm": 0.09564919024705887,
      "learning_rate": 0.00010086069968886885,
      "loss": 0.8245,
      "step": 445
    },
    {
      "epoch": 0.5541871921182266,
      "grad_norm": 0.10122521966695786,
      "learning_rate": 9.870897039249911e-05,
      "loss": 0.8496,
      "step": 450
    },
    {
      "epoch": 0.5603448275862069,
      "grad_norm": 0.09907156229019165,
      "learning_rate": 9.655783883772545e-05,
      "loss": 0.8678,
      "step": 455
    },
    {
      "epoch": 0.5665024630541872,
      "grad_norm": 0.10225577652454376,
      "learning_rate": 9.440830098993969e-05,
      "loss": 0.8295,
      "step": 460
    },
    {
      "epoch": 0.5726600985221675,
      "grad_norm": 0.09626160562038422,
      "learning_rate": 9.22613520766537e-05,
      "loss": 0.8396,
      "step": 465
    },
    {
      "epoch": 0.5788177339901478,
      "grad_norm": 0.10784432291984558,
      "learning_rate": 9.011798612671286e-05,
      "loss": 0.8471,
      "step": 470
    },
    {
      "epoch": 0.5849753694581281,
      "grad_norm": 0.10687639564275742,
      "learning_rate": 8.797919551006475e-05,
      "loss": 0.8497,
      "step": 475
    },
    {
      "epoch": 0.5911330049261084,
      "grad_norm": 0.09171903878450394,
      "learning_rate": 8.58459704782957e-05,
      "loss": 0.8569,
      "step": 480
    },
    {
      "epoch": 0.5972906403940886,
      "grad_norm": 0.09949865937232971,
      "learning_rate": 8.371929870614833e-05,
      "loss": 0.8751,
      "step": 485
    },
    {
      "epoch": 0.603448275862069,
      "grad_norm": 0.10091466456651688,
      "learning_rate": 8.160016483423199e-05,
      "loss": 0.827,
      "step": 490
    },
    {
      "epoch": 0.6096059113300493,
      "grad_norm": 0.09927868098020554,
      "learning_rate": 7.948955001313811e-05,
      "loss": 0.837,
      "step": 495
    },
    {
      "epoch": 0.6157635467980296,
      "grad_norm": 0.09563671797513962,
      "learning_rate": 7.738843144917119e-05,
      "loss": 0.846,
      "step": 500
    },
    {
      "epoch": 0.6219211822660099,
      "grad_norm": 0.09975801408290863,
      "learning_rate": 7.529778195190645e-05,
      "loss": 0.8362,
      "step": 505
    },
    {
      "epoch": 0.6280788177339901,
      "grad_norm": 0.10187681764364243,
      "learning_rate": 7.321856948378259e-05,
      "loss": 0.8295,
      "step": 510
    },
    {
      "epoch": 0.6342364532019704,
      "grad_norm": 0.10099563747644424,
      "learning_rate": 7.115175671193913e-05,
      "loss": 0.8567,
      "step": 515
    },
    {
      "epoch": 0.6403940886699507,
      "grad_norm": 0.0974687784910202,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.8356,
      "step": 520
    },
    {
      "epoch": 0.646551724137931,
      "grad_norm": 0.11131696403026581,
      "learning_rate": 6.7059151777547e-05,
      "loss": 0.8212,
      "step": 525
    },
    {
      "epoch": 0.6527093596059114,
      "grad_norm": 0.09260319918394089,
      "learning_rate": 6.503525447487715e-05,
      "loss": 0.8279,
      "step": 530
    },
    {
      "epoch": 0.6588669950738916,
      "grad_norm": 0.09697587788105011,
      "learning_rate": 6.30275457109327e-05,
      "loss": 0.8031,
      "step": 535
    },
    {
      "epoch": 0.6650246305418719,
      "grad_norm": 0.11103641986846924,
      "learning_rate": 6.103695504692122e-05,
      "loss": 0.8436,
      "step": 540
    },
    {
      "epoch": 0.6711822660098522,
      "grad_norm": 0.1072748601436615,
      "learning_rate": 5.906440411843787e-05,
      "loss": 0.8248,
      "step": 545
    },
    {
      "epoch": 0.6773399014778325,
      "grad_norm": 0.1020703986287117,
      "learning_rate": 5.7110806208751655e-05,
      "loss": 0.8498,
      "step": 550
    },
    {
      "epoch": 0.6834975369458128,
      "grad_norm": 0.10344262421131134,
      "learning_rate": 5.5177065825958966e-05,
      "loss": 0.8355,
      "step": 555
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 0.10204443335533142,
      "learning_rate": 5.326407828419979e-05,
      "loss": 0.8224,
      "step": 560
    },
    {
      "epoch": 0.6958128078817734,
      "grad_norm": 0.10229265689849854,
      "learning_rate": 5.137272928913097e-05,
      "loss": 0.8366,
      "step": 565
    },
    {
      "epoch": 0.7019704433497537,
      "grad_norm": 0.10783268511295319,
      "learning_rate": 4.9503894527847964e-05,
      "loss": 0.8345,
      "step": 570
    },
    {
      "epoch": 0.708128078817734,
      "grad_norm": 0.09613071382045746,
      "learning_rate": 4.7658439263445e-05,
      "loss": 0.8786,
      "step": 575
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.09664251655340195,
      "learning_rate": 4.583721793440188e-05,
      "loss": 0.8881,
      "step": 580
    },
    {
      "epoch": 0.7204433497536946,
      "grad_norm": 0.09146658331155777,
      "learning_rate": 4.4041073758982335e-05,
      "loss": 0.8425,
      "step": 585
    },
    {
      "epoch": 0.7266009852216748,
      "grad_norm": 0.10532376915216446,
      "learning_rate": 4.227083834482728e-05,
      "loss": 0.8319,
      "step": 590
    },
    {
      "epoch": 0.7327586206896551,
      "grad_norm": 0.09909343719482422,
      "learning_rate": 4.052733130392367e-05,
      "loss": 0.8396,
      "step": 595
    },
    {
      "epoch": 0.7389162561576355,
      "grad_norm": 0.10966886579990387,
      "learning_rate": 3.881135987312757e-05,
      "loss": 0.8404,
      "step": 600
    },
    {
      "epoch": 0.7450738916256158,
      "grad_norm": 0.10393289476633072,
      "learning_rate": 3.712371854041654e-05,
      "loss": 0.8524,
      "step": 605
    },
    {
      "epoch": 0.7512315270935961,
      "grad_norm": 0.09633342921733856,
      "learning_rate": 3.546518867704499e-05,
      "loss": 0.8542,
      "step": 610
    },
    {
      "epoch": 0.7573891625615764,
      "grad_norm": 0.10098910331726074,
      "learning_rate": 3.383653817577216e-05,
      "loss": 0.8552,
      "step": 615
    },
    {
      "epoch": 0.7635467980295566,
      "grad_norm": 0.10073598474264145,
      "learning_rate": 3.223852109533112e-05,
      "loss": 0.8423,
      "step": 620
    },
    {
      "epoch": 0.7697044334975369,
      "grad_norm": 0.10078535974025726,
      "learning_rate": 3.0671877311302244e-05,
      "loss": 0.8402,
      "step": 625
    },
    {
      "epoch": 0.7758620689655172,
      "grad_norm": 0.10436087101697922,
      "learning_rate": 2.9137332173554043e-05,
      "loss": 0.8111,
      "step": 630
    },
    {
      "epoch": 0.7820197044334976,
      "grad_norm": 0.09869997948408127,
      "learning_rate": 2.763559617040876e-05,
      "loss": 0.8407,
      "step": 635
    },
    {
      "epoch": 0.7881773399014779,
      "grad_norm": 0.10569991171360016,
      "learning_rate": 2.616736459968936e-05,
      "loss": 0.8173,
      "step": 640
    },
    {
      "epoch": 0.7943349753694581,
      "grad_norm": 0.09898445755243301,
      "learning_rate": 2.473331724679917e-05,
      "loss": 0.8236,
      "step": 645
    },
    {
      "epoch": 0.8004926108374384,
      "grad_norm": 0.10241784900426865,
      "learning_rate": 2.33341180699841e-05,
      "loss": 0.8373,
      "step": 650
    },
    {
      "epoch": 0.8066502463054187,
      "grad_norm": 0.10218991339206696,
      "learning_rate": 2.1970414892922442e-05,
      "loss": 0.8255,
      "step": 655
    },
    {
      "epoch": 0.812807881773399,
      "grad_norm": 0.09686244279146194,
      "learning_rate": 2.0642839104785272e-05,
      "loss": 0.837,
      "step": 660
    },
    {
      "epoch": 0.8189655172413793,
      "grad_norm": 0.09895747154951096,
      "learning_rate": 1.9352005367905536e-05,
      "loss": 0.8532,
      "step": 665
    },
    {
      "epoch": 0.8251231527093597,
      "grad_norm": 0.10845907032489777,
      "learning_rate": 1.8098511333192024e-05,
      "loss": 0.8555,
      "step": 670
    },
    {
      "epoch": 0.8312807881773399,
      "grad_norm": 0.10947497189044952,
      "learning_rate": 1.6882937363419203e-05,
      "loss": 0.8312,
      "step": 675
    },
    {
      "epoch": 0.8374384236453202,
      "grad_norm": 0.10034690797328949,
      "learning_rate": 1.570584626452173e-05,
      "loss": 0.8377,
      "step": 680
    },
    {
      "epoch": 0.8435960591133005,
      "grad_norm": 0.0978924036026001,
      "learning_rate": 1.4567783025017301e-05,
      "loss": 0.8337,
      "step": 685
    },
    {
      "epoch": 0.8497536945812808,
      "grad_norm": 0.09215282648801804,
      "learning_rate": 1.3469274563679402e-05,
      "loss": 0.8057,
      "step": 690
    },
    {
      "epoch": 0.8559113300492611,
      "grad_norm": 0.09425260126590729,
      "learning_rate": 1.2410829485575704e-05,
      "loss": 0.83,
      "step": 695
    },
    {
      "epoch": 0.8620689655172413,
      "grad_norm": 0.09816047549247742,
      "learning_rate": 1.1392937846586215e-05,
      "loss": 0.8389,
      "step": 700
    },
    {
      "epoch": 0.8682266009852216,
      "grad_norm": 0.09517823904752731,
      "learning_rate": 1.0416070926509113e-05,
      "loss": 0.8154,
      "step": 705
    },
    {
      "epoch": 0.874384236453202,
      "grad_norm": 0.097825787961483,
      "learning_rate": 9.48068101086026e-06,
      "loss": 0.8492,
      "step": 710
    },
    {
      "epoch": 0.8805418719211823,
      "grad_norm": 0.10528585314750671,
      "learning_rate": 8.58720118146662e-06,
      "loss": 0.8618,
      "step": 715
    },
    {
      "epoch": 0.8866995073891626,
      "grad_norm": 0.09678342193365097,
      "learning_rate": 7.736045115951251e-06,
      "loss": 0.8349,
      "step": 720
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 0.10073202848434448,
      "learning_rate": 6.927606896202066e-06,
      "loss": 0.823,
      "step": 725
    },
    {
      "epoch": 0.8990147783251231,
      "grad_norm": 0.10746844857931137,
      "learning_rate": 6.16226082591359e-06,
      "loss": 0.8406,
      "step": 730
    },
    {
      "epoch": 0.9051724137931034,
      "grad_norm": 0.09784899652004242,
      "learning_rate": 5.440361257285742e-06,
      "loss": 0.8522,
      "step": 735
    },
    {
      "epoch": 0.9113300492610837,
      "grad_norm": 0.0988786518573761,
      "learning_rate": 4.762242426960262e-06,
      "loss": 0.838,
      "step": 740
    },
    {
      "epoch": 0.9174876847290641,
      "grad_norm": 0.11292924731969833,
      "learning_rate": 4.128218301270359e-06,
      "loss": 0.8077,
      "step": 745
    },
    {
      "epoch": 0.9236453201970444,
      "grad_norm": 0.10150548070669174,
      "learning_rate": 3.5385824308756587e-06,
      "loss": 0.8248,
      "step": 750
    },
    {
      "epoch": 0.9298029556650246,
      "grad_norm": 0.09723605215549469,
      "learning_rate": 2.9936078148492973e-06,
      "loss": 0.8465,
      "step": 755
    },
    {
      "epoch": 0.9359605911330049,
      "grad_norm": 0.0988769456744194,
      "learning_rate": 2.493546774280531e-06,
      "loss": 0.8601,
      "step": 760
    },
    {
      "epoch": 0.9421182266009852,
      "grad_norm": 0.10142029821872711,
      "learning_rate": 2.0386308354509942e-06,
      "loss": 0.8635,
      "step": 765
    },
    {
      "epoch": 0.9482758620689655,
      "grad_norm": 0.10347957164049149,
      "learning_rate": 1.6290706226390285e-06,
      "loss": 0.8088,
      "step": 770
    },
    {
      "epoch": 0.9544334975369458,
      "grad_norm": 0.0945996567606926,
      "learning_rate": 1.2650557606013635e-06,
      "loss": 0.8074,
      "step": 775
    },
    {
      "epoch": 0.9605911330049262,
      "grad_norm": 0.09384027868509293,
      "learning_rate": 9.46754786777726e-07,
      "loss": 0.8235,
      "step": 780
    },
    {
      "epoch": 0.9667487684729064,
      "grad_norm": 0.10256475955247879,
      "learning_rate": 6.74315073258569e-07,
      "loss": 0.8613,
      "step": 785
    },
    {
      "epoch": 0.9729064039408867,
      "grad_norm": 0.09143529087305069,
      "learning_rate": 4.4786275855247527e-07,
      "loss": 0.8044,
      "step": 790
    },
    {
      "epoch": 0.979064039408867,
      "grad_norm": 0.10207488387823105,
      "learning_rate": 2.675026891844512e-07,
      "loss": 0.8265,
      "step": 795
    },
    {
      "epoch": 0.9852216748768473,
      "grad_norm": 0.100164495408535,
      "learning_rate": 1.333183711524133e-07,
      "loss": 0.8307,
      "step": 800
    },
    {
      "epoch": 0.9913793103448276,
      "grad_norm": 0.10022486746311188,
      "learning_rate": 4.5371931264270864e-08,
      "loss": 0.8189,
      "step": 805
    },
    {
      "epoch": 0.9975369458128078,
      "grad_norm": 0.10022631287574768,
      "learning_rate": 3.7040883734462683e-09,
      "loss": 0.8204,
      "step": 810
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.2506881952285767,
      "eval_runtime": 107.8412,
      "eval_samples_per_second": 10.71,
      "eval_steps_per_second": 0.677,
      "step": 812
    },
    {
      "epoch": 1.0,
      "step": 812,
      "total_flos": 2.1301458550160097e+18,
      "train_loss": 0.26981097944264343,
      "train_runtime": 10382.4605,
      "train_samples_per_second": 10.01,
      "train_steps_per_second": 0.078
    }
  ],
  "logging_steps": 5,
  "max_steps": 812,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1301458550160097e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}