{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 454,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0022026431718061676,
      "grad_norm": 1.0935207970465015,
      "learning_rate": 4.347826086956522e-06,
      "loss": 1.3623,
      "step": 1
    },
    {
      "epoch": 0.011013215859030838,
      "grad_norm": 1.0619934894299083,
      "learning_rate": 2.173913043478261e-05,
      "loss": 1.3747,
      "step": 5
    },
    {
      "epoch": 0.022026431718061675,
      "grad_norm": 0.25351576673529874,
      "learning_rate": 4.347826086956522e-05,
      "loss": 1.3541,
      "step": 10
    },
    {
      "epoch": 0.03303964757709251,
      "grad_norm": 0.2278849128178173,
      "learning_rate": 6.521739130434783e-05,
      "loss": 1.3162,
      "step": 15
    },
    {
      "epoch": 0.04405286343612335,
      "grad_norm": 0.1855238377034502,
      "learning_rate": 8.695652173913044e-05,
      "loss": 1.259,
      "step": 20
    },
    {
      "epoch": 0.05506607929515418,
      "grad_norm": 0.1452973877554258,
      "learning_rate": 0.00010869565217391305,
      "loss": 1.2263,
      "step": 25
    },
    {
      "epoch": 0.06607929515418502,
      "grad_norm": 0.10223461213864794,
      "learning_rate": 0.00013043478260869567,
      "loss": 1.2169,
      "step": 30
    },
    {
      "epoch": 0.07709251101321586,
      "grad_norm": 0.09738462561658244,
      "learning_rate": 0.00015217391304347827,
      "loss": 1.2041,
      "step": 35
    },
    {
      "epoch": 0.0881057268722467,
      "grad_norm": 0.09054576261597842,
      "learning_rate": 0.00017391304347826088,
      "loss": 1.1826,
      "step": 40
    },
    {
      "epoch": 0.09911894273127753,
      "grad_norm": 0.08674850660946061,
      "learning_rate": 0.0001956521739130435,
      "loss": 1.1734,
      "step": 45
    },
    {
      "epoch": 0.11013215859030837,
      "grad_norm": 0.09808296793590265,
      "learning_rate": 0.0001999525719713366,
      "loss": 1.1869,
      "step": 50
    },
    {
      "epoch": 0.1211453744493392,
      "grad_norm": 0.10256381268719783,
      "learning_rate": 0.0001997599727063717,
      "loss": 1.137,
      "step": 55
    },
    {
      "epoch": 0.13215859030837004,
      "grad_norm": 0.09401999180532912,
      "learning_rate": 0.00019941952317728147,
      "loss": 1.1505,
      "step": 60
    },
    {
      "epoch": 0.14317180616740088,
      "grad_norm": 0.07941243235115872,
      "learning_rate": 0.00019893172795069144,
      "loss": 1.1563,
      "step": 65
    },
    {
      "epoch": 0.15418502202643172,
      "grad_norm": 0.09518349218263038,
      "learning_rate": 0.0001982973099683902,
      "loss": 1.1397,
      "step": 70
    },
    {
      "epoch": 0.16519823788546256,
      "grad_norm": 0.07317938488194159,
      "learning_rate": 0.00019751720947588602,
      "loss": 1.1214,
      "step": 75
    },
    {
      "epoch": 0.1762114537444934,
      "grad_norm": 0.07067546841879777,
      "learning_rate": 0.00019659258262890683,
      "loss": 1.1514,
      "step": 80
    },
    {
      "epoch": 0.18722466960352424,
      "grad_norm": 0.08477547989478108,
      "learning_rate": 0.000195524799779908,
      "loss": 1.1413,
      "step": 85
    },
    {
      "epoch": 0.19823788546255505,
      "grad_norm": 0.08043454343502093,
      "learning_rate": 0.00019431544344712776,
      "loss": 1.1204,
      "step": 90
    },
    {
      "epoch": 0.2092511013215859,
      "grad_norm": 0.07585966181487593,
      "learning_rate": 0.00019296630596920023,
      "loss": 1.1363,
      "step": 95
    },
    {
      "epoch": 0.22026431718061673,
      "grad_norm": 0.06977946901923308,
      "learning_rate": 0.0001914793868488021,
      "loss": 1.1388,
      "step": 100
    },
    {
      "epoch": 0.23127753303964757,
      "grad_norm": 0.09828209309581792,
      "learning_rate": 0.0001898568897892697,
      "loss": 1.1323,
      "step": 105
    },
    {
      "epoch": 0.2422907488986784,
      "grad_norm": 0.08128484219994006,
      "learning_rate": 0.00018810121942857845,
      "loss": 1.1282,
      "step": 110
    },
    {
      "epoch": 0.2533039647577093,
      "grad_norm": 0.0705528387018198,
      "learning_rate": 0.00018621497777552507,
      "loss": 1.1478,
      "step": 115
    },
    {
      "epoch": 0.2643171806167401,
      "grad_norm": 0.07222951437049585,
      "learning_rate": 0.00018420096035339452,
      "loss": 1.1281,
      "step": 120
    },
    {
      "epoch": 0.2753303964757709,
      "grad_norm": 0.07745334706329449,
      "learning_rate": 0.00018206215205682683,
      "loss": 1.1305,
      "step": 125
    },
    {
      "epoch": 0.28634361233480177,
      "grad_norm": 0.07138923529008301,
      "learning_rate": 0.000179801722728024,
      "loss": 1.1248,
      "step": 130
    },
    {
      "epoch": 0.2973568281938326,
      "grad_norm": 0.07479603203788834,
      "learning_rate": 0.00017742302245885383,
      "loss": 1.1273,
      "step": 135
    },
    {
      "epoch": 0.30837004405286345,
      "grad_norm": 0.07733525226694472,
      "learning_rate": 0.00017492957662581295,
      "loss": 1.1109,
      "step": 140
    },
    {
      "epoch": 0.31938325991189426,
      "grad_norm": 0.07371826786635874,
      "learning_rate": 0.00017232508066520702,
      "loss": 1.1358,
      "step": 145
    },
    {
      "epoch": 0.3303964757709251,
      "grad_norm": 0.07313288578427013,
      "learning_rate": 0.0001696133945962927,
      "loss": 1.1366,
      "step": 150
    },
    {
      "epoch": 0.34140969162995594,
      "grad_norm": 0.07112918824502681,
      "learning_rate": 0.00016679853730049743,
      "loss": 1.1278,
      "step": 155
    },
    {
      "epoch": 0.3524229074889868,
      "grad_norm": 0.07765780862323055,
      "learning_rate": 0.00016388468056519612,
      "loss": 1.114,
      "step": 160
    },
    {
      "epoch": 0.3634361233480176,
      "grad_norm": 0.08642134716689472,
      "learning_rate": 0.00016087614290087208,
      "loss": 1.1254,
      "step": 165
    },
    {
      "epoch": 0.3744493392070485,
      "grad_norm": 0.08329985092613008,
      "learning_rate": 0.00015777738314082514,
      "loss": 1.1424,
      "step": 170
    },
    {
      "epoch": 0.3854625550660793,
      "grad_norm": 0.08324403766808654,
      "learning_rate": 0.00015459299383291345,
      "loss": 1.126,
      "step": 175
    },
    {
      "epoch": 0.3964757709251101,
      "grad_norm": 0.0824304632426703,
      "learning_rate": 0.00015132769443312207,
      "loss": 1.109,
      "step": 180
    },
    {
      "epoch": 0.40748898678414097,
      "grad_norm": 0.07117156561482839,
      "learning_rate": 0.00014798632431104592,
      "loss": 1.1036,
      "step": 185
    },
    {
      "epoch": 0.4185022026431718,
      "grad_norm": 0.07457290539374888,
      "learning_rate": 0.00014457383557765386,
      "loss": 1.1315,
      "step": 190
    },
    {
      "epoch": 0.42951541850220265,
      "grad_norm": 0.07127883301068033,
      "learning_rate": 0.00014109528574596301,
      "loss": 1.115,
      "step": 195
    },
    {
      "epoch": 0.44052863436123346,
      "grad_norm": 0.07115222351808868,
      "learning_rate": 0.00013755583023550126,
      "loss": 1.1125,
      "step": 200
    },
    {
      "epoch": 0.45154185022026433,
      "grad_norm": 0.07230426175014865,
      "learning_rate": 0.00013396071473166613,
      "loss": 1.1264,
      "step": 205
    },
    {
      "epoch": 0.46255506607929514,
      "grad_norm": 0.07086336369316362,
      "learning_rate": 0.00013031526741130435,
      "loss": 1.1395,
      "step": 210
    },
    {
      "epoch": 0.473568281938326,
      "grad_norm": 0.07876282139247363,
      "learning_rate": 0.0001266248910460341,
      "loss": 1.1198,
      "step": 215
    },
    {
      "epoch": 0.4845814977973568,
      "grad_norm": 0.07398923802908818,
      "learning_rate": 0.0001228950549950134,
      "loss": 1.1191,
      "step": 220
    },
    {
      "epoch": 0.4955947136563877,
      "grad_norm": 0.07631700509520113,
      "learning_rate": 0.00011913128709902181,
      "loss": 1.1015,
      "step": 225
    },
    {
      "epoch": 0.5066079295154186,
      "grad_norm": 0.07776652535484273,
      "learning_rate": 0.00011533916548786857,
      "loss": 1.0927,
      "step": 230
    },
    {
      "epoch": 0.5176211453744494,
      "grad_norm": 0.0779293920050639,
      "learning_rate": 0.00011152431031326978,
      "loss": 1.1106,
      "step": 235
    },
    {
      "epoch": 0.5286343612334802,
      "grad_norm": 0.07173097000465865,
      "learning_rate": 0.0001076923754194464,
      "loss": 1.1192,
      "step": 240
    },
    {
      "epoch": 0.539647577092511,
      "grad_norm": 0.06808021677810269,
      "learning_rate": 0.00010384903996378783,
      "loss": 1.1141,
      "step": 245
    },
    {
      "epoch": 0.5506607929515418,
      "grad_norm": 0.07092915110115385,
      "learning_rate": 0.0001,
      "loss": 1.1005,
      "step": 250
    },
    {
      "epoch": 0.5616740088105727,
      "grad_norm": 0.07272607907211924,
      "learning_rate": 9.615096003621221e-05,
      "loss": 1.1199,
      "step": 255
    },
    {
      "epoch": 0.5726872246696035,
      "grad_norm": 0.07107512047821021,
      "learning_rate": 9.230762458055363e-05,
      "loss": 1.1026,
      "step": 260
    },
    {
      "epoch": 0.5837004405286343,
      "grad_norm": 0.07442425494453343,
      "learning_rate": 8.847568968673026e-05,
      "loss": 1.1419,
      "step": 265
    },
    {
      "epoch": 0.5947136563876652,
      "grad_norm": 0.07080798721909222,
      "learning_rate": 8.466083451213144e-05,
      "loss": 1.0954,
      "step": 270
    },
    {
      "epoch": 0.6057268722466961,
      "grad_norm": 0.07188012114083321,
      "learning_rate": 8.086871290097821e-05,
      "loss": 1.1214,
      "step": 275
    },
    {
      "epoch": 0.6167400881057269,
      "grad_norm": 0.07245225851302386,
      "learning_rate": 7.710494500498662e-05,
      "loss": 1.095,
      "step": 280
    },
    {
      "epoch": 0.6277533039647577,
      "grad_norm": 0.07006313027766538,
      "learning_rate": 7.337510895396591e-05,
      "loss": 1.0988,
      "step": 285
    },
    {
      "epoch": 0.6387665198237885,
      "grad_norm": 0.07232286737424776,
      "learning_rate": 6.968473258869566e-05,
      "loss": 1.118,
      "step": 290
    },
    {
      "epoch": 0.6497797356828194,
      "grad_norm": 0.07049128615067844,
      "learning_rate": 6.603928526833387e-05,
      "loss": 1.1004,
      "step": 295
    },
    {
      "epoch": 0.6607929515418502,
      "grad_norm": 0.0746599685264263,
      "learning_rate": 6.244416976449875e-05,
      "loss": 1.1059,
      "step": 300
    },
    {
      "epoch": 0.6718061674008811,
      "grad_norm": 0.07192896430027275,
      "learning_rate": 5.890471425403703e-05,
      "loss": 1.1028,
      "step": 305
    },
    {
      "epoch": 0.6828193832599119,
      "grad_norm": 0.06921226870329861,
      "learning_rate": 5.542616442234618e-05,
      "loss": 1.1112,
      "step": 310
    },
    {
      "epoch": 0.6938325991189427,
      "grad_norm": 0.07384850031320968,
      "learning_rate": 5.201367568895408e-05,
      "loss": 1.1116,
      "step": 315
    },
    {
      "epoch": 0.7048458149779736,
      "grad_norm": 0.07138390105976088,
      "learning_rate": 4.8672305566877964e-05,
      "loss": 1.1309,
      "step": 320
    },
    {
      "epoch": 0.7158590308370044,
      "grad_norm": 0.06880496070020393,
      "learning_rate": 4.540700616708658e-05,
      "loss": 1.1076,
      "step": 325
    },
    {
      "epoch": 0.7268722466960352,
      "grad_norm": 0.06923475218927538,
      "learning_rate": 4.222261685917489e-05,
      "loss": 1.108,
      "step": 330
    },
    {
      "epoch": 0.737885462555066,
      "grad_norm": 0.07248582746669294,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 1.1317,
      "step": 335
    },
    {
      "epoch": 0.748898678414097,
      "grad_norm": 0.07283051607038368,
      "learning_rate": 3.6115319434803894e-05,
      "loss": 1.1133,
      "step": 340
    },
    {
      "epoch": 0.7599118942731278,
      "grad_norm": 0.06818813267960931,
      "learning_rate": 3.32014626995026e-05,
      "loss": 1.112,
      "step": 345
    },
    {
      "epoch": 0.7709251101321586,
      "grad_norm": 0.08412840165527498,
      "learning_rate": 3.0386605403707346e-05,
      "loss": 1.1116,
      "step": 350
    },
    {
      "epoch": 0.7819383259911894,
      "grad_norm": 0.07095418638014062,
      "learning_rate": 2.7674919334793035e-05,
      "loss": 1.104,
      "step": 355
    },
    {
      "epoch": 0.7929515418502202,
      "grad_norm": 0.07547366887662493,
      "learning_rate": 2.507042337418707e-05,
      "loss": 1.0963,
      "step": 360
    },
    {
      "epoch": 0.8039647577092511,
      "grad_norm": 0.06961831699039754,
      "learning_rate": 2.2576977541146193e-05,
      "loss": 1.1181,
      "step": 365
    },
    {
      "epoch": 0.8149779735682819,
      "grad_norm": 0.06804271947624164,
      "learning_rate": 2.0198277271976052e-05,
      "loss": 1.0993,
      "step": 370
    },
    {
      "epoch": 0.8259911894273128,
      "grad_norm": 0.07293836582897208,
      "learning_rate": 1.793784794317319e-05,
      "loss": 1.1165,
      "step": 375
    },
    {
      "epoch": 0.8370044052863436,
      "grad_norm": 0.06982718554857356,
      "learning_rate": 1.5799039646605486e-05,
      "loss": 1.1063,
      "step": 380
    },
    {
      "epoch": 0.8480176211453745,
      "grad_norm": 0.06858309519021612,
      "learning_rate": 1.3785022224474942e-05,
      "loss": 1.0918,
      "step": 385
    },
    {
      "epoch": 0.8590308370044053,
      "grad_norm": 0.0682809739031729,
      "learning_rate": 1.1898780571421552e-05,
      "loss": 1.0911,
      "step": 390
    },
    {
      "epoch": 0.8700440528634361,
      "grad_norm": 0.07165844802397829,
      "learning_rate": 1.0143110210730312e-05,
      "loss": 1.1113,
      "step": 395
    },
    {
      "epoch": 0.8810572687224669,
      "grad_norm": 0.0692445707168849,
      "learning_rate": 8.520613151197898e-06,
      "loss": 1.1252,
      "step": 400
    },
    {
      "epoch": 0.8920704845814978,
      "grad_norm": 0.06866962638477613,
      "learning_rate": 7.033694030799787e-06,
      "loss": 1.0891,
      "step": 405
    },
    {
      "epoch": 0.9030837004405287,
      "grad_norm": 0.07273616328237217,
      "learning_rate": 5.684556552872256e-06,
      "loss": 1.116,
      "step": 410
    },
    {
      "epoch": 0.9140969162995595,
      "grad_norm": 0.06934023088458012,
      "learning_rate": 4.475200220092002e-06,
      "loss": 1.0892,
      "step": 415
    },
    {
      "epoch": 0.9251101321585903,
      "grad_norm": 0.0701825275643681,
      "learning_rate": 3.40741737109318e-06,
      "loss": 1.0988,
      "step": 420
    },
    {
      "epoch": 0.9361233480176211,
      "grad_norm": 0.06927772495486725,
      "learning_rate": 2.482790524113998e-06,
      "loss": 1.1088,
      "step": 425
    },
    {
      "epoch": 0.947136563876652,
      "grad_norm": 0.0685769456667081,
      "learning_rate": 1.7026900316098215e-06,
      "loss": 1.1237,
      "step": 430
    },
    {
      "epoch": 0.9581497797356828,
      "grad_norm": 0.06848296141460276,
      "learning_rate": 1.0682720493085607e-06,
      "loss": 1.0978,
      "step": 435
    },
    {
      "epoch": 0.9691629955947136,
      "grad_norm": 0.06944396554705634,
      "learning_rate": 5.804768227185565e-07,
      "loss": 1.1155,
      "step": 440
    },
    {
      "epoch": 0.9801762114537445,
      "grad_norm": 0.0711777792122946,
      "learning_rate": 2.400272936283088e-07,
      "loss": 1.1072,
      "step": 445
    },
    {
      "epoch": 0.9911894273127754,
      "grad_norm": 0.06801313867689823,
      "learning_rate": 4.74280286634099e-08,
      "loss": 1.0891,
      "step": 450
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.9785001873970032,
      "eval_runtime": 2.2823,
      "eval_samples_per_second": 3.067,
      "eval_steps_per_second": 0.876,
      "step": 454
    },
    {
      "epoch": 1.0,
      "step": 454,
      "total_flos": 1.4408836278386688e+16,
      "train_loss": 1.1317675271223295,
      "train_runtime": 17254.8753,
      "train_samples_per_second": 3.367,
      "train_steps_per_second": 0.026
    }
  ],
  "logging_steps": 5,
  "max_steps": 454,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4408836278386688e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}