| { | |
| "best_global_step": 24000, | |
| "best_metric": 0.15508916974067688, | |
| "best_model_checkpoint": "./patchtst_tsmixup_final/checkpoint-24000", | |
| "epoch": 2.285292577250869, | |
| "eval_steps": 1000, | |
| "global_step": 24000, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.004761224586963767, | |
| "grad_norm": 2.066147804260254, | |
| "learning_rate": 4.9000000000000005e-06, | |
| "loss": 0.5158, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.009522449173927534, | |
| "grad_norm": 1.1728284358978271, | |
| "learning_rate": 9.900000000000002e-06, | |
| "loss": 0.4068, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.014283673760891302, | |
| "grad_norm": 1.0786150693893433, | |
| "learning_rate": 1.49e-05, | |
| "loss": 0.3107, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.01904489834785507, | |
| "grad_norm": 1.0987305641174316, | |
| "learning_rate": 1.9900000000000003e-05, | |
| "loss": 0.2444, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.023806122934818836, | |
| "grad_norm": 0.9221014976501465, | |
| "learning_rate": 2.4900000000000002e-05, | |
| "loss": 0.2106, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.028567347521782603, | |
| "grad_norm": 1.1374775171279907, | |
| "learning_rate": 2.9900000000000002e-05, | |
| "loss": 0.1931, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.03332857210874637, | |
| "grad_norm": 1.2504770755767822, | |
| "learning_rate": 3.49e-05, | |
| "loss": 0.1971, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.03808979669571014, | |
| "grad_norm": 0.8367598056793213, | |
| "learning_rate": 3.99e-05, | |
| "loss": 0.1918, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.0428510212826739, | |
| "grad_norm": 0.9541486501693726, | |
| "learning_rate": 4.49e-05, | |
| "loss": 0.192, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.04761224586963767, | |
| "grad_norm": 0.9134742021560669, | |
| "learning_rate": 4.99e-05, | |
| "loss": 0.1875, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.052373470456601436, | |
| "grad_norm": 2.1370866298675537, | |
| "learning_rate": 5.4900000000000006e-05, | |
| "loss": 0.19, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.057134695043565206, | |
| "grad_norm": 1.8110992908477783, | |
| "learning_rate": 5.99e-05, | |
| "loss": 0.1902, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.06189591963052897, | |
| "grad_norm": 0.8074783682823181, | |
| "learning_rate": 6.49e-05, | |
| "loss": 0.1867, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.06665714421749273, | |
| "grad_norm": 1.8686498403549194, | |
| "learning_rate": 6.99e-05, | |
| "loss": 0.1839, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.07141836880445651, | |
| "grad_norm": 0.9226100444793701, | |
| "learning_rate": 7.49e-05, | |
| "loss": 0.1832, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.07617959339142027, | |
| "grad_norm": 1.0714315176010132, | |
| "learning_rate": 7.99e-05, | |
| "loss": 0.1862, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.08094081797838404, | |
| "grad_norm": 0.7746614813804626, | |
| "learning_rate": 8.49e-05, | |
| "loss": 0.1836, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.0857020425653478, | |
| "grad_norm": 0.8154539465904236, | |
| "learning_rate": 8.99e-05, | |
| "loss": 0.1863, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.09046326715231158, | |
| "grad_norm": 0.7890446186065674, | |
| "learning_rate": 9.49e-05, | |
| "loss": 0.1865, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.09522449173927534, | |
| "grad_norm": 0.9673619270324707, | |
| "learning_rate": 9.99e-05, | |
| "loss": 0.1797, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.09522449173927534, | |
| "eval_loss": 0.17560431361198425, | |
| "eval_mae": 0.7397370934486389, | |
| "eval_mse": 447.359619140625, | |
| "eval_rmse": 21.15087750285139, | |
| "eval_runtime": 58.709, | |
| "eval_samples_per_second": 10175.863, | |
| "eval_smape": 90.89710712432861, | |
| "eval_steps_per_second": 19.878, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.09998571632623911, | |
| "grad_norm": 0.8122360110282898, | |
| "learning_rate": 9.999532932990182e-05, | |
| "loss": 0.1813, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.10474694091320287, | |
| "grad_norm": 1.058344841003418, | |
| "learning_rate": 9.999056334000573e-05, | |
| "loss": 0.1797, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.10950816550016665, | |
| "grad_norm": 0.9517110586166382, | |
| "learning_rate": 9.998579735010962e-05, | |
| "loss": 0.1816, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 0.11426939008713041, | |
| "grad_norm": 1.0868945121765137, | |
| "learning_rate": 9.998103136021352e-05, | |
| "loss": 0.1786, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.11903061467409418, | |
| "grad_norm": 1.1176084280014038, | |
| "learning_rate": 9.997626537031743e-05, | |
| "loss": 0.1766, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 0.12379183926105794, | |
| "grad_norm": 1.489853858947754, | |
| "learning_rate": 9.997149938042131e-05, | |
| "loss": 0.1763, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.1285530638480217, | |
| "grad_norm": 1.1758699417114258, | |
| "learning_rate": 9.996673339052521e-05, | |
| "loss": 0.1808, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 0.13331428843498547, | |
| "grad_norm": 1.3600101470947266, | |
| "learning_rate": 9.996196740062911e-05, | |
| "loss": 0.177, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.13807551302194926, | |
| "grad_norm": 0.7045785784721375, | |
| "learning_rate": 9.995720141073301e-05, | |
| "loss": 0.1781, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 0.14283673760891302, | |
| "grad_norm": 1.386483907699585, | |
| "learning_rate": 9.995243542083691e-05, | |
| "loss": 0.1773, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.1475979621958768, | |
| "grad_norm": 1.3609524965286255, | |
| "learning_rate": 9.994766943094081e-05, | |
| "loss": 0.1777, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 0.15235918678284055, | |
| "grad_norm": 0.7538266777992249, | |
| "learning_rate": 9.994290344104471e-05, | |
| "loss": 0.1771, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 0.1571204113698043, | |
| "grad_norm": 0.6669739484786987, | |
| "learning_rate": 9.993813745114861e-05, | |
| "loss": 0.1757, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 0.16188163595676808, | |
| "grad_norm": 1.321413278579712, | |
| "learning_rate": 9.993337146125251e-05, | |
| "loss": 0.1729, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.16664286054373184, | |
| "grad_norm": 0.7625342011451721, | |
| "learning_rate": 9.992860547135641e-05, | |
| "loss": 0.1744, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 0.1714040851306956, | |
| "grad_norm": 1.4134427309036255, | |
| "learning_rate": 9.99238394814603e-05, | |
| "loss": 0.1726, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 0.17616530971765937, | |
| "grad_norm": 0.8000154495239258, | |
| "learning_rate": 9.991907349156421e-05, | |
| "loss": 0.175, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 0.18092653430462316, | |
| "grad_norm": 0.8776112198829651, | |
| "learning_rate": 9.99143075016681e-05, | |
| "loss": 0.1742, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 0.18568775889158692, | |
| "grad_norm": 0.9763234853744507, | |
| "learning_rate": 9.9909541511772e-05, | |
| "loss": 0.1753, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 0.1904489834785507, | |
| "grad_norm": 0.6149079203605652, | |
| "learning_rate": 9.99047755218759e-05, | |
| "loss": 0.1709, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.1904489834785507, | |
| "eval_loss": 0.1690738946199417, | |
| "eval_mae": 0.7152817249298096, | |
| "eval_mse": 425.0924072265625, | |
| "eval_rmse": 20.617769210721185, | |
| "eval_runtime": 60.02, | |
| "eval_samples_per_second": 9953.597, | |
| "eval_smape": 112.30494976043701, | |
| "eval_steps_per_second": 19.444, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.19521020806551445, | |
| "grad_norm": 0.8438695669174194, | |
| "learning_rate": 9.99000095319798e-05, | |
| "loss": 0.1753, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 0.19997143265247821, | |
| "grad_norm": 0.8969755172729492, | |
| "learning_rate": 9.98952435420837e-05, | |
| "loss": 0.1745, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 0.20473265723944198, | |
| "grad_norm": 0.9189475178718567, | |
| "learning_rate": 9.98904775521876e-05, | |
| "loss": 0.1709, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 0.20949388182640574, | |
| "grad_norm": 0.8711380362510681, | |
| "learning_rate": 9.98857115622915e-05, | |
| "loss": 0.1692, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 0.2142551064133695, | |
| "grad_norm": 0.7816225290298462, | |
| "learning_rate": 9.98809455723954e-05, | |
| "loss": 0.1709, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 0.2190163310003333, | |
| "grad_norm": 0.6408753395080566, | |
| "learning_rate": 9.987617958249928e-05, | |
| "loss": 0.1707, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 0.22377755558729706, | |
| "grad_norm": 0.7021253705024719, | |
| "learning_rate": 9.987141359260319e-05, | |
| "loss": 0.1739, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 0.22853878017426082, | |
| "grad_norm": 0.9026205539703369, | |
| "learning_rate": 9.986664760270709e-05, | |
| "loss": 0.1705, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 0.2333000047612246, | |
| "grad_norm": 0.6956352591514587, | |
| "learning_rate": 9.986188161281098e-05, | |
| "loss": 0.1705, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 0.23806122934818835, | |
| "grad_norm": 0.7024583220481873, | |
| "learning_rate": 9.985711562291489e-05, | |
| "loss": 0.1718, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.24282245393515212, | |
| "grad_norm": 0.6184080839157104, | |
| "learning_rate": 9.985234963301878e-05, | |
| "loss": 0.1697, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 0.24758367852211588, | |
| "grad_norm": 0.9684680700302124, | |
| "learning_rate": 9.984758364312268e-05, | |
| "loss": 0.1696, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 0.25234490310907964, | |
| "grad_norm": 0.8625733852386475, | |
| "learning_rate": 9.984281765322659e-05, | |
| "loss": 0.1731, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 0.2571061276960434, | |
| "grad_norm": 0.6156722903251648, | |
| "learning_rate": 9.983805166333048e-05, | |
| "loss": 0.1731, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 0.26186735228300717, | |
| "grad_norm": 0.7371954917907715, | |
| "learning_rate": 9.983328567343438e-05, | |
| "loss": 0.173, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 0.26662857686997093, | |
| "grad_norm": 0.658812940120697, | |
| "learning_rate": 9.982851968353828e-05, | |
| "loss": 0.1691, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 0.2713898014569347, | |
| "grad_norm": 0.8242245316505432, | |
| "learning_rate": 9.982375369364217e-05, | |
| "loss": 0.1715, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 0.2761510260438985, | |
| "grad_norm": 0.6063680052757263, | |
| "learning_rate": 9.981898770374607e-05, | |
| "loss": 0.1722, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 0.2809122506308623, | |
| "grad_norm": 0.7695409655570984, | |
| "learning_rate": 9.981422171384997e-05, | |
| "loss": 0.1712, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 0.28567347521782605, | |
| "grad_norm": 0.665675163269043, | |
| "learning_rate": 9.980945572395387e-05, | |
| "loss": 0.1722, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.28567347521782605, | |
| "eval_loss": 0.16621138155460358, | |
| "eval_mae": 0.7008960247039795, | |
| "eval_mse": 516.21533203125, | |
| "eval_rmse": 22.72037262087156, | |
| "eval_runtime": 62.5998, | |
| "eval_samples_per_second": 9543.397, | |
| "eval_smape": 89.52359557151794, | |
| "eval_steps_per_second": 18.642, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.2904346998047898, | |
| "grad_norm": 0.7732612490653992, | |
| "learning_rate": 9.980468973405776e-05, | |
| "loss": 0.1708, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 0.2951959243917536, | |
| "grad_norm": 1.0461828708648682, | |
| "learning_rate": 9.979992374416167e-05, | |
| "loss": 0.1692, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 0.29995714897871734, | |
| "grad_norm": 0.6057863831520081, | |
| "learning_rate": 9.979515775426557e-05, | |
| "loss": 0.1681, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 0.3047183735656811, | |
| "grad_norm": 0.5380491018295288, | |
| "learning_rate": 9.979039176436946e-05, | |
| "loss": 0.1687, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 0.30947959815264486, | |
| "grad_norm": 0.7164149284362793, | |
| "learning_rate": 9.978562577447337e-05, | |
| "loss": 0.1678, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 0.3142408227396086, | |
| "grad_norm": 0.9223781228065491, | |
| "learning_rate": 9.978085978457726e-05, | |
| "loss": 0.1698, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 0.3190020473265724, | |
| "grad_norm": 0.645452082157135, | |
| "learning_rate": 9.977609379468116e-05, | |
| "loss": 0.17, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 0.32376327191353615, | |
| "grad_norm": 0.6378370523452759, | |
| "learning_rate": 9.977132780478507e-05, | |
| "loss": 0.1673, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 0.3285244965004999, | |
| "grad_norm": 0.511216402053833, | |
| "learning_rate": 9.976656181488896e-05, | |
| "loss": 0.1678, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 0.3332857210874637, | |
| "grad_norm": 0.64838707447052, | |
| "learning_rate": 9.976179582499286e-05, | |
| "loss": 0.1675, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.33804694567442745, | |
| "grad_norm": 0.6467918753623962, | |
| "learning_rate": 9.975702983509676e-05, | |
| "loss": 0.1699, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 0.3428081702613912, | |
| "grad_norm": 0.6198284029960632, | |
| "learning_rate": 9.975226384520065e-05, | |
| "loss": 0.169, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 0.347569394848355, | |
| "grad_norm": 0.6328741312026978, | |
| "learning_rate": 9.974749785530455e-05, | |
| "loss": 0.1685, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 0.35233061943531874, | |
| "grad_norm": 0.8264518976211548, | |
| "learning_rate": 9.974273186540844e-05, | |
| "loss": 0.1731, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.35709184402228256, | |
| "grad_norm": 0.7238495945930481, | |
| "learning_rate": 9.973796587551235e-05, | |
| "loss": 0.1674, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.3618530686092463, | |
| "grad_norm": 0.6243422031402588, | |
| "learning_rate": 9.973319988561624e-05, | |
| "loss": 0.1699, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.3666142931962101, | |
| "grad_norm": 0.76638263463974, | |
| "learning_rate": 9.972843389572014e-05, | |
| "loss": 0.1707, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.37137551778317385, | |
| "grad_norm": 0.5346329212188721, | |
| "learning_rate": 9.972366790582405e-05, | |
| "loss": 0.1669, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.3761367423701376, | |
| "grad_norm": 0.6198967695236206, | |
| "learning_rate": 9.971890191592794e-05, | |
| "loss": 0.1663, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 0.3808979669571014, | |
| "grad_norm": 0.936530590057373, | |
| "learning_rate": 9.971413592603184e-05, | |
| "loss": 0.1694, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.3808979669571014, | |
| "eval_loss": 0.16426624357700348, | |
| "eval_mae": 0.6708112359046936, | |
| "eval_mse": 321.2046813964844, | |
| "eval_rmse": 17.922184057655596, | |
| "eval_runtime": 59.1224, | |
| "eval_samples_per_second": 10104.719, | |
| "eval_smape": 93.0514931678772, | |
| "eval_steps_per_second": 19.739, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.38565919154406514, | |
| "grad_norm": 0.5151750445365906, | |
| "learning_rate": 9.970936993613574e-05, | |
| "loss": 0.1676, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 0.3904204161310289, | |
| "grad_norm": 0.8430535793304443, | |
| "learning_rate": 9.970460394623964e-05, | |
| "loss": 0.1699, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 0.39518164071799267, | |
| "grad_norm": 0.7711997628211975, | |
| "learning_rate": 9.969983795634354e-05, | |
| "loss": 0.1664, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 0.39994286530495643, | |
| "grad_norm": 0.5547206401824951, | |
| "learning_rate": 9.969507196644744e-05, | |
| "loss": 0.1679, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 0.4047040898919202, | |
| "grad_norm": 0.7514538764953613, | |
| "learning_rate": 9.969030597655134e-05, | |
| "loss": 0.1685, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 0.40946531447888396, | |
| "grad_norm": 0.6667706370353699, | |
| "learning_rate": 9.968553998665524e-05, | |
| "loss": 0.1691, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 0.4142265390658477, | |
| "grad_norm": 0.5886721611022949, | |
| "learning_rate": 9.968077399675914e-05, | |
| "loss": 0.1649, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 0.4189877636528115, | |
| "grad_norm": 0.5160133838653564, | |
| "learning_rate": 9.967600800686303e-05, | |
| "loss": 0.1673, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 0.42374898823977525, | |
| "grad_norm": 0.6817535758018494, | |
| "learning_rate": 9.967124201696692e-05, | |
| "loss": 0.1687, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 0.428510212826739, | |
| "grad_norm": 0.5424938201904297, | |
| "learning_rate": 9.966647602707083e-05, | |
| "loss": 0.1687, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 0.43327143741370283, | |
| "grad_norm": 0.483815461397171, | |
| "learning_rate": 9.966171003717473e-05, | |
| "loss": 0.1673, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 0.4380326620006666, | |
| "grad_norm": 0.555853009223938, | |
| "learning_rate": 9.965694404727862e-05, | |
| "loss": 0.1667, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 0.44279388658763036, | |
| "grad_norm": 0.45629894733428955, | |
| "learning_rate": 9.965217805738253e-05, | |
| "loss": 0.1678, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 0.4475551111745941, | |
| "grad_norm": 0.47480854392051697, | |
| "learning_rate": 9.964741206748642e-05, | |
| "loss": 0.1651, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 0.4523163357615579, | |
| "grad_norm": 0.5411455631256104, | |
| "learning_rate": 9.964264607759032e-05, | |
| "loss": 0.168, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 0.45707756034852165, | |
| "grad_norm": 0.7176097631454468, | |
| "learning_rate": 9.963788008769422e-05, | |
| "loss": 0.1654, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 0.4618387849354854, | |
| "grad_norm": 0.7010710835456848, | |
| "learning_rate": 9.963311409779812e-05, | |
| "loss": 0.165, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 0.4666000095224492, | |
| "grad_norm": 0.5730286240577698, | |
| "learning_rate": 9.962834810790202e-05, | |
| "loss": 0.1654, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 0.47136123410941294, | |
| "grad_norm": 0.532320499420166, | |
| "learning_rate": 9.96235821180059e-05, | |
| "loss": 0.1665, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 0.4761224586963767, | |
| "grad_norm": 0.5974966883659363, | |
| "learning_rate": 9.961881612810982e-05, | |
| "loss": 0.1648, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.4761224586963767, | |
| "eval_loss": 0.16261516511440277, | |
| "eval_mae": 0.6730566620826721, | |
| "eval_mse": 350.6869812011719, | |
| "eval_rmse": 18.726638278163325, | |
| "eval_runtime": 61.1422, | |
| "eval_samples_per_second": 9770.909, | |
| "eval_smape": 94.07484531402588, | |
| "eval_steps_per_second": 19.087, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 0.48088368328334047, | |
| "grad_norm": 0.6836587190628052, | |
| "learning_rate": 9.961405013821372e-05, | |
| "loss": 0.164, | |
| "step": 5050 | |
| }, | |
| { | |
| "epoch": 0.48564490787030423, | |
| "grad_norm": 0.6935145854949951, | |
| "learning_rate": 9.96092841483176e-05, | |
| "loss": 0.1656, | |
| "step": 5100 | |
| }, | |
| { | |
| "epoch": 0.490406132457268, | |
| "grad_norm": 0.5300805568695068, | |
| "learning_rate": 9.960451815842151e-05, | |
| "loss": 0.1661, | |
| "step": 5150 | |
| }, | |
| { | |
| "epoch": 0.49516735704423176, | |
| "grad_norm": 0.6059597134590149, | |
| "learning_rate": 9.95997521685254e-05, | |
| "loss": 0.1665, | |
| "step": 5200 | |
| }, | |
| { | |
| "epoch": 0.4999285816311955, | |
| "grad_norm": 0.6202102303504944, | |
| "learning_rate": 9.95949861786293e-05, | |
| "loss": 0.1667, | |
| "step": 5250 | |
| }, | |
| { | |
| "epoch": 0.5046898062181593, | |
| "grad_norm": 0.6857314705848694, | |
| "learning_rate": 9.959022018873321e-05, | |
| "loss": 0.1648, | |
| "step": 5300 | |
| }, | |
| { | |
| "epoch": 0.5094510308051231, | |
| "grad_norm": 0.5026215314865112, | |
| "learning_rate": 9.95854541988371e-05, | |
| "loss": 0.1656, | |
| "step": 5350 | |
| }, | |
| { | |
| "epoch": 0.5142122553920868, | |
| "grad_norm": 0.8072870969772339, | |
| "learning_rate": 9.9580688208941e-05, | |
| "loss": 0.1637, | |
| "step": 5400 | |
| }, | |
| { | |
| "epoch": 0.5189734799790506, | |
| "grad_norm": 0.5563872456550598, | |
| "learning_rate": 9.95759222190449e-05, | |
| "loss": 0.1665, | |
| "step": 5450 | |
| }, | |
| { | |
| "epoch": 0.5237347045660143, | |
| "grad_norm": 0.4486568868160248, | |
| "learning_rate": 9.95711562291488e-05, | |
| "loss": 0.1665, | |
| "step": 5500 | |
| }, | |
| { | |
| "epoch": 0.5284959291529782, | |
| "grad_norm": 0.5072858929634094, | |
| "learning_rate": 9.95663902392527e-05, | |
| "loss": 0.1671, | |
| "step": 5550 | |
| }, | |
| { | |
| "epoch": 0.5332571537399419, | |
| "grad_norm": 0.4768078327178955, | |
| "learning_rate": 9.95616242493566e-05, | |
| "loss": 0.165, | |
| "step": 5600 | |
| }, | |
| { | |
| "epoch": 0.5380183783269057, | |
| "grad_norm": 0.5484294891357422, | |
| "learning_rate": 9.95568582594605e-05, | |
| "loss": 0.1615, | |
| "step": 5650 | |
| }, | |
| { | |
| "epoch": 0.5427796029138694, | |
| "grad_norm": 0.5098631978034973, | |
| "learning_rate": 9.955209226956438e-05, | |
| "loss": 0.1663, | |
| "step": 5700 | |
| }, | |
| { | |
| "epoch": 0.5475408275008332, | |
| "grad_norm": 0.5663777589797974, | |
| "learning_rate": 9.95473262796683e-05, | |
| "loss": 0.1653, | |
| "step": 5750 | |
| }, | |
| { | |
| "epoch": 0.552302052087797, | |
| "grad_norm": 0.5557841658592224, | |
| "learning_rate": 9.95425602897722e-05, | |
| "loss": 0.163, | |
| "step": 5800 | |
| }, | |
| { | |
| "epoch": 0.5570632766747607, | |
| "grad_norm": 0.545656144618988, | |
| "learning_rate": 9.953779429987608e-05, | |
| "loss": 0.1619, | |
| "step": 5850 | |
| }, | |
| { | |
| "epoch": 0.5618245012617246, | |
| "grad_norm": 0.6774228811264038, | |
| "learning_rate": 9.953302830998e-05, | |
| "loss": 0.1649, | |
| "step": 5900 | |
| }, | |
| { | |
| "epoch": 0.5665857258486883, | |
| "grad_norm": 0.4831783175468445, | |
| "learning_rate": 9.952826232008388e-05, | |
| "loss": 0.1626, | |
| "step": 5950 | |
| }, | |
| { | |
| "epoch": 0.5713469504356521, | |
| "grad_norm": 0.46657130122184753, | |
| "learning_rate": 9.952349633018778e-05, | |
| "loss": 0.1672, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 0.5713469504356521, | |
| "eval_loss": 0.16118212044239044, | |
| "eval_mae": 0.6796970963478088, | |
| "eval_mse": 370.8824768066406, | |
| "eval_rmse": 19.25830929252723, | |
| "eval_runtime": 58.3327, | |
| "eval_samples_per_second": 10241.503, | |
| "eval_smape": 84.66194272041321, | |
| "eval_steps_per_second": 20.006, | |
| "step": 6000 | |
| }, | |
| { | |
| "epoch": 0.5761081750226158, | |
| "grad_norm": 0.5452147126197815, | |
| "learning_rate": 9.95187303402917e-05, | |
| "loss": 0.1645, | |
| "step": 6050 | |
| }, | |
| { | |
| "epoch": 0.5808693996095796, | |
| "grad_norm": 0.6225939989089966, | |
| "learning_rate": 9.951396435039558e-05, | |
| "loss": 0.1629, | |
| "step": 6100 | |
| }, | |
| { | |
| "epoch": 0.5856306241965433, | |
| "grad_norm": 0.618532121181488, | |
| "learning_rate": 9.950919836049948e-05, | |
| "loss": 0.1638, | |
| "step": 6150 | |
| }, | |
| { | |
| "epoch": 0.5903918487835071, | |
| "grad_norm": 0.6065341830253601, | |
| "learning_rate": 9.950443237060338e-05, | |
| "loss": 0.1672, | |
| "step": 6200 | |
| }, | |
| { | |
| "epoch": 0.5951530733704709, | |
| "grad_norm": 0.7495716214179993, | |
| "learning_rate": 9.949966638070728e-05, | |
| "loss": 0.1648, | |
| "step": 6250 | |
| }, | |
| { | |
| "epoch": 0.5999142979574347, | |
| "grad_norm": 0.6554955244064331, | |
| "learning_rate": 9.949490039081118e-05, | |
| "loss": 0.1654, | |
| "step": 6300 | |
| }, | |
| { | |
| "epoch": 0.6046755225443984, | |
| "grad_norm": 0.5830172300338745, | |
| "learning_rate": 9.949013440091506e-05, | |
| "loss": 0.1629, | |
| "step": 6350 | |
| }, | |
| { | |
| "epoch": 0.6094367471313622, | |
| "grad_norm": 0.5021042823791504, | |
| "learning_rate": 9.948536841101898e-05, | |
| "loss": 0.1622, | |
| "step": 6400 | |
| }, | |
| { | |
| "epoch": 0.6141979717183259, | |
| "grad_norm": 0.47169509530067444, | |
| "learning_rate": 9.948060242112288e-05, | |
| "loss": 0.1632, | |
| "step": 6450 | |
| }, | |
| { | |
| "epoch": 0.6189591963052897, | |
| "grad_norm": 0.7609395980834961, | |
| "learning_rate": 9.947583643122676e-05, | |
| "loss": 0.1641, | |
| "step": 6500 | |
| }, | |
| { | |
| "epoch": 0.6237204208922534, | |
| "grad_norm": 0.5191305875778198, | |
| "learning_rate": 9.947107044133068e-05, | |
| "loss": 0.1672, | |
| "step": 6550 | |
| }, | |
| { | |
| "epoch": 0.6284816454792173, | |
| "grad_norm": 0.5454711318016052, | |
| "learning_rate": 9.946630445143456e-05, | |
| "loss": 0.1648, | |
| "step": 6600 | |
| }, | |
| { | |
| "epoch": 0.6332428700661811, | |
| "grad_norm": 0.49112918972969055, | |
| "learning_rate": 9.946153846153846e-05, | |
| "loss": 0.1648, | |
| "step": 6650 | |
| }, | |
| { | |
| "epoch": 0.6380040946531448, | |
| "grad_norm": 0.4859708249568939, | |
| "learning_rate": 9.945677247164236e-05, | |
| "loss": 0.1652, | |
| "step": 6700 | |
| }, | |
| { | |
| "epoch": 0.6427653192401086, | |
| "grad_norm": 0.506971001625061, | |
| "learning_rate": 9.945200648174626e-05, | |
| "loss": 0.1623, | |
| "step": 6750 | |
| }, | |
| { | |
| "epoch": 0.6475265438270723, | |
| "grad_norm": 0.5732383131980896, | |
| "learning_rate": 9.944724049185016e-05, | |
| "loss": 0.1657, | |
| "step": 6800 | |
| }, | |
| { | |
| "epoch": 0.6522877684140361, | |
| "grad_norm": 0.548362672328949, | |
| "learning_rate": 9.944247450195406e-05, | |
| "loss": 0.1628, | |
| "step": 6850 | |
| }, | |
| { | |
| "epoch": 0.6570489930009998, | |
| "grad_norm": 0.5271615982055664, | |
| "learning_rate": 9.943770851205796e-05, | |
| "loss": 0.1627, | |
| "step": 6900 | |
| }, | |
| { | |
| "epoch": 0.6618102175879637, | |
| "grad_norm": 0.7555857300758362, | |
| "learning_rate": 9.943294252216186e-05, | |
| "loss": 0.1635, | |
| "step": 6950 | |
| }, | |
| { | |
| "epoch": 0.6665714421749274, | |
| "grad_norm": 0.5426679849624634, | |
| "learning_rate": 9.942817653226576e-05, | |
| "loss": 0.1623, | |
| "step": 7000 | |
| }, | |
| { | |
| "epoch": 0.6665714421749274, | |
| "eval_loss": 0.1605178564786911, | |
| "eval_mae": 0.6715303063392639, | |
| "eval_mse": 400.0790100097656, | |
| "eval_rmse": 20.001975152713435, | |
| "eval_runtime": 61.0562, | |
| "eval_samples_per_second": 9784.671, | |
| "eval_smape": 89.75983262062073, | |
| "eval_steps_per_second": 19.114, | |
| "step": 7000 | |
| }, | |
| { | |
| "epoch": 0.6713326667618912, | |
| "grad_norm": 0.5322990417480469, | |
| "learning_rate": 9.942341054236966e-05, | |
| "loss": 0.1625, | |
| "step": 7050 | |
| }, | |
| { | |
| "epoch": 0.6760938913488549, | |
| "grad_norm": 0.6016077995300293, | |
| "learning_rate": 9.941864455247354e-05, | |
| "loss": 0.1647, | |
| "step": 7100 | |
| }, | |
| { | |
| "epoch": 0.6808551159358187, | |
| "grad_norm": 0.5076338648796082, | |
| "learning_rate": 9.941387856257746e-05, | |
| "loss": 0.1618, | |
| "step": 7150 | |
| }, | |
| { | |
| "epoch": 0.6856163405227824, | |
| "grad_norm": 0.5658571124076843, | |
| "learning_rate": 9.940911257268136e-05, | |
| "loss": 0.1662, | |
| "step": 7200 | |
| }, | |
| { | |
| "epoch": 0.6903775651097462, | |
| "grad_norm": 0.6107982993125916, | |
| "learning_rate": 9.940434658278524e-05, | |
| "loss": 0.1608, | |
| "step": 7250 | |
| }, | |
| { | |
| "epoch": 0.69513878969671, | |
| "grad_norm": 0.4623304307460785, | |
| "learning_rate": 9.939958059288916e-05, | |
| "loss": 0.1635, | |
| "step": 7300 | |
| }, | |
| { | |
| "epoch": 0.6999000142836738, | |
| "grad_norm": 0.6437474489212036, | |
| "learning_rate": 9.939481460299304e-05, | |
| "loss": 0.1606, | |
| "step": 7350 | |
| }, | |
| { | |
| "epoch": 0.7046612388706375, | |
| "grad_norm": 0.6315158605575562, | |
| "learning_rate": 9.939004861309694e-05, | |
| "loss": 0.1615, | |
| "step": 7400 | |
| }, | |
| { | |
| "epoch": 0.7094224634576013, | |
| "grad_norm": 0.6503571271896362, | |
| "learning_rate": 9.938528262320085e-05, | |
| "loss": 0.1619, | |
| "step": 7450 | |
| }, | |
| { | |
| "epoch": 0.7141836880445651, | |
| "grad_norm": 0.46252092719078064, | |
| "learning_rate": 9.938051663330474e-05, | |
| "loss": 0.1625, | |
| "step": 7500 | |
| }, | |
| { | |
| "epoch": 0.7189449126315288, | |
| "grad_norm": 0.5186336636543274, | |
| "learning_rate": 9.937575064340864e-05, | |
| "loss": 0.1628, | |
| "step": 7550 | |
| }, | |
| { | |
| "epoch": 0.7237061372184926, | |
| "grad_norm": 0.5236070156097412, | |
| "learning_rate": 9.937098465351254e-05, | |
| "loss": 0.1624, | |
| "step": 7600 | |
| }, | |
| { | |
| "epoch": 0.7284673618054563, | |
| "grad_norm": 0.4777911901473999, | |
| "learning_rate": 9.936621866361644e-05, | |
| "loss": 0.1625, | |
| "step": 7650 | |
| }, | |
| { | |
| "epoch": 0.7332285863924202, | |
| "grad_norm": 0.5092161297798157, | |
| "learning_rate": 9.936145267372034e-05, | |
| "loss": 0.163, | |
| "step": 7700 | |
| }, | |
| { | |
| "epoch": 0.7379898109793839, | |
| "grad_norm": 0.5161564350128174, | |
| "learning_rate": 9.935668668382424e-05, | |
| "loss": 0.1615, | |
| "step": 7750 | |
| }, | |
| { | |
| "epoch": 0.7427510355663477, | |
| "grad_norm": 0.48548147082328796, | |
| "learning_rate": 9.935192069392814e-05, | |
| "loss": 0.1592, | |
| "step": 7800 | |
| }, | |
| { | |
| "epoch": 0.7475122601533114, | |
| "grad_norm": 0.6095620393753052, | |
| "learning_rate": 9.934715470403202e-05, | |
| "loss": 0.1613, | |
| "step": 7850 | |
| }, | |
| { | |
| "epoch": 0.7522734847402752, | |
| "grad_norm": 0.49965670704841614, | |
| "learning_rate": 9.934238871413592e-05, | |
| "loss": 0.163, | |
| "step": 7900 | |
| }, | |
| { | |
| "epoch": 0.7570347093272389, | |
| "grad_norm": 0.5934204459190369, | |
| "learning_rate": 9.933762272423984e-05, | |
| "loss": 0.1638, | |
| "step": 7950 | |
| }, | |
| { | |
| "epoch": 0.7617959339142027, | |
| "grad_norm": 0.6522780060768127, | |
| "learning_rate": 9.933285673434372e-05, | |
| "loss": 0.1638, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 0.7617959339142027, | |
| "eval_loss": 0.16129492223262787, | |
| "eval_mae": 0.6771246194839478, | |
| "eval_mse": 387.6971435546875, | |
| "eval_rmse": 19.69002649959333, | |
| "eval_runtime": 57.6529, | |
| "eval_samples_per_second": 10362.273, | |
| "eval_smape": 122.37988710403442, | |
| "eval_steps_per_second": 20.242, | |
| "step": 8000 | |
| }, | |
| { | |
| "epoch": 0.7665571585011665, | |
| "grad_norm": 0.47631314396858215, | |
| "learning_rate": 9.932809074444762e-05, | |
| "loss": 0.1594, | |
| "step": 8050 | |
| }, | |
| { | |
| "epoch": 0.7713183830881303, | |
| "grad_norm": 0.4288536310195923, | |
| "learning_rate": 9.932332475455152e-05, | |
| "loss": 0.1627, | |
| "step": 8100 | |
| }, | |
| { | |
| "epoch": 0.776079607675094, | |
| "grad_norm": 0.4548576772212982, | |
| "learning_rate": 9.931855876465542e-05, | |
| "loss": 0.1638, | |
| "step": 8150 | |
| }, | |
| { | |
| "epoch": 0.7808408322620578, | |
| "grad_norm": 0.5950626730918884, | |
| "learning_rate": 9.931379277475932e-05, | |
| "loss": 0.1646, | |
| "step": 8200 | |
| }, | |
| { | |
| "epoch": 0.7856020568490216, | |
| "grad_norm": 0.5772454738616943, | |
| "learning_rate": 9.930902678486322e-05, | |
| "loss": 0.1629, | |
| "step": 8250 | |
| }, | |
| { | |
| "epoch": 0.7903632814359853, | |
| "grad_norm": 0.5833305716514587, | |
| "learning_rate": 9.930426079496712e-05, | |
| "loss": 0.1635, | |
| "step": 8300 | |
| }, | |
| { | |
| "epoch": 0.7951245060229492, | |
| "grad_norm": 0.4767976701259613, | |
| "learning_rate": 9.929949480507102e-05, | |
| "loss": 0.1621, | |
| "step": 8350 | |
| }, | |
| { | |
| "epoch": 0.7998857306099129, | |
| "grad_norm": 0.586681604385376, | |
| "learning_rate": 9.929472881517492e-05, | |
| "loss": 0.1633, | |
| "step": 8400 | |
| }, | |
| { | |
| "epoch": 0.8046469551968767, | |
| "grad_norm": 0.46445733308792114, | |
| "learning_rate": 9.928996282527882e-05, | |
| "loss": 0.1621, | |
| "step": 8450 | |
| }, | |
| { | |
| "epoch": 0.8094081797838404, | |
| "grad_norm": 0.4659370183944702, | |
| "learning_rate": 9.92851968353827e-05, | |
| "loss": 0.1644, | |
| "step": 8500 | |
| }, | |
| { | |
| "epoch": 0.8141694043708042, | |
| "grad_norm": 0.48823997378349304, | |
| "learning_rate": 9.928043084548662e-05, | |
| "loss": 0.1654, | |
| "step": 8550 | |
| }, | |
| { | |
| "epoch": 0.8189306289577679, | |
| "grad_norm": 0.5804855823516846, | |
| "learning_rate": 9.92756648555905e-05, | |
| "loss": 0.1624, | |
| "step": 8600 | |
| }, | |
| { | |
| "epoch": 0.8236918535447317, | |
| "grad_norm": 0.4181581139564514, | |
| "learning_rate": 9.92708988656944e-05, | |
| "loss": 0.1593, | |
| "step": 8650 | |
| }, | |
| { | |
| "epoch": 0.8284530781316954, | |
| "grad_norm": 0.6322731971740723, | |
| "learning_rate": 9.926613287579832e-05, | |
| "loss": 0.1628, | |
| "step": 8700 | |
| }, | |
| { | |
| "epoch": 0.8332143027186593, | |
| "grad_norm": 0.39184707403182983, | |
| "learning_rate": 9.92613668859022e-05, | |
| "loss": 0.1639, | |
| "step": 8750 | |
| }, | |
| { | |
| "epoch": 0.837975527305623, | |
| "grad_norm": 0.5011768341064453, | |
| "learning_rate": 9.92566008960061e-05, | |
| "loss": 0.1634, | |
| "step": 8800 | |
| }, | |
| { | |
| "epoch": 0.8427367518925868, | |
| "grad_norm": 0.47292882204055786, | |
| "learning_rate": 9.925183490611e-05, | |
| "loss": 0.1609, | |
| "step": 8850 | |
| }, | |
| { | |
| "epoch": 0.8474979764795505, | |
| "grad_norm": 0.5086949467658997, | |
| "learning_rate": 9.92470689162139e-05, | |
| "loss": 0.1616, | |
| "step": 8900 | |
| }, | |
| { | |
| "epoch": 0.8522592010665143, | |
| "grad_norm": 0.4720959961414337, | |
| "learning_rate": 9.92423029263178e-05, | |
| "loss": 0.1611, | |
| "step": 8950 | |
| }, | |
| { | |
| "epoch": 0.857020425653478, | |
| "grad_norm": 0.8099896311759949, | |
| "learning_rate": 9.92375369364217e-05, | |
| "loss": 0.1609, | |
| "step": 9000 | |
| }, | |
| { | |
| "epoch": 0.857020425653478, | |
| "eval_loss": 0.16015625, | |
| "eval_mae": 0.6603007912635803, | |
| "eval_mse": 335.3426818847656, | |
| "eval_rmse": 18.31236418065034, | |
| "eval_runtime": 56.2142, | |
| "eval_samples_per_second": 10627.482, | |
| "eval_smape": 109.38767194747925, | |
| "eval_steps_per_second": 20.76, | |
| "step": 9000 | |
| }, | |
| { | |
| "epoch": 0.8617816502404418, | |
| "grad_norm": 0.5228590369224548, | |
| "learning_rate": 9.92327709465256e-05, | |
| "loss": 0.1617, | |
| "step": 9050 | |
| }, | |
| { | |
| "epoch": 0.8665428748274057, | |
| "grad_norm": 0.5515425205230713, | |
| "learning_rate": 9.92280049566295e-05, | |
| "loss": 0.1612, | |
| "step": 9100 | |
| }, | |
| { | |
| "epoch": 0.8713040994143694, | |
| "grad_norm": 0.5289241075515747, | |
| "learning_rate": 9.92232389667334e-05, | |
| "loss": 0.1646, | |
| "step": 9150 | |
| }, | |
| { | |
| "epoch": 0.8760653240013332, | |
| "grad_norm": 0.5692815780639648, | |
| "learning_rate": 9.92184729768373e-05, | |
| "loss": 0.1603, | |
| "step": 9200 | |
| }, | |
| { | |
| "epoch": 0.8808265485882969, | |
| "grad_norm": 0.41486117243766785, | |
| "learning_rate": 9.921370698694119e-05, | |
| "loss": 0.1632, | |
| "step": 9250 | |
| }, | |
| { | |
| "epoch": 0.8855877731752607, | |
| "grad_norm": 0.488235741853714, | |
| "learning_rate": 9.920894099704509e-05, | |
| "loss": 0.1613, | |
| "step": 9300 | |
| }, | |
| { | |
| "epoch": 0.8903489977622244, | |
| "grad_norm": 0.6576530337333679, | |
| "learning_rate": 9.9204175007149e-05, | |
| "loss": 0.1618, | |
| "step": 9350 | |
| }, | |
| { | |
| "epoch": 0.8951102223491882, | |
| "grad_norm": 0.49431854486465454, | |
| "learning_rate": 9.919940901725288e-05, | |
| "loss": 0.1618, | |
| "step": 9400 | |
| }, | |
| { | |
| "epoch": 0.899871446936152, | |
| "grad_norm": 0.5491801500320435, | |
| "learning_rate": 9.919464302735678e-05, | |
| "loss": 0.162, | |
| "step": 9450 | |
| }, | |
| { | |
| "epoch": 0.9046326715231158, | |
| "grad_norm": 0.5839897990226746, | |
| "learning_rate": 9.918987703746068e-05, | |
| "loss": 0.1587, | |
| "step": 9500 | |
| }, | |
| { | |
| "epoch": 0.9093938961100795, | |
| "grad_norm": 0.5631112456321716, | |
| "learning_rate": 9.918511104756458e-05, | |
| "loss": 0.1607, | |
| "step": 9550 | |
| }, | |
| { | |
| "epoch": 0.9141551206970433, | |
| "grad_norm": 0.5420098900794983, | |
| "learning_rate": 9.918034505766848e-05, | |
| "loss": 0.1642, | |
| "step": 9600 | |
| }, | |
| { | |
| "epoch": 0.918916345284007, | |
| "grad_norm": 0.568087637424469, | |
| "learning_rate": 9.917557906777238e-05, | |
| "loss": 0.1624, | |
| "step": 9650 | |
| }, | |
| { | |
| "epoch": 0.9236775698709708, | |
| "grad_norm": 0.5823555588722229, | |
| "learning_rate": 9.917081307787628e-05, | |
| "loss": 0.1639, | |
| "step": 9700 | |
| }, | |
| { | |
| "epoch": 0.9284387944579345, | |
| "grad_norm": 0.5538271069526672, | |
| "learning_rate": 9.916604708798017e-05, | |
| "loss": 0.1625, | |
| "step": 9750 | |
| }, | |
| { | |
| "epoch": 0.9332000190448984, | |
| "grad_norm": 0.5160115957260132, | |
| "learning_rate": 9.916128109808408e-05, | |
| "loss": 0.1582, | |
| "step": 9800 | |
| }, | |
| { | |
| "epoch": 0.9379612436318621, | |
| "grad_norm": 0.48957574367523193, | |
| "learning_rate": 9.915651510818798e-05, | |
| "loss": 0.16, | |
| "step": 9850 | |
| }, | |
| { | |
| "epoch": 0.9427224682188259, | |
| "grad_norm": 0.601917564868927, | |
| "learning_rate": 9.915174911829187e-05, | |
| "loss": 0.162, | |
| "step": 9900 | |
| }, | |
| { | |
| "epoch": 0.9474836928057897, | |
| "grad_norm": 0.40893155336380005, | |
| "learning_rate": 9.914698312839578e-05, | |
| "loss": 0.165, | |
| "step": 9950 | |
| }, | |
| { | |
| "epoch": 0.9522449173927534, | |
| "grad_norm": 0.5392901301383972, | |
| "learning_rate": 9.914221713849967e-05, | |
| "loss": 0.1618, | |
| "step": 10000 | |
| }, | |
| { | |
| "epoch": 0.9522449173927534, | |
| "eval_loss": 0.15920588374137878, | |
| "eval_mae": 0.6688477993011475, | |
| "eval_mse": 318.14923095703125, | |
| "eval_rmse": 17.83673823761035, | |
| "eval_runtime": 59.3096, | |
| "eval_samples_per_second": 10072.824, | |
| "eval_smape": 76.33218169212341, | |
| "eval_steps_per_second": 19.676, | |
| "step": 10000 | |
| }, | |
| { | |
| "epoch": 0.9570061419797172, | |
| "grad_norm": 0.600141704082489, | |
| "learning_rate": 9.913745114860357e-05, | |
| "loss": 0.1596, | |
| "step": 10050 | |
| }, | |
| { | |
| "epoch": 0.9617673665666809, | |
| "grad_norm": 0.49183207750320435, | |
| "learning_rate": 9.913268515870748e-05, | |
| "loss": 0.1636, | |
| "step": 10100 | |
| }, | |
| { | |
| "epoch": 0.9665285911536448, | |
| "grad_norm": 0.4760478138923645, | |
| "learning_rate": 9.912791916881136e-05, | |
| "loss": 0.1584, | |
| "step": 10150 | |
| }, | |
| { | |
| "epoch": 0.9712898157406085, | |
| "grad_norm": 0.4729287624359131, | |
| "learning_rate": 9.912315317891526e-05, | |
| "loss": 0.1617, | |
| "step": 10200 | |
| }, | |
| { | |
| "epoch": 0.9760510403275723, | |
| "grad_norm": 0.578062117099762, | |
| "learning_rate": 9.911838718901916e-05, | |
| "loss": 0.1606, | |
| "step": 10250 | |
| }, | |
| { | |
| "epoch": 0.980812264914536, | |
| "grad_norm": 0.43438589572906494, | |
| "learning_rate": 9.911362119912306e-05, | |
| "loss": 0.1617, | |
| "step": 10300 | |
| }, | |
| { | |
| "epoch": 0.9855734895014998, | |
| "grad_norm": 0.5887606143951416, | |
| "learning_rate": 9.910885520922696e-05, | |
| "loss": 0.1616, | |
| "step": 10350 | |
| }, | |
| { | |
| "epoch": 0.9903347140884635, | |
| "grad_norm": 0.4835382401943207, | |
| "learning_rate": 9.910408921933086e-05, | |
| "loss": 0.16, | |
| "step": 10400 | |
| }, | |
| { | |
| "epoch": 0.9950959386754273, | |
| "grad_norm": 0.4186175763607025, | |
| "learning_rate": 9.909932322943476e-05, | |
| "loss": 0.1636, | |
| "step": 10450 | |
| }, | |
| { | |
| "epoch": 0.999857163262391, | |
| "grad_norm": 0.6031454205513, | |
| "learning_rate": 9.909455723953865e-05, | |
| "loss": 0.16, | |
| "step": 10500 | |
| }, | |
| { | |
| "epoch": 1.0045707756034852, | |
| "grad_norm": 0.444181352853775, | |
| "learning_rate": 9.908979124964256e-05, | |
| "loss": 0.1588, | |
| "step": 10550 | |
| }, | |
| { | |
| "epoch": 1.0093320001904489, | |
| "grad_norm": 0.474399596452713, | |
| "learning_rate": 9.908502525974646e-05, | |
| "loss": 0.1651, | |
| "step": 10600 | |
| }, | |
| { | |
| "epoch": 1.0140932247774128, | |
| "grad_norm": 0.4467703700065613, | |
| "learning_rate": 9.908025926985035e-05, | |
| "loss": 0.1598, | |
| "step": 10650 | |
| }, | |
| { | |
| "epoch": 1.0188544493643765, | |
| "grad_norm": 0.5107735395431519, | |
| "learning_rate": 9.907549327995426e-05, | |
| "loss": 0.1609, | |
| "step": 10700 | |
| }, | |
| { | |
| "epoch": 1.0236156739513402, | |
| "grad_norm": 0.4578382968902588, | |
| "learning_rate": 9.907072729005815e-05, | |
| "loss": 0.1614, | |
| "step": 10750 | |
| }, | |
| { | |
| "epoch": 1.028376898538304, | |
| "grad_norm": 0.4924687445163727, | |
| "learning_rate": 9.906596130016205e-05, | |
| "loss": 0.1594, | |
| "step": 10800 | |
| }, | |
| { | |
| "epoch": 1.0331381231252679, | |
| "grad_norm": 0.4967709183692932, | |
| "learning_rate": 9.906119531026595e-05, | |
| "loss": 0.1614, | |
| "step": 10850 | |
| }, | |
| { | |
| "epoch": 1.0378993477122316, | |
| "grad_norm": 0.5059126615524292, | |
| "learning_rate": 9.905642932036984e-05, | |
| "loss": 0.1622, | |
| "step": 10900 | |
| }, | |
| { | |
| "epoch": 1.0426605722991953, | |
| "grad_norm": 0.5229778289794922, | |
| "learning_rate": 9.905166333047374e-05, | |
| "loss": 0.1592, | |
| "step": 10950 | |
| }, | |
| { | |
| "epoch": 1.0474217968861592, | |
| "grad_norm": 0.45143234729766846, | |
| "learning_rate": 9.904689734057764e-05, | |
| "loss": 0.1588, | |
| "step": 11000 | |
| }, | |
| { | |
| "epoch": 1.0474217968861592, | |
| "eval_loss": 0.15856099128723145, | |
| "eval_mae": 0.6628284454345703, | |
| "eval_mse": 345.36749267578125, | |
| "eval_rmse": 18.584065558315846, | |
| "eval_runtime": 57.9218, | |
| "eval_samples_per_second": 10314.166, | |
| "eval_smape": 94.50321197509766, | |
| "eval_steps_per_second": 20.148, | |
| "step": 11000 | |
| }, | |
| { | |
| "epoch": 1.052183021473123, | |
| "grad_norm": 0.7248756885528564, | |
| "learning_rate": 9.904213135068154e-05, | |
| "loss": 0.1614, | |
| "step": 11050 | |
| }, | |
| { | |
| "epoch": 1.0569442460600866, | |
| "grad_norm": 0.5051783323287964, | |
| "learning_rate": 9.903736536078544e-05, | |
| "loss": 0.1567, | |
| "step": 11100 | |
| }, | |
| { | |
| "epoch": 1.0617054706470503, | |
| "grad_norm": 0.5285612940788269, | |
| "learning_rate": 9.903259937088933e-05, | |
| "loss": 0.1613, | |
| "step": 11150 | |
| }, | |
| { | |
| "epoch": 1.0664666952340143, | |
| "grad_norm": 0.5270511507987976, | |
| "learning_rate": 9.902783338099324e-05, | |
| "loss": 0.1615, | |
| "step": 11200 | |
| }, | |
| { | |
| "epoch": 1.071227919820978, | |
| "grad_norm": 0.5635538101196289, | |
| "learning_rate": 9.902306739109714e-05, | |
| "loss": 0.1606, | |
| "step": 11250 | |
| }, | |
| { | |
| "epoch": 1.0759891444079417, | |
| "grad_norm": 0.5780921578407288, | |
| "learning_rate": 9.901830140120103e-05, | |
| "loss": 0.1619, | |
| "step": 11300 | |
| }, | |
| { | |
| "epoch": 1.0807503689949054, | |
| "grad_norm": 0.5149776935577393, | |
| "learning_rate": 9.901353541130494e-05, | |
| "loss": 0.1638, | |
| "step": 11350 | |
| }, | |
| { | |
| "epoch": 1.0855115935818693, | |
| "grad_norm": 0.46075335144996643, | |
| "learning_rate": 9.900876942140883e-05, | |
| "loss": 0.1597, | |
| "step": 11400 | |
| }, | |
| { | |
| "epoch": 1.090272818168833, | |
| "grad_norm": 0.42272669076919556, | |
| "learning_rate": 9.900400343151273e-05, | |
| "loss": 0.16, | |
| "step": 11450 | |
| }, | |
| { | |
| "epoch": 1.0950340427557967, | |
| "grad_norm": 0.5383277535438538, | |
| "learning_rate": 9.899923744161663e-05, | |
| "loss": 0.159, | |
| "step": 11500 | |
| }, | |
| { | |
| "epoch": 1.0997952673427607, | |
| "grad_norm": 0.45635008811950684, | |
| "learning_rate": 9.899447145172053e-05, | |
| "loss": 0.1596, | |
| "step": 11550 | |
| }, | |
| { | |
| "epoch": 1.1045564919297244, | |
| "grad_norm": 0.4863174259662628, | |
| "learning_rate": 9.898970546182443e-05, | |
| "loss": 0.1613, | |
| "step": 11600 | |
| }, | |
| { | |
| "epoch": 1.109317716516688, | |
| "grad_norm": 0.5197418332099915, | |
| "learning_rate": 9.898493947192832e-05, | |
| "loss": 0.1601, | |
| "step": 11650 | |
| }, | |
| { | |
| "epoch": 1.1140789411036518, | |
| "grad_norm": 0.6299956440925598, | |
| "learning_rate": 9.898017348203222e-05, | |
| "loss": 0.1621, | |
| "step": 11700 | |
| }, | |
| { | |
| "epoch": 1.1188401656906157, | |
| "grad_norm": 0.47120076417922974, | |
| "learning_rate": 9.897540749213612e-05, | |
| "loss": 0.1624, | |
| "step": 11750 | |
| }, | |
| { | |
| "epoch": 1.1236013902775794, | |
| "grad_norm": 0.4576870799064636, | |
| "learning_rate": 9.897064150224002e-05, | |
| "loss": 0.1629, | |
| "step": 11800 | |
| }, | |
| { | |
| "epoch": 1.1283626148645431, | |
| "grad_norm": 0.531906008720398, | |
| "learning_rate": 9.896587551234392e-05, | |
| "loss": 0.1593, | |
| "step": 11850 | |
| }, | |
| { | |
| "epoch": 1.1331238394515069, | |
| "grad_norm": 0.4709097743034363, | |
| "learning_rate": 9.896110952244781e-05, | |
| "loss": 0.1613, | |
| "step": 11900 | |
| }, | |
| { | |
| "epoch": 1.1378850640384708, | |
| "grad_norm": 0.5573967695236206, | |
| "learning_rate": 9.895634353255172e-05, | |
| "loss": 0.1623, | |
| "step": 11950 | |
| }, | |
| { | |
| "epoch": 1.1426462886254345, | |
| "grad_norm": 0.5798735022544861, | |
| "learning_rate": 9.895157754265562e-05, | |
| "loss": 0.1601, | |
| "step": 12000 | |
| }, | |
| { | |
| "epoch": 1.1426462886254345, | |
| "eval_loss": 0.15796181559562683, | |
| "eval_mae": 0.65403813123703, | |
| "eval_mse": 326.8865051269531, | |
| "eval_rmse": 18.0800029072717, | |
| "eval_runtime": 56.7165, | |
| "eval_samples_per_second": 10533.356, | |
| "eval_smape": 81.25044703483582, | |
| "eval_steps_per_second": 20.576, | |
| "step": 12000 | |
| }, | |
| { | |
| "epoch": 1.1474075132123982, | |
| "grad_norm": 0.5247554183006287, | |
| "learning_rate": 9.894681155275951e-05, | |
| "loss": 0.1574, | |
| "step": 12050 | |
| }, | |
| { | |
| "epoch": 1.152168737799362, | |
| "grad_norm": 0.6056792736053467, | |
| "learning_rate": 9.894204556286342e-05, | |
| "loss": 0.1583, | |
| "step": 12100 | |
| }, | |
| { | |
| "epoch": 1.1569299623863258, | |
| "grad_norm": 0.5114259123802185, | |
| "learning_rate": 9.893727957296731e-05, | |
| "loss": 0.1641, | |
| "step": 12150 | |
| }, | |
| { | |
| "epoch": 1.1616911869732895, | |
| "grad_norm": 0.40764451026916504, | |
| "learning_rate": 9.89325135830712e-05, | |
| "loss": 0.1621, | |
| "step": 12200 | |
| }, | |
| { | |
| "epoch": 1.1664524115602533, | |
| "grad_norm": 0.5216367244720459, | |
| "learning_rate": 9.89277475931751e-05, | |
| "loss": 0.1597, | |
| "step": 12250 | |
| }, | |
| { | |
| "epoch": 1.171213636147217, | |
| "grad_norm": 0.48652514815330505, | |
| "learning_rate": 9.8922981603279e-05, | |
| "loss": 0.1605, | |
| "step": 12300 | |
| }, | |
| { | |
| "epoch": 1.175974860734181, | |
| "grad_norm": 0.46615609526634216, | |
| "learning_rate": 9.89182156133829e-05, | |
| "loss": 0.1571, | |
| "step": 12350 | |
| }, | |
| { | |
| "epoch": 1.1807360853211446, | |
| "grad_norm": 0.5413913130760193, | |
| "learning_rate": 9.891344962348679e-05, | |
| "loss": 0.1599, | |
| "step": 12400 | |
| }, | |
| { | |
| "epoch": 1.1854973099081083, | |
| "grad_norm": 0.42148470878601074, | |
| "learning_rate": 9.89086836335907e-05, | |
| "loss": 0.1605, | |
| "step": 12450 | |
| }, | |
| { | |
| "epoch": 1.1902585344950722, | |
| "grad_norm": 0.7329843044281006, | |
| "learning_rate": 9.89039176436946e-05, | |
| "loss": 0.1603, | |
| "step": 12500 | |
| }, | |
| { | |
| "epoch": 1.195019759082036, | |
| "grad_norm": 0.42273497581481934, | |
| "learning_rate": 9.889915165379849e-05, | |
| "loss": 0.1617, | |
| "step": 12550 | |
| }, | |
| { | |
| "epoch": 1.1997809836689997, | |
| "grad_norm": 0.5869929790496826, | |
| "learning_rate": 9.88943856639024e-05, | |
| "loss": 0.1612, | |
| "step": 12600 | |
| }, | |
| { | |
| "epoch": 1.2045422082559634, | |
| "grad_norm": 0.4401240944862366, | |
| "learning_rate": 9.888961967400629e-05, | |
| "loss": 0.1577, | |
| "step": 12650 | |
| }, | |
| { | |
| "epoch": 1.2093034328429273, | |
| "grad_norm": 0.48275691270828247, | |
| "learning_rate": 9.888485368411019e-05, | |
| "loss": 0.1595, | |
| "step": 12700 | |
| }, | |
| { | |
| "epoch": 1.214064657429891, | |
| "grad_norm": 0.5482587218284607, | |
| "learning_rate": 9.88800876942141e-05, | |
| "loss": 0.16, | |
| "step": 12750 | |
| }, | |
| { | |
| "epoch": 1.2188258820168547, | |
| "grad_norm": 0.6347602009773254, | |
| "learning_rate": 9.887532170431799e-05, | |
| "loss": 0.1596, | |
| "step": 12800 | |
| }, | |
| { | |
| "epoch": 1.2235871066038184, | |
| "grad_norm": 0.4231024384498596, | |
| "learning_rate": 9.887055571442189e-05, | |
| "loss": 0.1624, | |
| "step": 12850 | |
| }, | |
| { | |
| "epoch": 1.2283483311907823, | |
| "grad_norm": 0.5505658984184265, | |
| "learning_rate": 9.886578972452579e-05, | |
| "loss": 0.1582, | |
| "step": 12900 | |
| }, | |
| { | |
| "epoch": 1.233109555777746, | |
| "grad_norm": 0.4918171763420105, | |
| "learning_rate": 9.886102373462969e-05, | |
| "loss": 0.1577, | |
| "step": 12950 | |
| }, | |
| { | |
| "epoch": 1.2378707803647098, | |
| "grad_norm": 0.45393800735473633, | |
| "learning_rate": 9.885625774473359e-05, | |
| "loss": 0.1585, | |
| "step": 13000 | |
| }, | |
| { | |
| "epoch": 1.2378707803647098, | |
| "eval_loss": 0.15753023326396942, | |
| "eval_mae": 0.6532484889030457, | |
| "eval_mse": 279.79644775390625, | |
| "eval_rmse": 16.72711713816539, | |
| "eval_runtime": 57.893, | |
| "eval_samples_per_second": 10319.3, | |
| "eval_smape": 107.6181173324585, | |
| "eval_steps_per_second": 20.158, | |
| "step": 13000 | |
| }, | |
| { | |
| "epoch": 1.2426320049516735, | |
| "grad_norm": 0.3777833878993988, | |
| "learning_rate": 9.885149175483749e-05, | |
| "loss": 0.1577, | |
| "step": 13050 | |
| }, | |
| { | |
| "epoch": 1.2473932295386374, | |
| "grad_norm": 0.5700230002403259, | |
| "learning_rate": 9.884672576494139e-05, | |
| "loss": 0.1587, | |
| "step": 13100 | |
| }, | |
| { | |
| "epoch": 1.2521544541256011, | |
| "grad_norm": 0.4470745027065277, | |
| "learning_rate": 9.884195977504529e-05, | |
| "loss": 0.1604, | |
| "step": 13150 | |
| }, | |
| { | |
| "epoch": 1.2569156787125648, | |
| "grad_norm": 0.4185906648635864, | |
| "learning_rate": 9.883719378514918e-05, | |
| "loss": 0.1562, | |
| "step": 13200 | |
| }, | |
| { | |
| "epoch": 1.2616769032995285, | |
| "grad_norm": 0.3835722804069519, | |
| "learning_rate": 9.883242779525308e-05, | |
| "loss": 0.1583, | |
| "step": 13250 | |
| }, | |
| { | |
| "epoch": 1.2664381278864925, | |
| "grad_norm": 0.45621258020401, | |
| "learning_rate": 9.882766180535697e-05, | |
| "loss": 0.1577, | |
| "step": 13300 | |
| }, | |
| { | |
| "epoch": 1.2711993524734562, | |
| "grad_norm": 0.5590381622314453, | |
| "learning_rate": 9.882289581546088e-05, | |
| "loss": 0.1608, | |
| "step": 13350 | |
| }, | |
| { | |
| "epoch": 1.2759605770604199, | |
| "grad_norm": 0.5501840114593506, | |
| "learning_rate": 9.881812982556477e-05, | |
| "loss": 0.1623, | |
| "step": 13400 | |
| }, | |
| { | |
| "epoch": 1.2807218016473838, | |
| "grad_norm": 0.4480155408382416, | |
| "learning_rate": 9.881336383566867e-05, | |
| "loss": 0.1566, | |
| "step": 13450 | |
| }, | |
| { | |
| "epoch": 1.2854830262343475, | |
| "grad_norm": 0.5472989678382874, | |
| "learning_rate": 9.880859784577258e-05, | |
| "loss": 0.1613, | |
| "step": 13500 | |
| }, | |
| { | |
| "epoch": 1.2902442508213112, | |
| "grad_norm": 0.48176899552345276, | |
| "learning_rate": 9.880383185587647e-05, | |
| "loss": 0.1614, | |
| "step": 13550 | |
| }, | |
| { | |
| "epoch": 1.295005475408275, | |
| "grad_norm": 0.40140554308891296, | |
| "learning_rate": 9.879906586598037e-05, | |
| "loss": 0.1586, | |
| "step": 13600 | |
| }, | |
| { | |
| "epoch": 1.2997666999952386, | |
| "grad_norm": 0.5117682218551636, | |
| "learning_rate": 9.879429987608427e-05, | |
| "loss": 0.1602, | |
| "step": 13650 | |
| }, | |
| { | |
| "epoch": 1.3045279245822026, | |
| "grad_norm": 0.7169548273086548, | |
| "learning_rate": 9.878953388618817e-05, | |
| "loss": 0.1623, | |
| "step": 13700 | |
| }, | |
| { | |
| "epoch": 1.3092891491691663, | |
| "grad_norm": 0.44949373602867126, | |
| "learning_rate": 9.878476789629207e-05, | |
| "loss": 0.1601, | |
| "step": 13750 | |
| }, | |
| { | |
| "epoch": 1.31405037375613, | |
| "grad_norm": 0.597175121307373, | |
| "learning_rate": 9.878000190639595e-05, | |
| "loss": 0.1606, | |
| "step": 13800 | |
| }, | |
| { | |
| "epoch": 1.318811598343094, | |
| "grad_norm": 0.553895115852356, | |
| "learning_rate": 9.877523591649987e-05, | |
| "loss": 0.1617, | |
| "step": 13850 | |
| }, | |
| { | |
| "epoch": 1.3235728229300576, | |
| "grad_norm": 0.5089927315711975, | |
| "learning_rate": 9.877046992660377e-05, | |
| "loss": 0.1595, | |
| "step": 13900 | |
| }, | |
| { | |
| "epoch": 1.3283340475170213, | |
| "grad_norm": 0.430867463350296, | |
| "learning_rate": 9.876570393670765e-05, | |
| "loss": 0.1582, | |
| "step": 13950 | |
| }, | |
| { | |
| "epoch": 1.3330952721039853, | |
| "grad_norm": 0.39970675110816956, | |
| "learning_rate": 9.876093794681156e-05, | |
| "loss": 0.1567, | |
| "step": 14000 | |
| }, | |
| { | |
| "epoch": 1.3330952721039853, | |
| "eval_loss": 0.15753522515296936, | |
| "eval_mae": 0.6622462272644043, | |
| "eval_mse": 328.3490295410156, | |
| "eval_rmse": 18.120403680409982, | |
| "eval_runtime": 58.0545, | |
| "eval_samples_per_second": 10290.588, | |
| "eval_smape": 91.98985695838928, | |
| "eval_steps_per_second": 20.102, | |
| "step": 14000 | |
| }, | |
| { | |
| "epoch": 1.337856496690949, | |
| "grad_norm": 0.5808453559875488, | |
| "learning_rate": 9.875617195691545e-05, | |
| "loss": 0.1589, | |
| "step": 14050 | |
| }, | |
| { | |
| "epoch": 1.3426177212779127, | |
| "grad_norm": 0.4418608546257019, | |
| "learning_rate": 9.875140596701935e-05, | |
| "loss": 0.1584, | |
| "step": 14100 | |
| }, | |
| { | |
| "epoch": 1.3473789458648764, | |
| "grad_norm": 0.6623143553733826, | |
| "learning_rate": 9.874663997712325e-05, | |
| "loss": 0.1623, | |
| "step": 14150 | |
| }, | |
| { | |
| "epoch": 1.35214017045184, | |
| "grad_norm": 0.4194190204143524, | |
| "learning_rate": 9.874187398722715e-05, | |
| "loss": 0.1596, | |
| "step": 14200 | |
| }, | |
| { | |
| "epoch": 1.356901395038804, | |
| "grad_norm": 0.6208611130714417, | |
| "learning_rate": 9.873710799733105e-05, | |
| "loss": 0.1583, | |
| "step": 14250 | |
| }, | |
| { | |
| "epoch": 1.3616626196257677, | |
| "grad_norm": 0.4435657262802124, | |
| "learning_rate": 9.873234200743495e-05, | |
| "loss": 0.16, | |
| "step": 14300 | |
| }, | |
| { | |
| "epoch": 1.3664238442127314, | |
| "grad_norm": 0.4906177222728729, | |
| "learning_rate": 9.872757601753885e-05, | |
| "loss": 0.1581, | |
| "step": 14350 | |
| }, | |
| { | |
| "epoch": 1.3711850687996954, | |
| "grad_norm": 0.5340787172317505, | |
| "learning_rate": 9.872281002764275e-05, | |
| "loss": 0.1588, | |
| "step": 14400 | |
| }, | |
| { | |
| "epoch": 1.375946293386659, | |
| "grad_norm": 0.4307633340358734, | |
| "learning_rate": 9.871804403774665e-05, | |
| "loss": 0.1585, | |
| "step": 14450 | |
| }, | |
| { | |
| "epoch": 1.3807075179736228, | |
| "grad_norm": 0.49194300174713135, | |
| "learning_rate": 9.871327804785055e-05, | |
| "loss": 0.1572, | |
| "step": 14500 | |
| }, | |
| { | |
| "epoch": 1.3854687425605867, | |
| "grad_norm": 0.4391520917415619, | |
| "learning_rate": 9.870851205795443e-05, | |
| "loss": 0.1573, | |
| "step": 14550 | |
| }, | |
| { | |
| "epoch": 1.3902299671475504, | |
| "grad_norm": 0.4503444731235504, | |
| "learning_rate": 9.870374606805835e-05, | |
| "loss": 0.1577, | |
| "step": 14600 | |
| }, | |
| { | |
| "epoch": 1.3949911917345141, | |
| "grad_norm": 0.5113334655761719, | |
| "learning_rate": 9.869898007816225e-05, | |
| "loss": 0.1594, | |
| "step": 14650 | |
| }, | |
| { | |
| "epoch": 1.3997524163214778, | |
| "grad_norm": 0.3871005177497864, | |
| "learning_rate": 9.869421408826613e-05, | |
| "loss": 0.1559, | |
| "step": 14700 | |
| }, | |
| { | |
| "epoch": 1.4045136409084416, | |
| "grad_norm": 0.5482053756713867, | |
| "learning_rate": 9.868944809837004e-05, | |
| "loss": 0.1592, | |
| "step": 14750 | |
| }, | |
| { | |
| "epoch": 1.4092748654954055, | |
| "grad_norm": 0.45771437883377075, | |
| "learning_rate": 9.868468210847393e-05, | |
| "loss": 0.1572, | |
| "step": 14800 | |
| }, | |
| { | |
| "epoch": 1.4140360900823692, | |
| "grad_norm": 0.4550696015357971, | |
| "learning_rate": 9.867991611857783e-05, | |
| "loss": 0.1605, | |
| "step": 14850 | |
| }, | |
| { | |
| "epoch": 1.418797314669333, | |
| "grad_norm": 0.6991235613822937, | |
| "learning_rate": 9.867515012868174e-05, | |
| "loss": 0.1589, | |
| "step": 14900 | |
| }, | |
| { | |
| "epoch": 1.4235585392562968, | |
| "grad_norm": 0.5531545877456665, | |
| "learning_rate": 9.867038413878563e-05, | |
| "loss": 0.1574, | |
| "step": 14950 | |
| }, | |
| { | |
| "epoch": 1.4283197638432605, | |
| "grad_norm": 0.4692751169204712, | |
| "learning_rate": 9.866561814888953e-05, | |
| "loss": 0.1592, | |
| "step": 15000 | |
| }, | |
| { | |
| "epoch": 1.4283197638432605, | |
| "eval_loss": 0.15674826502799988, | |
| "eval_mae": 0.6522776484489441, | |
| "eval_mse": 376.8973388671875, | |
| "eval_rmse": 19.41384400027948, | |
| "eval_runtime": 57.2088, | |
| "eval_samples_per_second": 10442.706, | |
| "eval_smape": 89.79519009590149, | |
| "eval_steps_per_second": 20.399, | |
| "step": 15000 | |
| }, | |
| { | |
| "epoch": 1.4330809884302242, | |
| "grad_norm": 0.41401219367980957, | |
| "learning_rate": 9.866085215899343e-05, | |
| "loss": 0.1594, | |
| "step": 15050 | |
| }, | |
| { | |
| "epoch": 1.437842213017188, | |
| "grad_norm": 0.6155771613121033, | |
| "learning_rate": 9.865608616909733e-05, | |
| "loss": 0.1597, | |
| "step": 15100 | |
| }, | |
| { | |
| "epoch": 1.4426034376041517, | |
| "grad_norm": 0.6229146718978882, | |
| "learning_rate": 9.865132017920123e-05, | |
| "loss": 0.1595, | |
| "step": 15150 | |
| }, | |
| { | |
| "epoch": 1.4473646621911156, | |
| "grad_norm": 0.5311764478683472, | |
| "learning_rate": 9.864655418930511e-05, | |
| "loss": 0.1613, | |
| "step": 15200 | |
| }, | |
| { | |
| "epoch": 1.4521258867780793, | |
| "grad_norm": 0.4075564742088318, | |
| "learning_rate": 9.864178819940903e-05, | |
| "loss": 0.1581, | |
| "step": 15250 | |
| }, | |
| { | |
| "epoch": 1.456887111365043, | |
| "grad_norm": 0.5737677216529846, | |
| "learning_rate": 9.863702220951291e-05, | |
| "loss": 0.1622, | |
| "step": 15300 | |
| }, | |
| { | |
| "epoch": 1.461648335952007, | |
| "grad_norm": 0.5977826118469238, | |
| "learning_rate": 9.863225621961681e-05, | |
| "loss": 0.1586, | |
| "step": 15350 | |
| }, | |
| { | |
| "epoch": 1.4664095605389706, | |
| "grad_norm": 0.4717673659324646, | |
| "learning_rate": 9.862749022972073e-05, | |
| "loss": 0.1566, | |
| "step": 15400 | |
| }, | |
| { | |
| "epoch": 1.4711707851259344, | |
| "grad_norm": 0.4783164858818054, | |
| "learning_rate": 9.862272423982461e-05, | |
| "loss": 0.162, | |
| "step": 15450 | |
| }, | |
| { | |
| "epoch": 1.4759320097128983, | |
| "grad_norm": 0.4709276258945465, | |
| "learning_rate": 9.861795824992851e-05, | |
| "loss": 0.1628, | |
| "step": 15500 | |
| }, | |
| { | |
| "epoch": 1.480693234299862, | |
| "grad_norm": 0.46055328845977783, | |
| "learning_rate": 9.861319226003241e-05, | |
| "loss": 0.1573, | |
| "step": 15550 | |
| }, | |
| { | |
| "epoch": 1.4854544588868257, | |
| "grad_norm": 0.4675583839416504, | |
| "learning_rate": 9.860842627013631e-05, | |
| "loss": 0.1612, | |
| "step": 15600 | |
| }, | |
| { | |
| "epoch": 1.4902156834737894, | |
| "grad_norm": 0.49638524651527405, | |
| "learning_rate": 9.860366028024021e-05, | |
| "loss": 0.1565, | |
| "step": 15650 | |
| }, | |
| { | |
| "epoch": 1.4949769080607531, | |
| "grad_norm": 0.4782148003578186, | |
| "learning_rate": 9.859889429034411e-05, | |
| "loss": 0.1559, | |
| "step": 15700 | |
| }, | |
| { | |
| "epoch": 1.499738132647717, | |
| "grad_norm": 0.39972543716430664, | |
| "learning_rate": 9.859412830044801e-05, | |
| "loss": 0.1597, | |
| "step": 15750 | |
| }, | |
| { | |
| "epoch": 1.5044993572346808, | |
| "grad_norm": 0.46148207783699036, | |
| "learning_rate": 9.858936231055191e-05, | |
| "loss": 0.165, | |
| "step": 15800 | |
| }, | |
| { | |
| "epoch": 1.5092605818216445, | |
| "grad_norm": 0.6166296005249023, | |
| "learning_rate": 9.858459632065581e-05, | |
| "loss": 0.1601, | |
| "step": 15850 | |
| }, | |
| { | |
| "epoch": 1.5140218064086084, | |
| "grad_norm": 0.6877084970474243, | |
| "learning_rate": 9.857983033075971e-05, | |
| "loss": 0.1622, | |
| "step": 15900 | |
| }, | |
| { | |
| "epoch": 1.518783030995572, | |
| "grad_norm": 0.4697044789791107, | |
| "learning_rate": 9.85750643408636e-05, | |
| "loss": 0.1591, | |
| "step": 15950 | |
| }, | |
| { | |
| "epoch": 1.5235442555825358, | |
| "grad_norm": 0.4562913179397583, | |
| "learning_rate": 9.857029835096751e-05, | |
| "loss": 0.16, | |
| "step": 16000 | |
| }, | |
| { | |
| "epoch": 1.5235442555825358, | |
| "eval_loss": 0.15759705007076263, | |
| "eval_mae": 0.6579864621162415, | |
| "eval_mse": 327.5270690917969, | |
| "eval_rmse": 18.097708945935583, | |
| "eval_runtime": 59.3074, | |
| "eval_samples_per_second": 10073.191, | |
| "eval_smape": 105.73155879974365, | |
| "eval_steps_per_second": 19.677, | |
| "step": 16000 | |
| }, | |
| { | |
| "epoch": 1.5283054801694997, | |
| "grad_norm": 0.4631439447402954, | |
| "learning_rate": 9.856553236107139e-05, | |
| "loss": 0.1596, | |
| "step": 16050 | |
| }, | |
| { | |
| "epoch": 1.5330667047564632, | |
| "grad_norm": 0.5179547667503357, | |
| "learning_rate": 9.856076637117529e-05, | |
| "loss": 0.161, | |
| "step": 16100 | |
| }, | |
| { | |
| "epoch": 1.5378279293434272, | |
| "grad_norm": 0.5846447348594666, | |
| "learning_rate": 9.85560003812792e-05, | |
| "loss": 0.157, | |
| "step": 16150 | |
| }, | |
| { | |
| "epoch": 1.5425891539303909, | |
| "grad_norm": 0.41618812084198, | |
| "learning_rate": 9.855123439138309e-05, | |
| "loss": 0.1604, | |
| "step": 16200 | |
| }, | |
| { | |
| "epoch": 1.5473503785173546, | |
| "grad_norm": 0.5047721862792969, | |
| "learning_rate": 9.854646840148699e-05, | |
| "loss": 0.1607, | |
| "step": 16250 | |
| }, | |
| { | |
| "epoch": 1.5521116031043185, | |
| "grad_norm": 0.5991541147232056, | |
| "learning_rate": 9.854170241159089e-05, | |
| "loss": 0.1596, | |
| "step": 16300 | |
| }, | |
| { | |
| "epoch": 1.5568728276912822, | |
| "grad_norm": 0.43497374653816223, | |
| "learning_rate": 9.853693642169479e-05, | |
| "loss": 0.1577, | |
| "step": 16350 | |
| }, | |
| { | |
| "epoch": 1.561634052278246, | |
| "grad_norm": 0.40854403376579285, | |
| "learning_rate": 9.853217043179869e-05, | |
| "loss": 0.1599, | |
| "step": 16400 | |
| }, | |
| { | |
| "epoch": 1.5663952768652099, | |
| "grad_norm": 0.4918048679828644, | |
| "learning_rate": 9.852740444190259e-05, | |
| "loss": 0.1603, | |
| "step": 16450 | |
| }, | |
| { | |
| "epoch": 1.5711565014521733, | |
| "grad_norm": 0.3920314908027649, | |
| "learning_rate": 9.852263845200649e-05, | |
| "loss": 0.1573, | |
| "step": 16500 | |
| }, | |
| { | |
| "epoch": 1.5759177260391373, | |
| "grad_norm": 0.3981036841869354, | |
| "learning_rate": 9.851787246211039e-05, | |
| "loss": 0.1586, | |
| "step": 16550 | |
| }, | |
| { | |
| "epoch": 1.5806789506261012, | |
| "grad_norm": 0.6724033355712891, | |
| "learning_rate": 9.851310647221427e-05, | |
| "loss": 0.1577, | |
| "step": 16600 | |
| }, | |
| { | |
| "epoch": 1.5854401752130647, | |
| "grad_norm": 0.45915940403938293, | |
| "learning_rate": 9.850834048231819e-05, | |
| "loss": 0.1634, | |
| "step": 16650 | |
| }, | |
| { | |
| "epoch": 1.5902013998000286, | |
| "grad_norm": 0.4456646740436554, | |
| "learning_rate": 9.850357449242207e-05, | |
| "loss": 0.1598, | |
| "step": 16700 | |
| }, | |
| { | |
| "epoch": 1.5949626243869923, | |
| "grad_norm": 0.4617702066898346, | |
| "learning_rate": 9.849880850252597e-05, | |
| "loss": 0.1588, | |
| "step": 16750 | |
| }, | |
| { | |
| "epoch": 1.599723848973956, | |
| "grad_norm": 0.4858147203922272, | |
| "learning_rate": 9.849404251262989e-05, | |
| "loss": 0.1603, | |
| "step": 16800 | |
| }, | |
| { | |
| "epoch": 1.60448507356092, | |
| "grad_norm": 0.5424185395240784, | |
| "learning_rate": 9.848927652273377e-05, | |
| "loss": 0.1588, | |
| "step": 16850 | |
| }, | |
| { | |
| "epoch": 1.6092462981478837, | |
| "grad_norm": 0.432170033454895, | |
| "learning_rate": 9.848451053283767e-05, | |
| "loss": 0.1593, | |
| "step": 16900 | |
| }, | |
| { | |
| "epoch": 1.6140075227348474, | |
| "grad_norm": 0.403414249420166, | |
| "learning_rate": 9.847974454294157e-05, | |
| "loss": 0.1564, | |
| "step": 16950 | |
| }, | |
| { | |
| "epoch": 1.6187687473218113, | |
| "grad_norm": 0.5686953067779541, | |
| "learning_rate": 9.847497855304547e-05, | |
| "loss": 0.1586, | |
| "step": 17000 | |
| }, | |
| { | |
| "epoch": 1.6187687473218113, | |
| "eval_loss": 0.15684476494789124, | |
| "eval_mae": 0.6601889133453369, | |
| "eval_mse": 399.5775146484375, | |
| "eval_rmse": 19.989435075770338, | |
| "eval_runtime": 60.8742, | |
| "eval_samples_per_second": 9813.93, | |
| "eval_smape": 88.6057436466217, | |
| "eval_steps_per_second": 19.171, | |
| "step": 17000 | |
| }, | |
| { | |
| "epoch": 1.6235299719087748, | |
| "grad_norm": 0.472042977809906, | |
| "learning_rate": 9.847021256314937e-05, | |
| "loss": 0.1585, | |
| "step": 17050 | |
| }, | |
| { | |
| "epoch": 1.6282911964957387, | |
| "grad_norm": 0.6247090697288513, | |
| "learning_rate": 9.846544657325327e-05, | |
| "loss": 0.1583, | |
| "step": 17100 | |
| }, | |
| { | |
| "epoch": 1.6330524210827024, | |
| "grad_norm": 0.4843044579029083, | |
| "learning_rate": 9.846068058335717e-05, | |
| "loss": 0.1592, | |
| "step": 17150 | |
| }, | |
| { | |
| "epoch": 1.6378136456696661, | |
| "grad_norm": 0.48187774419784546, | |
| "learning_rate": 9.845591459346106e-05, | |
| "loss": 0.1592, | |
| "step": 17200 | |
| }, | |
| { | |
| "epoch": 1.64257487025663, | |
| "grad_norm": 0.4763176441192627, | |
| "learning_rate": 9.845114860356497e-05, | |
| "loss": 0.1592, | |
| "step": 17250 | |
| }, | |
| { | |
| "epoch": 1.6473360948435938, | |
| "grad_norm": 0.5375500321388245, | |
| "learning_rate": 9.844638261366887e-05, | |
| "loss": 0.159, | |
| "step": 17300 | |
| }, | |
| { | |
| "epoch": 1.6520973194305575, | |
| "grad_norm": 0.4639647305011749, | |
| "learning_rate": 9.844161662377276e-05, | |
| "loss": 0.1578, | |
| "step": 17350 | |
| }, | |
| { | |
| "epoch": 1.6568585440175214, | |
| "grad_norm": 0.44177523255348206, | |
| "learning_rate": 9.843685063387667e-05, | |
| "loss": 0.1593, | |
| "step": 17400 | |
| }, | |
| { | |
| "epoch": 1.6616197686044851, | |
| "grad_norm": 0.5092636942863464, | |
| "learning_rate": 9.843208464398055e-05, | |
| "loss": 0.161, | |
| "step": 17450 | |
| }, | |
| { | |
| "epoch": 1.6663809931914488, | |
| "grad_norm": 0.4873998165130615, | |
| "learning_rate": 9.842731865408445e-05, | |
| "loss": 0.1584, | |
| "step": 17500 | |
| }, | |
| { | |
| "epoch": 1.6711422177784128, | |
| "grad_norm": 0.622734546661377, | |
| "learning_rate": 9.842255266418837e-05, | |
| "loss": 0.1597, | |
| "step": 17550 | |
| }, | |
| { | |
| "epoch": 1.6759034423653763, | |
| "grad_norm": 0.5134626030921936, | |
| "learning_rate": 9.841778667429225e-05, | |
| "loss": 0.1591, | |
| "step": 17600 | |
| }, | |
| { | |
| "epoch": 1.6806646669523402, | |
| "grad_norm": 0.42936229705810547, | |
| "learning_rate": 9.841302068439615e-05, | |
| "loss": 0.1575, | |
| "step": 17650 | |
| }, | |
| { | |
| "epoch": 1.6854258915393039, | |
| "grad_norm": 0.44326427578926086, | |
| "learning_rate": 9.840825469450005e-05, | |
| "loss": 0.1568, | |
| "step": 17700 | |
| }, | |
| { | |
| "epoch": 1.6901871161262676, | |
| "grad_norm": 0.3937668204307556, | |
| "learning_rate": 9.840348870460395e-05, | |
| "loss": 0.1593, | |
| "step": 17750 | |
| }, | |
| { | |
| "epoch": 1.6949483407132315, | |
| "grad_norm": 0.45583653450012207, | |
| "learning_rate": 9.839872271470785e-05, | |
| "loss": 0.157, | |
| "step": 17800 | |
| }, | |
| { | |
| "epoch": 1.6997095653001952, | |
| "grad_norm": 0.5357958078384399, | |
| "learning_rate": 9.839395672481175e-05, | |
| "loss": 0.1604, | |
| "step": 17850 | |
| }, | |
| { | |
| "epoch": 1.704470789887159, | |
| "grad_norm": 0.421678751707077, | |
| "learning_rate": 9.838919073491565e-05, | |
| "loss": 0.1598, | |
| "step": 17900 | |
| }, | |
| { | |
| "epoch": 1.7092320144741229, | |
| "grad_norm": 0.42326951026916504, | |
| "learning_rate": 9.838442474501954e-05, | |
| "loss": 0.157, | |
| "step": 17950 | |
| }, | |
| { | |
| "epoch": 1.7139932390610864, | |
| "grad_norm": 0.43803659081459045, | |
| "learning_rate": 9.837965875512344e-05, | |
| "loss": 0.1593, | |
| "step": 18000 | |
| }, | |
| { | |
| "epoch": 1.7139932390610864, | |
| "eval_loss": 0.15651458501815796, | |
| "eval_mae": 0.6603885889053345, | |
| "eval_mse": 359.5630187988281, | |
| "eval_rmse": 18.962146998660995, | |
| "eval_runtime": 60.8886, | |
| "eval_samples_per_second": 9811.606, | |
| "eval_smape": 325.50642490386963, | |
| "eval_steps_per_second": 19.166, | |
| "step": 18000 | |
| }, | |
| { | |
| "epoch": 1.7187544636480503, | |
| "grad_norm": 0.42728474736213684, | |
| "learning_rate": 9.837489276522735e-05, | |
| "loss": 0.1588, | |
| "step": 18050 | |
| }, | |
| { | |
| "epoch": 1.723515688235014, | |
| "grad_norm": 0.4929046928882599, | |
| "learning_rate": 9.837012677533124e-05, | |
| "loss": 0.1587, | |
| "step": 18100 | |
| }, | |
| { | |
| "epoch": 1.7282769128219777, | |
| "grad_norm": 0.47886890172958374, | |
| "learning_rate": 9.836536078543513e-05, | |
| "loss": 0.1572, | |
| "step": 18150 | |
| }, | |
| { | |
| "epoch": 1.7330381374089416, | |
| "grad_norm": 0.4829745590686798, | |
| "learning_rate": 9.836059479553903e-05, | |
| "loss": 0.1575, | |
| "step": 18200 | |
| }, | |
| { | |
| "epoch": 1.7377993619959053, | |
| "grad_norm": 0.4256702661514282, | |
| "learning_rate": 9.835582880564293e-05, | |
| "loss": 0.1575, | |
| "step": 18250 | |
| }, | |
| { | |
| "epoch": 1.742560586582869, | |
| "grad_norm": 0.4803942143917084, | |
| "learning_rate": 9.835106281574683e-05, | |
| "loss": 0.1559, | |
| "step": 18300 | |
| }, | |
| { | |
| "epoch": 1.747321811169833, | |
| "grad_norm": 0.5108392834663391, | |
| "learning_rate": 9.834629682585073e-05, | |
| "loss": 0.1558, | |
| "step": 18350 | |
| }, | |
| { | |
| "epoch": 1.7520830357567967, | |
| "grad_norm": 0.5541846752166748, | |
| "learning_rate": 9.834153083595463e-05, | |
| "loss": 0.1571, | |
| "step": 18400 | |
| }, | |
| { | |
| "epoch": 1.7568442603437604, | |
| "grad_norm": 0.3996049463748932, | |
| "learning_rate": 9.833676484605853e-05, | |
| "loss": 0.1588, | |
| "step": 18450 | |
| }, | |
| { | |
| "epoch": 1.7616054849307243, | |
| "grad_norm": 0.4311594069004059, | |
| "learning_rate": 9.833199885616243e-05, | |
| "loss": 0.1566, | |
| "step": 18500 | |
| }, | |
| { | |
| "epoch": 1.7663667095176878, | |
| "grad_norm": 0.45717188715934753, | |
| "learning_rate": 9.832723286626633e-05, | |
| "loss": 0.1586, | |
| "step": 18550 | |
| }, | |
| { | |
| "epoch": 1.7711279341046517, | |
| "grad_norm": 0.48945853114128113, | |
| "learning_rate": 9.832246687637022e-05, | |
| "loss": 0.1581, | |
| "step": 18600 | |
| }, | |
| { | |
| "epoch": 1.7758891586916155, | |
| "grad_norm": 0.4877380430698395, | |
| "learning_rate": 9.831770088647413e-05, | |
| "loss": 0.1594, | |
| "step": 18650 | |
| }, | |
| { | |
| "epoch": 1.7806503832785792, | |
| "grad_norm": 0.47437113523483276, | |
| "learning_rate": 9.831293489657803e-05, | |
| "loss": 0.1566, | |
| "step": 18700 | |
| }, | |
| { | |
| "epoch": 1.785411607865543, | |
| "grad_norm": 0.6995478272438049, | |
| "learning_rate": 9.830816890668192e-05, | |
| "loss": 0.1564, | |
| "step": 18750 | |
| }, | |
| { | |
| "epoch": 1.7901728324525068, | |
| "grad_norm": 0.4471156895160675, | |
| "learning_rate": 9.830340291678583e-05, | |
| "loss": 0.1567, | |
| "step": 18800 | |
| }, | |
| { | |
| "epoch": 1.7949340570394705, | |
| "grad_norm": 0.459011435508728, | |
| "learning_rate": 9.829863692688972e-05, | |
| "loss": 0.1599, | |
| "step": 18850 | |
| }, | |
| { | |
| "epoch": 1.7996952816264344, | |
| "grad_norm": 0.4757770001888275, | |
| "learning_rate": 9.829387093699362e-05, | |
| "loss": 0.1574, | |
| "step": 18900 | |
| }, | |
| { | |
| "epoch": 1.804456506213398, | |
| "grad_norm": 0.4678910970687866, | |
| "learning_rate": 9.828910494709751e-05, | |
| "loss": 0.1604, | |
| "step": 18950 | |
| }, | |
| { | |
| "epoch": 1.8092177308003619, | |
| "grad_norm": 0.4852876663208008, | |
| "learning_rate": 9.828433895720141e-05, | |
| "loss": 0.1562, | |
| "step": 19000 | |
| }, | |
| { | |
| "epoch": 1.8092177308003619, | |
| "eval_loss": 0.15658971667289734, | |
| "eval_mae": 0.6544845700263977, | |
| "eval_mse": 281.27392578125, | |
| "eval_rmse": 16.77122314505564, | |
| "eval_runtime": 61.2247, | |
| "eval_samples_per_second": 9757.737, | |
| "eval_smape": 80.4527759552002, | |
| "eval_steps_per_second": 19.061, | |
| "step": 19000 | |
| }, | |
| { | |
| "epoch": 1.8139789553873258, | |
| "grad_norm": 0.37452152371406555, | |
| "learning_rate": 9.827957296730531e-05, | |
| "loss": 0.1561, | |
| "step": 19050 | |
| }, | |
| { | |
| "epoch": 1.8187401799742893, | |
| "grad_norm": 0.4231972396373749, | |
| "learning_rate": 9.827480697740921e-05, | |
| "loss": 0.1588, | |
| "step": 19100 | |
| }, | |
| { | |
| "epoch": 1.8235014045612532, | |
| "grad_norm": 0.423755943775177, | |
| "learning_rate": 9.827004098751311e-05, | |
| "loss": 0.1566, | |
| "step": 19150 | |
| }, | |
| { | |
| "epoch": 1.828262629148217, | |
| "grad_norm": 0.4381329119205475, | |
| "learning_rate": 9.826527499761701e-05, | |
| "loss": 0.1596, | |
| "step": 19200 | |
| }, | |
| { | |
| "epoch": 1.8330238537351806, | |
| "grad_norm": 0.4936196804046631, | |
| "learning_rate": 9.826050900772091e-05, | |
| "loss": 0.1598, | |
| "step": 19250 | |
| }, | |
| { | |
| "epoch": 1.8377850783221445, | |
| "grad_norm": 0.6386341452598572, | |
| "learning_rate": 9.825574301782481e-05, | |
| "loss": 0.1601, | |
| "step": 19300 | |
| }, | |
| { | |
| "epoch": 1.8425463029091083, | |
| "grad_norm": 0.49802061915397644, | |
| "learning_rate": 9.82509770279287e-05, | |
| "loss": 0.1587, | |
| "step": 19350 | |
| }, | |
| { | |
| "epoch": 1.847307527496072, | |
| "grad_norm": 0.4627436697483063, | |
| "learning_rate": 9.824621103803261e-05, | |
| "loss": 0.1587, | |
| "step": 19400 | |
| }, | |
| { | |
| "epoch": 1.852068752083036, | |
| "grad_norm": 0.4635562598705292, | |
| "learning_rate": 9.824144504813651e-05, | |
| "loss": 0.1532, | |
| "step": 19450 | |
| }, | |
| { | |
| "epoch": 1.8568299766699994, | |
| "grad_norm": 0.4869281053543091, | |
| "learning_rate": 9.82366790582404e-05, | |
| "loss": 0.1576, | |
| "step": 19500 | |
| }, | |
| { | |
| "epoch": 1.8615912012569633, | |
| "grad_norm": 0.4622304141521454, | |
| "learning_rate": 9.82319130683443e-05, | |
| "loss": 0.1587, | |
| "step": 19550 | |
| }, | |
| { | |
| "epoch": 1.866352425843927, | |
| "grad_norm": 0.42829111218452454, | |
| "learning_rate": 9.82271470784482e-05, | |
| "loss": 0.1584, | |
| "step": 19600 | |
| }, | |
| { | |
| "epoch": 1.8711136504308907, | |
| "grad_norm": 0.44152921438217163, | |
| "learning_rate": 9.82223810885521e-05, | |
| "loss": 0.1597, | |
| "step": 19650 | |
| }, | |
| { | |
| "epoch": 1.8758748750178547, | |
| "grad_norm": 0.4362374246120453, | |
| "learning_rate": 9.8217615098656e-05, | |
| "loss": 0.1535, | |
| "step": 19700 | |
| }, | |
| { | |
| "epoch": 1.8806360996048184, | |
| "grad_norm": 0.42130181193351746, | |
| "learning_rate": 9.82128491087599e-05, | |
| "loss": 0.1585, | |
| "step": 19750 | |
| }, | |
| { | |
| "epoch": 1.885397324191782, | |
| "grad_norm": 0.5120296478271484, | |
| "learning_rate": 9.82080831188638e-05, | |
| "loss": 0.1583, | |
| "step": 19800 | |
| }, | |
| { | |
| "epoch": 1.890158548778746, | |
| "grad_norm": 0.4205983579158783, | |
| "learning_rate": 9.820331712896768e-05, | |
| "loss": 0.1585, | |
| "step": 19850 | |
| }, | |
| { | |
| "epoch": 1.8949197733657097, | |
| "grad_norm": 0.4189218580722809, | |
| "learning_rate": 9.819855113907159e-05, | |
| "loss": 0.1561, | |
| "step": 19900 | |
| }, | |
| { | |
| "epoch": 1.8996809979526734, | |
| "grad_norm": 0.6145504117012024, | |
| "learning_rate": 9.819378514917549e-05, | |
| "loss": 0.1625, | |
| "step": 19950 | |
| }, | |
| { | |
| "epoch": 1.9044422225396374, | |
| "grad_norm": 0.5396240949630737, | |
| "learning_rate": 9.818901915927938e-05, | |
| "loss": 0.1601, | |
| "step": 20000 | |
| }, | |
| { | |
| "epoch": 1.9044422225396374, | |
| "eval_loss": 0.15698538720607758, | |
| "eval_mae": 0.6542770862579346, | |
| "eval_mse": 287.357666015625, | |
| "eval_rmse": 16.951627237985885, | |
| "eval_runtime": 58.5657, | |
| "eval_samples_per_second": 10200.761, | |
| "eval_smape": 79.55442667007446, | |
| "eval_steps_per_second": 19.926, | |
| "step": 20000 | |
| }, | |
| { | |
| "epoch": 1.9092034471266008, | |
| "grad_norm": 0.45280078053474426, | |
| "learning_rate": 9.818425316938329e-05, | |
| "loss": 0.1593, | |
| "step": 20050 | |
| }, | |
| { | |
| "epoch": 1.9139646717135648, | |
| "grad_norm": 0.455552339553833, | |
| "learning_rate": 9.817948717948718e-05, | |
| "loss": 0.1579, | |
| "step": 20100 | |
| }, | |
| { | |
| "epoch": 1.9187258963005285, | |
| "grad_norm": 0.44774943590164185, | |
| "learning_rate": 9.817472118959108e-05, | |
| "loss": 0.1577, | |
| "step": 20150 | |
| }, | |
| { | |
| "epoch": 1.9234871208874922, | |
| "grad_norm": 0.4146822392940521, | |
| "learning_rate": 9.816995519969499e-05, | |
| "loss": 0.1567, | |
| "step": 20200 | |
| }, | |
| { | |
| "epoch": 1.9282483454744561, | |
| "grad_norm": 0.48615461587905884, | |
| "learning_rate": 9.816518920979888e-05, | |
| "loss": 0.1596, | |
| "step": 20250 | |
| }, | |
| { | |
| "epoch": 1.9330095700614198, | |
| "grad_norm": 0.5081954598426819, | |
| "learning_rate": 9.816042321990278e-05, | |
| "loss": 0.1576, | |
| "step": 20300 | |
| }, | |
| { | |
| "epoch": 1.9377707946483835, | |
| "grad_norm": 0.5155813097953796, | |
| "learning_rate": 9.815565723000668e-05, | |
| "loss": 0.1551, | |
| "step": 20350 | |
| }, | |
| { | |
| "epoch": 1.9425320192353475, | |
| "grad_norm": 0.5232491493225098, | |
| "learning_rate": 9.815089124011058e-05, | |
| "loss": 0.1597, | |
| "step": 20400 | |
| }, | |
| { | |
| "epoch": 1.947293243822311, | |
| "grad_norm": 0.4078335762023926, | |
| "learning_rate": 9.814612525021447e-05, | |
| "loss": 0.1556, | |
| "step": 20450 | |
| }, | |
| { | |
| "epoch": 1.9520544684092749, | |
| "grad_norm": 0.4975152611732483, | |
| "learning_rate": 9.814135926031837e-05, | |
| "loss": 0.1591, | |
| "step": 20500 | |
| }, | |
| { | |
| "epoch": 1.9568156929962386, | |
| "grad_norm": 0.6481941938400269, | |
| "learning_rate": 9.813659327042227e-05, | |
| "loss": 0.1565, | |
| "step": 20550 | |
| }, | |
| { | |
| "epoch": 1.9615769175832023, | |
| "grad_norm": 0.4526354968547821, | |
| "learning_rate": 9.813182728052617e-05, | |
| "loss": 0.1546, | |
| "step": 20600 | |
| }, | |
| { | |
| "epoch": 1.9663381421701662, | |
| "grad_norm": 0.4288252294063568, | |
| "learning_rate": 9.812706129063007e-05, | |
| "loss": 0.1551, | |
| "step": 20650 | |
| }, | |
| { | |
| "epoch": 1.97109936675713, | |
| "grad_norm": 0.434865266084671, | |
| "learning_rate": 9.812229530073397e-05, | |
| "loss": 0.1557, | |
| "step": 20700 | |
| }, | |
| { | |
| "epoch": 1.9758605913440936, | |
| "grad_norm": 0.4131234586238861, | |
| "learning_rate": 9.811752931083786e-05, | |
| "loss": 0.156, | |
| "step": 20750 | |
| }, | |
| { | |
| "epoch": 1.9806218159310576, | |
| "grad_norm": 0.519029438495636, | |
| "learning_rate": 9.811276332094177e-05, | |
| "loss": 0.1596, | |
| "step": 20800 | |
| }, | |
| { | |
| "epoch": 1.9853830405180213, | |
| "grad_norm": 0.4048980474472046, | |
| "learning_rate": 9.810799733104566e-05, | |
| "loss": 0.153, | |
| "step": 20850 | |
| }, | |
| { | |
| "epoch": 1.990144265104985, | |
| "grad_norm": 0.44951915740966797, | |
| "learning_rate": 9.810323134114956e-05, | |
| "loss": 0.1562, | |
| "step": 20900 | |
| }, | |
| { | |
| "epoch": 1.994905489691949, | |
| "grad_norm": 0.40820789337158203, | |
| "learning_rate": 9.809846535125346e-05, | |
| "loss": 0.1576, | |
| "step": 20950 | |
| }, | |
| { | |
| "epoch": 1.9996667142789124, | |
| "grad_norm": 0.6201032400131226, | |
| "learning_rate": 9.809369936135736e-05, | |
| "loss": 0.1551, | |
| "step": 21000 | |
| }, | |
| { | |
| "epoch": 1.9996667142789124, | |
| "eval_loss": 0.15608514845371246, | |
| "eval_mae": 0.6444294452667236, | |
| "eval_mse": 279.2149658203125, | |
| "eval_rmse": 16.709726682992528, | |
| "eval_runtime": 54.4875, | |
| "eval_samples_per_second": 10964.25, | |
| "eval_smape": 102.60157585144043, | |
| "eval_steps_per_second": 21.418, | |
| "step": 21000 | |
| }, | |
| { | |
| "epoch": 2.004380326620007, | |
| "grad_norm": 0.4798502027988434, | |
| "learning_rate": 9.808893337146126e-05, | |
| "loss": 0.1558, | |
| "step": 21050 | |
| }, | |
| { | |
| "epoch": 2.0091415512069704, | |
| "grad_norm": 0.47718650102615356, | |
| "learning_rate": 9.808416738156516e-05, | |
| "loss": 0.1581, | |
| "step": 21100 | |
| }, | |
| { | |
| "epoch": 2.0139027757939343, | |
| "grad_norm": 0.4789866507053375, | |
| "learning_rate": 9.807940139166906e-05, | |
| "loss": 0.1558, | |
| "step": 21150 | |
| }, | |
| { | |
| "epoch": 2.0186640003808978, | |
| "grad_norm": 0.5219734907150269, | |
| "learning_rate": 9.807463540177296e-05, | |
| "loss": 0.1564, | |
| "step": 21200 | |
| }, | |
| { | |
| "epoch": 2.0234252249678617, | |
| "grad_norm": 0.4981229305267334, | |
| "learning_rate": 9.806986941187684e-05, | |
| "loss": 0.1583, | |
| "step": 21250 | |
| }, | |
| { | |
| "epoch": 2.0281864495548256, | |
| "grad_norm": 0.4696637690067291, | |
| "learning_rate": 9.806510342198075e-05, | |
| "loss": 0.1568, | |
| "step": 21300 | |
| }, | |
| { | |
| "epoch": 2.032947674141789, | |
| "grad_norm": 0.44064363837242126, | |
| "learning_rate": 9.806033743208465e-05, | |
| "loss": 0.1579, | |
| "step": 21350 | |
| }, | |
| { | |
| "epoch": 2.037708898728753, | |
| "grad_norm": 0.5143409967422485, | |
| "learning_rate": 9.805557144218854e-05, | |
| "loss": 0.156, | |
| "step": 21400 | |
| }, | |
| { | |
| "epoch": 2.042470123315717, | |
| "grad_norm": 0.49321916699409485, | |
| "learning_rate": 9.805080545229245e-05, | |
| "loss": 0.155, | |
| "step": 21450 | |
| }, | |
| { | |
| "epoch": 2.0472313479026805, | |
| "grad_norm": 0.4427430331707001, | |
| "learning_rate": 9.804603946239634e-05, | |
| "loss": 0.1566, | |
| "step": 21500 | |
| }, | |
| { | |
| "epoch": 2.0519925724896444, | |
| "grad_norm": 0.45803865790367126, | |
| "learning_rate": 9.804127347250024e-05, | |
| "loss": 0.1593, | |
| "step": 21550 | |
| }, | |
| { | |
| "epoch": 2.056753797076608, | |
| "grad_norm": 0.4853162467479706, | |
| "learning_rate": 9.803650748260415e-05, | |
| "loss": 0.1575, | |
| "step": 21600 | |
| }, | |
| { | |
| "epoch": 2.061515021663572, | |
| "grad_norm": 0.47955945134162903, | |
| "learning_rate": 9.803174149270804e-05, | |
| "loss": 0.1594, | |
| "step": 21650 | |
| }, | |
| { | |
| "epoch": 2.0662762462505357, | |
| "grad_norm": 0.44243359565734863, | |
| "learning_rate": 9.802697550281194e-05, | |
| "loss": 0.1597, | |
| "step": 21700 | |
| }, | |
| { | |
| "epoch": 2.0710374708374992, | |
| "grad_norm": 0.5727405548095703, | |
| "learning_rate": 9.802220951291584e-05, | |
| "loss": 0.1576, | |
| "step": 21750 | |
| }, | |
| { | |
| "epoch": 2.075798695424463, | |
| "grad_norm": 0.5481444001197815, | |
| "learning_rate": 9.801744352301974e-05, | |
| "loss": 0.1547, | |
| "step": 21800 | |
| }, | |
| { | |
| "epoch": 2.080559920011427, | |
| "grad_norm": 0.46369367837905884, | |
| "learning_rate": 9.801267753312364e-05, | |
| "loss": 0.1553, | |
| "step": 21850 | |
| }, | |
| { | |
| "epoch": 2.0853211445983906, | |
| "grad_norm": 0.48963698744773865, | |
| "learning_rate": 9.800791154322754e-05, | |
| "loss": 0.1579, | |
| "step": 21900 | |
| }, | |
| { | |
| "epoch": 2.0900823691853545, | |
| "grad_norm": 0.5048883557319641, | |
| "learning_rate": 9.800314555333144e-05, | |
| "loss": 0.1597, | |
| "step": 21950 | |
| }, | |
| { | |
| "epoch": 2.0948435937723184, | |
| "grad_norm": 0.4893467128276825, | |
| "learning_rate": 9.799837956343532e-05, | |
| "loss": 0.1532, | |
| "step": 22000 | |
| }, | |
| { | |
| "epoch": 2.0948435937723184, | |
| "eval_loss": 0.1553574949502945, | |
| "eval_mae": 0.6453903317451477, | |
| "eval_mse": 282.9573974609375, | |
| "eval_rmse": 16.821337564561787, | |
| "eval_runtime": 60.2846, | |
| "eval_samples_per_second": 9909.912, | |
| "eval_smape": 85.01211404800415, | |
| "eval_steps_per_second": 19.358, | |
| "step": 22000 | |
| }, | |
| { | |
| "epoch": 2.099604818359282, | |
| "grad_norm": 0.46226996183395386, | |
| "learning_rate": 9.799361357353923e-05, | |
| "loss": 0.1564, | |
| "step": 22050 | |
| }, | |
| { | |
| "epoch": 2.104366042946246, | |
| "grad_norm": 0.46276605129241943, | |
| "learning_rate": 9.798884758364313e-05, | |
| "loss": 0.1562, | |
| "step": 22100 | |
| }, | |
| { | |
| "epoch": 2.1091272675332093, | |
| "grad_norm": 0.4646179974079132, | |
| "learning_rate": 9.798408159374702e-05, | |
| "loss": 0.1576, | |
| "step": 22150 | |
| }, | |
| { | |
| "epoch": 2.1138884921201733, | |
| "grad_norm": 0.4897124171257019, | |
| "learning_rate": 9.797931560385093e-05, | |
| "loss": 0.1568, | |
| "step": 22200 | |
| }, | |
| { | |
| "epoch": 2.118649716707137, | |
| "grad_norm": 0.43058913946151733, | |
| "learning_rate": 9.797454961395482e-05, | |
| "loss": 0.1579, | |
| "step": 22250 | |
| }, | |
| { | |
| "epoch": 2.1234109412941007, | |
| "grad_norm": 0.37743493914604187, | |
| "learning_rate": 9.796978362405872e-05, | |
| "loss": 0.1578, | |
| "step": 22300 | |
| }, | |
| { | |
| "epoch": 2.1281721658810646, | |
| "grad_norm": 0.4362202286720276, | |
| "learning_rate": 9.796501763416263e-05, | |
| "loss": 0.1579, | |
| "step": 22350 | |
| }, | |
| { | |
| "epoch": 2.1329333904680285, | |
| "grad_norm": 0.3836491107940674, | |
| "learning_rate": 9.796025164426652e-05, | |
| "loss": 0.1576, | |
| "step": 22400 | |
| }, | |
| { | |
| "epoch": 2.137694615054992, | |
| "grad_norm": 0.4960291385650635, | |
| "learning_rate": 9.795548565437042e-05, | |
| "loss": 0.1563, | |
| "step": 22450 | |
| }, | |
| { | |
| "epoch": 2.142455839641956, | |
| "grad_norm": 0.43519532680511475, | |
| "learning_rate": 9.795071966447432e-05, | |
| "loss": 0.1598, | |
| "step": 22500 | |
| }, | |
| { | |
| "epoch": 2.1472170642289194, | |
| "grad_norm": 0.4592967629432678, | |
| "learning_rate": 9.794595367457822e-05, | |
| "loss": 0.158, | |
| "step": 22550 | |
| }, | |
| { | |
| "epoch": 2.1519782888158834, | |
| "grad_norm": 0.5372556447982788, | |
| "learning_rate": 9.794118768468212e-05, | |
| "loss": 0.1591, | |
| "step": 22600 | |
| }, | |
| { | |
| "epoch": 2.1567395134028473, | |
| "grad_norm": 0.40967512130737305, | |
| "learning_rate": 9.7936421694786e-05, | |
| "loss": 0.159, | |
| "step": 22650 | |
| }, | |
| { | |
| "epoch": 2.161500737989811, | |
| "grad_norm": 0.4951903223991394, | |
| "learning_rate": 9.793165570488992e-05, | |
| "loss": 0.1559, | |
| "step": 22700 | |
| }, | |
| { | |
| "epoch": 2.1662619625767747, | |
| "grad_norm": 0.467052698135376, | |
| "learning_rate": 9.79268897149938e-05, | |
| "loss": 0.1612, | |
| "step": 22750 | |
| }, | |
| { | |
| "epoch": 2.1710231871637387, | |
| "grad_norm": 0.42239800095558167, | |
| "learning_rate": 9.79221237250977e-05, | |
| "loss": 0.1597, | |
| "step": 22800 | |
| }, | |
| { | |
| "epoch": 2.175784411750702, | |
| "grad_norm": 0.4856722354888916, | |
| "learning_rate": 9.791735773520161e-05, | |
| "loss": 0.1572, | |
| "step": 22850 | |
| }, | |
| { | |
| "epoch": 2.180545636337666, | |
| "grad_norm": 0.47815021872520447, | |
| "learning_rate": 9.79125917453055e-05, | |
| "loss": 0.1607, | |
| "step": 22900 | |
| }, | |
| { | |
| "epoch": 2.18530686092463, | |
| "grad_norm": 0.5437564849853516, | |
| "learning_rate": 9.79078257554094e-05, | |
| "loss": 0.1587, | |
| "step": 22950 | |
| }, | |
| { | |
| "epoch": 2.1900680855115935, | |
| "grad_norm": 0.42942744493484497, | |
| "learning_rate": 9.79030597655133e-05, | |
| "loss": 0.1564, | |
| "step": 23000 | |
| }, | |
| { | |
| "epoch": 2.1900680855115935, | |
| "eval_loss": 0.15535122156143188, | |
| "eval_mae": 0.6484833359718323, | |
| "eval_mse": 332.3757629394531, | |
| "eval_rmse": 18.231175577549934, | |
| "eval_runtime": 56.6725, | |
| "eval_samples_per_second": 10541.528, | |
| "eval_smape": 76.03496313095093, | |
| "eval_steps_per_second": 20.592, | |
| "step": 23000 | |
| }, | |
| { | |
| "epoch": 2.1948293100985574, | |
| "grad_norm": 0.5147453546524048, | |
| "learning_rate": 9.78982937756172e-05, | |
| "loss": 0.1584, | |
| "step": 23050 | |
| }, | |
| { | |
| "epoch": 2.1995905346855213, | |
| "grad_norm": 0.46314072608947754, | |
| "learning_rate": 9.78935277857211e-05, | |
| "loss": 0.1585, | |
| "step": 23100 | |
| }, | |
| { | |
| "epoch": 2.204351759272485, | |
| "grad_norm": 0.5470126867294312, | |
| "learning_rate": 9.7888761795825e-05, | |
| "loss": 0.1594, | |
| "step": 23150 | |
| }, | |
| { | |
| "epoch": 2.2091129838594488, | |
| "grad_norm": 0.5074204802513123, | |
| "learning_rate": 9.78839958059289e-05, | |
| "loss": 0.1576, | |
| "step": 23200 | |
| }, | |
| { | |
| "epoch": 2.2138742084464123, | |
| "grad_norm": 0.3913464844226837, | |
| "learning_rate": 9.78792298160328e-05, | |
| "loss": 0.1564, | |
| "step": 23250 | |
| }, | |
| { | |
| "epoch": 2.218635433033376, | |
| "grad_norm": 0.5663050413131714, | |
| "learning_rate": 9.78744638261367e-05, | |
| "loss": 0.1607, | |
| "step": 23300 | |
| }, | |
| { | |
| "epoch": 2.22339665762034, | |
| "grad_norm": 0.5126326084136963, | |
| "learning_rate": 9.78696978362406e-05, | |
| "loss": 0.1575, | |
| "step": 23350 | |
| }, | |
| { | |
| "epoch": 2.2281578822073036, | |
| "grad_norm": 0.531508207321167, | |
| "learning_rate": 9.786493184634448e-05, | |
| "loss": 0.1552, | |
| "step": 23400 | |
| }, | |
| { | |
| "epoch": 2.2329191067942675, | |
| "grad_norm": 0.42278382182121277, | |
| "learning_rate": 9.78601658564484e-05, | |
| "loss": 0.1563, | |
| "step": 23450 | |
| }, | |
| { | |
| "epoch": 2.2376803313812315, | |
| "grad_norm": 0.4637637436389923, | |
| "learning_rate": 9.78553998665523e-05, | |
| "loss": 0.1589, | |
| "step": 23500 | |
| }, | |
| { | |
| "epoch": 2.242441555968195, | |
| "grad_norm": 0.4949529767036438, | |
| "learning_rate": 9.785063387665618e-05, | |
| "loss": 0.1572, | |
| "step": 23550 | |
| }, | |
| { | |
| "epoch": 2.247202780555159, | |
| "grad_norm": 0.5856890678405762, | |
| "learning_rate": 9.78458678867601e-05, | |
| "loss": 0.1571, | |
| "step": 23600 | |
| }, | |
| { | |
| "epoch": 2.2519640051421224, | |
| "grad_norm": 0.6656904220581055, | |
| "learning_rate": 9.784110189686398e-05, | |
| "loss": 0.1578, | |
| "step": 23650 | |
| }, | |
| { | |
| "epoch": 2.2567252297290863, | |
| "grad_norm": 0.49895474314689636, | |
| "learning_rate": 9.783633590696788e-05, | |
| "loss": 0.1544, | |
| "step": 23700 | |
| }, | |
| { | |
| "epoch": 2.26148645431605, | |
| "grad_norm": 0.5674960017204285, | |
| "learning_rate": 9.783156991707178e-05, | |
| "loss": 0.1586, | |
| "step": 23750 | |
| }, | |
| { | |
| "epoch": 2.2662476789030137, | |
| "grad_norm": 0.6098377108573914, | |
| "learning_rate": 9.782680392717568e-05, | |
| "loss": 0.1563, | |
| "step": 23800 | |
| }, | |
| { | |
| "epoch": 2.2710089034899776, | |
| "grad_norm": 0.5532990097999573, | |
| "learning_rate": 9.782203793727958e-05, | |
| "loss": 0.1558, | |
| "step": 23850 | |
| }, | |
| { | |
| "epoch": 2.2757701280769416, | |
| "grad_norm": 0.44679224491119385, | |
| "learning_rate": 9.781727194738346e-05, | |
| "loss": 0.1582, | |
| "step": 23900 | |
| }, | |
| { | |
| "epoch": 2.280531352663905, | |
| "grad_norm": 0.5477356314659119, | |
| "learning_rate": 9.781250595748738e-05, | |
| "loss": 0.1583, | |
| "step": 23950 | |
| }, | |
| { | |
| "epoch": 2.285292577250869, | |
| "grad_norm": 0.5364423990249634, | |
| "learning_rate": 9.780773996759128e-05, | |
| "loss": 0.1568, | |
| "step": 24000 | |
| }, | |
| { | |
| "epoch": 2.285292577250869, | |
| "eval_loss": 0.15508916974067688, | |
| "eval_mae": 0.6528403162956238, | |
| "eval_mse": 356.0440979003906, | |
| "eval_rmse": 18.869130819950097, | |
| "eval_runtime": 55.4778, | |
| "eval_samples_per_second": 10768.544, | |
| "eval_smape": 92.25972294807434, | |
| "eval_steps_per_second": 21.035, | |
| "step": 24000 | |
| } | |
| ], | |
| "logging_steps": 50, | |
| "max_steps": 1050100, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 100, | |
| "save_steps": 2000, | |
| "stateful_callbacks": { | |
| "EarlyStoppingCallback": { | |
| "args": { | |
| "early_stopping_patience": 3, | |
| "early_stopping_threshold": 0.0 | |
| }, | |
| "attributes": { | |
| "early_stopping_patience_counter": 0 | |
| } | |
| }, | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 4.560071489513472e+16, | |
| "train_batch_size": 256, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
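
Below is a minimal sketch (not part of the trainer state itself) of how a saved state file like the one above can be inspected after a run. The file name "trainer_state.json" is an assumption for illustration, and the early-stopping loop only approximates the bookkeeping that transformers' EarlyStoppingCallback performs with the patience and threshold recorded under "stateful_callbacks".

```python
# Sketch only: reads an assumed local copy of the state shown above.
import json

with open("trainer_state.json") as f:   # assumed path
    state = json.load(f)

# log_history mixes training records (with "loss") and evaluation records (with "eval_loss").
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss: ", state["best_metric"])
print("last train loss:", train_log[-1]["loss"], "at step", train_log[-1]["step"])

# Approximate the early-stopping bookkeeping implied by stateful_callbacks:
# the patience counter resets whenever eval_loss improves by more than the threshold.
es_args = state["stateful_callbacks"]["EarlyStoppingCallback"]["args"]
patience = es_args["early_stopping_patience"]
threshold = es_args["early_stopping_threshold"]

best, counter = float("inf"), 0
for record in eval_log:
    if record["eval_loss"] < best - threshold:
        best, counter = record["eval_loss"], 0
    else:
        counter += 1
    if counter >= patience:
        print("early stopping would trigger at step", record["step"])
        break
else:
    print(f"no early stop within the logged steps (patience counter = {counter})")
```

With the values logged here (eval_loss still improving at step 24000, the recorded best_global_step), the approximation above reaches the end of the logged evaluations without tripping the patience limit, which matches the early_stopping_patience_counter of 0 stored in the state.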