| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 4.0, | |
| "eval_steps": 146, | |
| "global_step": 1456, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.005494505494505495, | |
|         "grad_norm": null, | |
| "learning_rate": 0.0, | |
| "loss": 2.0983, | |
| "step": 2 | |
| }, | |
| { | |
| "epoch": 0.01098901098901099, | |
| "grad_norm": 5.516454696655273, | |
| "learning_rate": 1.36986301369863e-07, | |
| "loss": 2.0688, | |
| "step": 4 | |
| }, | |
| { | |
| "epoch": 0.016483516483516484, | |
| "grad_norm": 5.619696617126465, | |
| "learning_rate": 2.73972602739726e-07, | |
| "loss": 2.0901, | |
| "step": 6 | |
| }, | |
| { | |
| "epoch": 0.02197802197802198, | |
| "grad_norm": 5.722362995147705, | |
| "learning_rate": 4.1095890410958903e-07, | |
| "loss": 2.1, | |
| "step": 8 | |
| }, | |
| { | |
| "epoch": 0.027472527472527472, | |
| "grad_norm": 5.279537200927734, | |
| "learning_rate": 5.47945205479452e-07, | |
| "loss": 2.0519, | |
| "step": 10 | |
| }, | |
| { | |
| "epoch": 0.03296703296703297, | |
| "grad_norm": 4.9255170822143555, | |
| "learning_rate": 6.849315068493151e-07, | |
| "loss": 2.0328, | |
| "step": 12 | |
| }, | |
| { | |
| "epoch": 0.038461538461538464, | |
| "grad_norm": 4.8295369148254395, | |
| "learning_rate": 8.219178082191781e-07, | |
| "loss": 1.9894, | |
| "step": 14 | |
| }, | |
| { | |
| "epoch": 0.04395604395604396, | |
| "grad_norm": 6.103366374969482, | |
| "learning_rate": 9.589041095890411e-07, | |
| "loss": 1.9078, | |
| "step": 16 | |
| }, | |
| { | |
| "epoch": 0.04945054945054945, | |
| "grad_norm": 5.239104747772217, | |
| "learning_rate": 1.095890410958904e-06, | |
| "loss": 1.8896, | |
| "step": 18 | |
| }, | |
| { | |
| "epoch": 0.054945054945054944, | |
| "grad_norm": 4.494051933288574, | |
| "learning_rate": 1.2328767123287673e-06, | |
| "loss": 1.6875, | |
| "step": 20 | |
| }, | |
| { | |
| "epoch": 0.06043956043956044, | |
| "grad_norm": 4.799252510070801, | |
| "learning_rate": 1.3698630136986302e-06, | |
| "loss": 1.6452, | |
| "step": 22 | |
| }, | |
| { | |
| "epoch": 0.06593406593406594, | |
| "grad_norm": 4.425687789916992, | |
| "learning_rate": 1.5068493150684932e-06, | |
| "loss": 1.5281, | |
| "step": 24 | |
| }, | |
| { | |
| "epoch": 0.07142857142857142, | |
| "grad_norm": 2.157963514328003, | |
| "learning_rate": 1.6438356164383561e-06, | |
| "loss": 1.3483, | |
| "step": 26 | |
| }, | |
| { | |
| "epoch": 0.07692307692307693, | |
| "grad_norm": 1.2025972604751587, | |
| "learning_rate": 1.7808219178082193e-06, | |
| "loss": 1.3064, | |
| "step": 28 | |
| }, | |
| { | |
| "epoch": 0.08241758241758242, | |
| "grad_norm": 0.9520919322967529, | |
| "learning_rate": 1.9178082191780823e-06, | |
| "loss": 1.2681, | |
| "step": 30 | |
| }, | |
| { | |
| "epoch": 0.08791208791208792, | |
| "grad_norm": 0.6353013515472412, | |
| "learning_rate": 2.0547945205479454e-06, | |
| "loss": 1.2484, | |
| "step": 32 | |
| }, | |
| { | |
| "epoch": 0.09340659340659341, | |
| "grad_norm": 0.46617600321769714, | |
| "learning_rate": 2.191780821917808e-06, | |
| "loss": 1.24, | |
| "step": 34 | |
| }, | |
| { | |
| "epoch": 0.0989010989010989, | |
| "grad_norm": 0.44788283109664917, | |
| "learning_rate": 2.3287671232876713e-06, | |
| "loss": 1.2342, | |
| "step": 36 | |
| }, | |
| { | |
| "epoch": 0.1043956043956044, | |
| "grad_norm": 0.3921670615673065, | |
| "learning_rate": 2.4657534246575345e-06, | |
| "loss": 1.217, | |
| "step": 38 | |
| }, | |
| { | |
| "epoch": 0.10989010989010989, | |
| "grad_norm": 0.3610630929470062, | |
| "learning_rate": 2.6027397260273973e-06, | |
| "loss": 1.225, | |
| "step": 40 | |
| }, | |
| { | |
| "epoch": 0.11538461538461539, | |
| "grad_norm": 0.3526926040649414, | |
| "learning_rate": 2.7397260273972604e-06, | |
| "loss": 1.2088, | |
| "step": 42 | |
| }, | |
| { | |
| "epoch": 0.12087912087912088, | |
| "grad_norm": 0.3451712727546692, | |
| "learning_rate": 2.876712328767123e-06, | |
| "loss": 1.2198, | |
| "step": 44 | |
| }, | |
| { | |
| "epoch": 0.12637362637362637, | |
| "grad_norm": 0.33328160643577576, | |
| "learning_rate": 3.0136986301369864e-06, | |
| "loss": 1.2173, | |
| "step": 46 | |
| }, | |
| { | |
| "epoch": 0.13186813186813187, | |
| "grad_norm": 0.3273652195930481, | |
| "learning_rate": 3.1506849315068495e-06, | |
| "loss": 1.2088, | |
| "step": 48 | |
| }, | |
| { | |
| "epoch": 0.13736263736263737, | |
| "grad_norm": 0.3195970952510834, | |
| "learning_rate": 3.2876712328767123e-06, | |
| "loss": 1.2054, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.14285714285714285, | |
| "grad_norm": 0.2997814416885376, | |
| "learning_rate": 3.4246575342465754e-06, | |
| "loss": 1.2046, | |
| "step": 52 | |
| }, | |
| { | |
| "epoch": 0.14835164835164835, | |
| "grad_norm": 0.2609250247478485, | |
| "learning_rate": 3.5616438356164386e-06, | |
| "loss": 1.2058, | |
| "step": 54 | |
| }, | |
| { | |
| "epoch": 0.15384615384615385, | |
| "grad_norm": 0.21140357851982117, | |
| "learning_rate": 3.6986301369863014e-06, | |
| "loss": 1.2001, | |
| "step": 56 | |
| }, | |
| { | |
| "epoch": 0.15934065934065933, | |
| "grad_norm": 0.16791734099388123, | |
| "learning_rate": 3.8356164383561645e-06, | |
| "loss": 1.2017, | |
| "step": 58 | |
| }, | |
| { | |
| "epoch": 0.16483516483516483, | |
| "grad_norm": 0.13620080053806305, | |
| "learning_rate": 3.972602739726027e-06, | |
| "loss": 1.1951, | |
| "step": 60 | |
| }, | |
| { | |
| "epoch": 0.17032967032967034, | |
| "grad_norm": 0.12290852516889572, | |
| "learning_rate": 4.109589041095891e-06, | |
| "loss": 1.2006, | |
| "step": 62 | |
| }, | |
| { | |
| "epoch": 0.17582417582417584, | |
| "grad_norm": 0.1265764832496643, | |
| "learning_rate": 4.246575342465754e-06, | |
| "loss": 1.2009, | |
| "step": 64 | |
| }, | |
| { | |
| "epoch": 0.1813186813186813, | |
| "grad_norm": 0.10449671745300293, | |
| "learning_rate": 4.383561643835616e-06, | |
| "loss": 1.1948, | |
| "step": 66 | |
| }, | |
| { | |
| "epoch": 0.18681318681318682, | |
| "grad_norm": 0.09835775941610336, | |
| "learning_rate": 4.52054794520548e-06, | |
| "loss": 1.186, | |
| "step": 68 | |
| }, | |
| { | |
| "epoch": 0.19230769230769232, | |
| "grad_norm": 0.10505539923906326, | |
| "learning_rate": 4.657534246575343e-06, | |
| "loss": 1.1913, | |
| "step": 70 | |
| }, | |
| { | |
| "epoch": 0.1978021978021978, | |
| "grad_norm": 0.13944971561431885, | |
| "learning_rate": 4.7945205479452054e-06, | |
| "loss": 1.1911, | |
| "step": 72 | |
| }, | |
| { | |
| "epoch": 0.2032967032967033, | |
| "grad_norm": 0.1453215479850769, | |
| "learning_rate": 4.931506849315069e-06, | |
| "loss": 1.1969, | |
| "step": 74 | |
| }, | |
| { | |
| "epoch": 0.2087912087912088, | |
| "grad_norm": 0.12393910437822342, | |
| "learning_rate": 5.068493150684932e-06, | |
| "loss": 1.199, | |
| "step": 76 | |
| }, | |
| { | |
| "epoch": 0.21428571428571427, | |
| "grad_norm": 0.10112921893596649, | |
| "learning_rate": 5.2054794520547945e-06, | |
| "loss": 1.1914, | |
| "step": 78 | |
| }, | |
| { | |
| "epoch": 0.21978021978021978, | |
| "grad_norm": 0.1048625037074089, | |
| "learning_rate": 5.342465753424658e-06, | |
| "loss": 1.1937, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.22527472527472528, | |
| "grad_norm": 0.1040705144405365, | |
| "learning_rate": 5.479452054794521e-06, | |
| "loss": 1.1889, | |
| "step": 82 | |
| }, | |
| { | |
| "epoch": 0.23076923076923078, | |
| "grad_norm": 0.14360405504703522, | |
| "learning_rate": 5.6164383561643845e-06, | |
| "loss": 1.1946, | |
| "step": 84 | |
| }, | |
| { | |
| "epoch": 0.23626373626373626, | |
| "grad_norm": 0.12936367094516754, | |
| "learning_rate": 5.753424657534246e-06, | |
| "loss": 1.1882, | |
| "step": 86 | |
| }, | |
| { | |
| "epoch": 0.24175824175824176, | |
| "grad_norm": 0.08769886940717697, | |
| "learning_rate": 5.89041095890411e-06, | |
| "loss": 1.1873, | |
| "step": 88 | |
| }, | |
| { | |
| "epoch": 0.24725274725274726, | |
| "grad_norm": 0.1197541207075119, | |
| "learning_rate": 6.027397260273973e-06, | |
| "loss": 1.1863, | |
| "step": 90 | |
| }, | |
| { | |
| "epoch": 0.25274725274725274, | |
| "grad_norm": 0.12938760221004486, | |
| "learning_rate": 6.164383561643836e-06, | |
| "loss": 1.188, | |
| "step": 92 | |
| }, | |
| { | |
| "epoch": 0.25824175824175827, | |
| "grad_norm": 0.14707419276237488, | |
| "learning_rate": 6.301369863013699e-06, | |
| "loss": 1.1886, | |
| "step": 94 | |
| }, | |
| { | |
| "epoch": 0.26373626373626374, | |
| "grad_norm": 0.12229153513908386, | |
| "learning_rate": 6.438356164383563e-06, | |
| "loss": 1.1908, | |
| "step": 96 | |
| }, | |
| { | |
| "epoch": 0.2692307692307692, | |
| "grad_norm": 0.1133919209241867, | |
| "learning_rate": 6.5753424657534245e-06, | |
| "loss": 1.1825, | |
| "step": 98 | |
| }, | |
| { | |
| "epoch": 0.27472527472527475, | |
| "grad_norm": 0.12165709584951401, | |
| "learning_rate": 6.712328767123288e-06, | |
| "loss": 1.1807, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.2802197802197802, | |
| "grad_norm": 0.12554234266281128, | |
| "learning_rate": 6.849315068493151e-06, | |
| "loss": 1.1831, | |
| "step": 102 | |
| }, | |
| { | |
| "epoch": 0.2857142857142857, | |
| "grad_norm": 0.11460216343402863, | |
| "learning_rate": 6.9863013698630145e-06, | |
| "loss": 1.1865, | |
| "step": 104 | |
| }, | |
| { | |
| "epoch": 0.29120879120879123, | |
| "grad_norm": 0.1516668051481247, | |
| "learning_rate": 7.123287671232877e-06, | |
| "loss": 1.1871, | |
| "step": 106 | |
| }, | |
| { | |
| "epoch": 0.2967032967032967, | |
| "grad_norm": 0.15025198459625244, | |
| "learning_rate": 7.260273972602741e-06, | |
| "loss": 1.1844, | |
| "step": 108 | |
| }, | |
| { | |
| "epoch": 0.3021978021978022, | |
| "grad_norm": 0.13930697739124298, | |
| "learning_rate": 7.397260273972603e-06, | |
| "loss": 1.1803, | |
| "step": 110 | |
| }, | |
| { | |
| "epoch": 0.3076923076923077, | |
| "grad_norm": 0.14465640485286713, | |
| "learning_rate": 7.534246575342466e-06, | |
| "loss": 1.1895, | |
| "step": 112 | |
| }, | |
| { | |
| "epoch": 0.3131868131868132, | |
| "grad_norm": 0.12900669872760773, | |
| "learning_rate": 7.671232876712329e-06, | |
| "loss": 1.1874, | |
| "step": 114 | |
| }, | |
| { | |
| "epoch": 0.31868131868131866, | |
| "grad_norm": 0.12059827893972397, | |
| "learning_rate": 7.808219178082192e-06, | |
| "loss": 1.184, | |
| "step": 116 | |
| }, | |
| { | |
| "epoch": 0.3241758241758242, | |
| "grad_norm": 0.11196708679199219, | |
| "learning_rate": 7.945205479452055e-06, | |
| "loss": 1.1876, | |
| "step": 118 | |
| }, | |
| { | |
| "epoch": 0.32967032967032966, | |
| "grad_norm": 0.11420655995607376, | |
| "learning_rate": 8.082191780821919e-06, | |
| "loss": 1.1799, | |
| "step": 120 | |
| }, | |
| { | |
| "epoch": 0.33516483516483514, | |
| "grad_norm": 0.10233011841773987, | |
| "learning_rate": 8.219178082191782e-06, | |
| "loss": 1.1836, | |
| "step": 122 | |
| }, | |
| { | |
| "epoch": 0.34065934065934067, | |
| "grad_norm": 0.13560251891613007, | |
| "learning_rate": 8.356164383561644e-06, | |
| "loss": 1.1825, | |
| "step": 124 | |
| }, | |
| { | |
| "epoch": 0.34615384615384615, | |
| "grad_norm": 0.1417393684387207, | |
| "learning_rate": 8.493150684931507e-06, | |
| "loss": 1.181, | |
| "step": 126 | |
| }, | |
| { | |
| "epoch": 0.3516483516483517, | |
| "grad_norm": 0.12368721514940262, | |
| "learning_rate": 8.63013698630137e-06, | |
| "loss": 1.1857, | |
| "step": 128 | |
| }, | |
| { | |
| "epoch": 0.35714285714285715, | |
| "grad_norm": 0.14341862499713898, | |
| "learning_rate": 8.767123287671233e-06, | |
| "loss": 1.1839, | |
| "step": 130 | |
| }, | |
| { | |
| "epoch": 0.3626373626373626, | |
| "grad_norm": 0.14925454556941986, | |
| "learning_rate": 8.904109589041097e-06, | |
| "loss": 1.1792, | |
| "step": 132 | |
| }, | |
| { | |
| "epoch": 0.36813186813186816, | |
| "grad_norm": 0.1684715300798416, | |
| "learning_rate": 9.04109589041096e-06, | |
| "loss": 1.1806, | |
| "step": 134 | |
| }, | |
| { | |
| "epoch": 0.37362637362637363, | |
| "grad_norm": 0.10095636546611786, | |
| "learning_rate": 9.178082191780823e-06, | |
| "loss": 1.176, | |
| "step": 136 | |
| }, | |
| { | |
| "epoch": 0.3791208791208791, | |
| "grad_norm": 0.11856956034898758, | |
| "learning_rate": 9.315068493150685e-06, | |
| "loss": 1.1849, | |
| "step": 138 | |
| }, | |
| { | |
| "epoch": 0.38461538461538464, | |
| "grad_norm": 0.19565819203853607, | |
| "learning_rate": 9.452054794520548e-06, | |
| "loss": 1.1803, | |
| "step": 140 | |
| }, | |
| { | |
| "epoch": 0.3901098901098901, | |
| "grad_norm": 0.15888364613056183, | |
| "learning_rate": 9.589041095890411e-06, | |
| "loss": 1.1756, | |
| "step": 142 | |
| }, | |
| { | |
| "epoch": 0.3956043956043956, | |
| "grad_norm": 0.16350317001342773, | |
| "learning_rate": 9.726027397260275e-06, | |
| "loss": 1.1777, | |
| "step": 144 | |
| }, | |
| { | |
| "epoch": 0.4010989010989011, | |
| "grad_norm": 0.13230757415294647, | |
| "learning_rate": 9.863013698630138e-06, | |
| "loss": 1.1843, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 0.4010989010989011, | |
| "eval_loss": 1.1792316436767578, | |
| "eval_runtime": 299.2609, | |
| "eval_samples_per_second": 17.296, | |
| "eval_steps_per_second": 0.271, | |
| "step": 146 | |
| }, | |
| { | |
| "epoch": 0.4065934065934066, | |
| "grad_norm": 0.1627519279718399, | |
| "learning_rate": 1e-05, | |
| "loss": 1.174, | |
| "step": 148 | |
| }, | |
| { | |
| "epoch": 0.41208791208791207, | |
| "grad_norm": 0.13225838541984558, | |
| "learning_rate": 9.999942488284598e-06, | |
| "loss": 1.1779, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.4175824175824176, | |
| "grad_norm": 0.15863117575645447, | |
| "learning_rate": 9.999769954461425e-06, | |
| "loss": 1.1767, | |
| "step": 152 | |
| }, | |
| { | |
| "epoch": 0.4230769230769231, | |
| "grad_norm": 0.15866652131080627, | |
| "learning_rate": 9.999482402499569e-06, | |
| "loss": 1.1758, | |
| "step": 154 | |
| }, | |
| { | |
| "epoch": 0.42857142857142855, | |
| "grad_norm": 0.1270761787891388, | |
| "learning_rate": 9.999079839014074e-06, | |
| "loss": 1.1756, | |
| "step": 156 | |
| }, | |
| { | |
| "epoch": 0.4340659340659341, | |
| "grad_norm": 0.10430735349655151, | |
| "learning_rate": 9.998562273265786e-06, | |
| "loss": 1.1728, | |
| "step": 158 | |
| }, | |
| { | |
| "epoch": 0.43956043956043955, | |
| "grad_norm": 0.13678255677223206, | |
| "learning_rate": 9.997929717161142e-06, | |
| "loss": 1.1774, | |
| "step": 160 | |
| }, | |
| { | |
| "epoch": 0.44505494505494503, | |
| "grad_norm": 0.13817106187343597, | |
| "learning_rate": 9.997182185251896e-06, | |
| "loss": 1.1804, | |
| "step": 162 | |
| }, | |
| { | |
| "epoch": 0.45054945054945056, | |
| "grad_norm": 0.15956689417362213, | |
| "learning_rate": 9.996319694734787e-06, | |
| "loss": 1.1802, | |
| "step": 164 | |
| }, | |
| { | |
| "epoch": 0.45604395604395603, | |
| "grad_norm": 0.2157265841960907, | |
| "learning_rate": 9.995342265451138e-06, | |
| "loss": 1.1803, | |
| "step": 166 | |
| }, | |
| { | |
| "epoch": 0.46153846153846156, | |
| "grad_norm": 0.17677265405654907, | |
| "learning_rate": 9.994249919886402e-06, | |
| "loss": 1.1764, | |
| "step": 168 | |
| }, | |
| { | |
| "epoch": 0.46703296703296704, | |
| "grad_norm": 0.16796888411045074, | |
| "learning_rate": 9.993042683169647e-06, | |
| "loss": 1.1702, | |
| "step": 170 | |
| }, | |
| { | |
| "epoch": 0.4725274725274725, | |
| "grad_norm": 0.16481636464595795, | |
| "learning_rate": 9.991720583072975e-06, | |
| "loss": 1.1844, | |
| "step": 172 | |
| }, | |
| { | |
| "epoch": 0.47802197802197804, | |
| "grad_norm": 0.19735674560070038, | |
| "learning_rate": 9.990283650010883e-06, | |
| "loss": 1.1791, | |
| "step": 174 | |
| }, | |
| { | |
| "epoch": 0.4835164835164835, | |
| "grad_norm": 0.15785232186317444, | |
| "learning_rate": 9.988731917039564e-06, | |
| "loss": 1.1783, | |
| "step": 176 | |
| }, | |
| { | |
| "epoch": 0.489010989010989, | |
| "grad_norm": 0.17257508635520935, | |
| "learning_rate": 9.98706541985615e-06, | |
| "loss": 1.1769, | |
| "step": 178 | |
| }, | |
| { | |
| "epoch": 0.4945054945054945, | |
| "grad_norm": 0.1660294085741043, | |
| "learning_rate": 9.985284196797884e-06, | |
| "loss": 1.181, | |
| "step": 180 | |
| }, | |
| { | |
| "epoch": 0.5, | |
| "grad_norm": 0.1156650260090828, | |
| "learning_rate": 9.983388288841246e-06, | |
| "loss": 1.1865, | |
| "step": 182 | |
| }, | |
| { | |
| "epoch": 0.5054945054945055, | |
| "grad_norm": 0.0954098179936409, | |
| "learning_rate": 9.981377739601002e-06, | |
| "loss": 1.1787, | |
| "step": 184 | |
| }, | |
| { | |
| "epoch": 0.510989010989011, | |
| "grad_norm": 0.0957237258553505, | |
| "learning_rate": 9.979252595329204e-06, | |
| "loss": 1.1731, | |
| "step": 186 | |
| }, | |
| { | |
| "epoch": 0.5164835164835165, | |
| "grad_norm": 0.11179114878177643, | |
| "learning_rate": 9.977012904914133e-06, | |
| "loss": 1.166, | |
| "step": 188 | |
| }, | |
| { | |
| "epoch": 0.521978021978022, | |
| "grad_norm": 0.12088882923126221, | |
| "learning_rate": 9.974658719879163e-06, | |
| "loss": 1.1706, | |
| "step": 190 | |
| }, | |
| { | |
| "epoch": 0.5274725274725275, | |
| "grad_norm": 0.1067051887512207, | |
| "learning_rate": 9.972190094381578e-06, | |
| "loss": 1.178, | |
| "step": 192 | |
| }, | |
| { | |
| "epoch": 0.532967032967033, | |
| "grad_norm": 0.09633362293243408, | |
| "learning_rate": 9.96960708521134e-06, | |
| "loss": 1.1763, | |
| "step": 194 | |
| }, | |
| { | |
| "epoch": 0.5384615384615384, | |
| "grad_norm": 0.11866485327482224, | |
| "learning_rate": 9.966909751789758e-06, | |
| "loss": 1.1777, | |
| "step": 196 | |
| }, | |
| { | |
| "epoch": 0.5439560439560439, | |
| "grad_norm": 0.11669634282588959, | |
| "learning_rate": 9.964098156168143e-06, | |
| "loss": 1.1753, | |
| "step": 198 | |
| }, | |
| { | |
| "epoch": 0.5494505494505495, | |
| "grad_norm": 0.1488696038722992, | |
| "learning_rate": 9.96117236302637e-06, | |
| "loss": 1.1656, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.554945054945055, | |
| "grad_norm": 0.1773654669523239, | |
| "learning_rate": 9.958132439671392e-06, | |
| "loss": 1.1809, | |
| "step": 202 | |
| }, | |
| { | |
| "epoch": 0.5604395604395604, | |
| "grad_norm": 0.17689326405525208, | |
| "learning_rate": 9.954978456035695e-06, | |
| "loss": 1.1722, | |
| "step": 204 | |
| }, | |
| { | |
| "epoch": 0.5659340659340659, | |
| "grad_norm": 0.13418439030647278, | |
| "learning_rate": 9.951710484675677e-06, | |
| "loss": 1.167, | |
| "step": 206 | |
| }, | |
| { | |
| "epoch": 0.5714285714285714, | |
| "grad_norm": 0.14982537925243378, | |
| "learning_rate": 9.948328600769996e-06, | |
| "loss": 1.1713, | |
| "step": 208 | |
| }, | |
| { | |
| "epoch": 0.5769230769230769, | |
| "grad_norm": 0.12595325708389282, | |
| "learning_rate": 9.94483288211783e-06, | |
| "loss": 1.1677, | |
| "step": 210 | |
| }, | |
| { | |
| "epoch": 0.5824175824175825, | |
| "grad_norm": 0.10920244455337524, | |
| "learning_rate": 9.941223409137088e-06, | |
| "loss": 1.1727, | |
| "step": 212 | |
| }, | |
| { | |
| "epoch": 0.5879120879120879, | |
| "grad_norm": 0.10856951773166656, | |
| "learning_rate": 9.937500264862567e-06, | |
| "loss": 1.1739, | |
| "step": 214 | |
| }, | |
| { | |
| "epoch": 0.5934065934065934, | |
| "grad_norm": 0.13141585886478424, | |
| "learning_rate": 9.933663534944029e-06, | |
| "loss": 1.176, | |
| "step": 216 | |
| }, | |
| { | |
| "epoch": 0.5989010989010989, | |
| "grad_norm": 0.13502047955989838, | |
| "learning_rate": 9.929713307644245e-06, | |
| "loss": 1.178, | |
| "step": 218 | |
| }, | |
| { | |
| "epoch": 0.6043956043956044, | |
| "grad_norm": 0.1468881517648697, | |
| "learning_rate": 9.925649673836949e-06, | |
| "loss": 1.1819, | |
| "step": 220 | |
| }, | |
| { | |
| "epoch": 0.6098901098901099, | |
| "grad_norm": 0.13739052414894104, | |
| "learning_rate": 9.921472727004765e-06, | |
| "loss": 1.1784, | |
| "step": 222 | |
| }, | |
| { | |
| "epoch": 0.6153846153846154, | |
| "grad_norm": 0.13080830872058868, | |
| "learning_rate": 9.917182563237045e-06, | |
| "loss": 1.1762, | |
| "step": 224 | |
| }, | |
| { | |
| "epoch": 0.6208791208791209, | |
| "grad_norm": 0.11544028669595718, | |
| "learning_rate": 9.912779281227656e-06, | |
| "loss": 1.1733, | |
| "step": 226 | |
| }, | |
| { | |
| "epoch": 0.6263736263736264, | |
| "grad_norm": 0.14270079135894775, | |
| "learning_rate": 9.908262982272724e-06, | |
| "loss": 1.1812, | |
| "step": 228 | |
| }, | |
| { | |
| "epoch": 0.6318681318681318, | |
| "grad_norm": 0.11610660701990128, | |
| "learning_rate": 9.903633770268286e-06, | |
| "loss": 1.1803, | |
| "step": 230 | |
| }, | |
| { | |
| "epoch": 0.6373626373626373, | |
| "grad_norm": 0.10949360579252243, | |
| "learning_rate": 9.89889175170791e-06, | |
| "loss": 1.1776, | |
| "step": 232 | |
| }, | |
| { | |
| "epoch": 0.6428571428571429, | |
| "grad_norm": 0.11754804104566574, | |
| "learning_rate": 9.894037035680246e-06, | |
| "loss": 1.169, | |
| "step": 234 | |
| }, | |
| { | |
| "epoch": 0.6483516483516484, | |
| "grad_norm": 0.1328520029783249, | |
| "learning_rate": 9.889069733866515e-06, | |
| "loss": 1.1663, | |
| "step": 236 | |
| }, | |
| { | |
| "epoch": 0.6538461538461539, | |
| "grad_norm": 0.12216369807720184, | |
| "learning_rate": 9.883989960537934e-06, | |
| "loss": 1.1779, | |
| "step": 238 | |
| }, | |
| { | |
| "epoch": 0.6593406593406593, | |
| "grad_norm": 0.11219753324985504, | |
| "learning_rate": 9.878797832553093e-06, | |
| "loss": 1.1718, | |
| "step": 240 | |
| }, | |
| { | |
| "epoch": 0.6648351648351648, | |
| "grad_norm": 0.11396931111812592, | |
| "learning_rate": 9.873493469355271e-06, | |
| "loss": 1.175, | |
| "step": 242 | |
| }, | |
| { | |
| "epoch": 0.6703296703296703, | |
| "grad_norm": 0.104461669921875, | |
| "learning_rate": 9.868076992969672e-06, | |
| "loss": 1.1761, | |
| "step": 244 | |
| }, | |
| { | |
| "epoch": 0.6758241758241759, | |
| "grad_norm": 0.11892971396446228, | |
| "learning_rate": 9.862548528000644e-06, | |
| "loss": 1.1713, | |
| "step": 246 | |
| }, | |
| { | |
| "epoch": 0.6813186813186813, | |
| "grad_norm": 0.10305490344762802, | |
| "learning_rate": 9.85690820162878e-06, | |
| "loss": 1.1751, | |
| "step": 248 | |
| }, | |
| { | |
| "epoch": 0.6868131868131868, | |
| "grad_norm": 0.11825086921453476, | |
| "learning_rate": 9.851156143608025e-06, | |
| "loss": 1.1749, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.6923076923076923, | |
| "grad_norm": 0.09655775129795074, | |
| "learning_rate": 9.845292486262664e-06, | |
| "loss": 1.1733, | |
| "step": 252 | |
| }, | |
| { | |
| "epoch": 0.6978021978021978, | |
| "grad_norm": 0.09013015031814575, | |
| "learning_rate": 9.839317364484295e-06, | |
| "loss": 1.1736, | |
| "step": 254 | |
| }, | |
| { | |
| "epoch": 0.7032967032967034, | |
| "grad_norm": 0.12731756269931793, | |
| "learning_rate": 9.83323091572872e-06, | |
| "loss": 1.1735, | |
| "step": 256 | |
| }, | |
| { | |
| "epoch": 0.7087912087912088, | |
| "grad_norm": 0.11326012015342712, | |
| "learning_rate": 9.827033280012783e-06, | |
| "loss": 1.1678, | |
| "step": 258 | |
| }, | |
| { | |
| "epoch": 0.7142857142857143, | |
| "grad_norm": 0.09636470675468445, | |
| "learning_rate": 9.820724599911147e-06, | |
| "loss": 1.1735, | |
| "step": 260 | |
| }, | |
| { | |
| "epoch": 0.7197802197802198, | |
| "grad_norm": 0.12517008185386658, | |
| "learning_rate": 9.81430502055302e-06, | |
| "loss": 1.1839, | |
| "step": 262 | |
| }, | |
| { | |
| "epoch": 0.7252747252747253, | |
| "grad_norm": 0.14088405668735504, | |
| "learning_rate": 9.807774689618806e-06, | |
| "loss": 1.1734, | |
| "step": 264 | |
| }, | |
| { | |
| "epoch": 0.7307692307692307, | |
| "grad_norm": 0.09721764922142029, | |
| "learning_rate": 9.801133757336726e-06, | |
| "loss": 1.1681, | |
| "step": 266 | |
| }, | |
| { | |
| "epoch": 0.7362637362637363, | |
| "grad_norm": 0.08106404542922974, | |
| "learning_rate": 9.794382376479334e-06, | |
| "loss": 1.1684, | |
| "step": 268 | |
| }, | |
| { | |
| "epoch": 0.7417582417582418, | |
| "grad_norm": 0.1210760697722435, | |
| "learning_rate": 9.787520702360035e-06, | |
| "loss": 1.1656, | |
| "step": 270 | |
| }, | |
| { | |
| "epoch": 0.7472527472527473, | |
| "grad_norm": 0.12102904915809631, | |
| "learning_rate": 9.780548892829486e-06, | |
| "loss": 1.1743, | |
| "step": 272 | |
| }, | |
| { | |
| "epoch": 0.7527472527472527, | |
| "grad_norm": 0.11343714594841003, | |
| "learning_rate": 9.773467108271978e-06, | |
| "loss": 1.1756, | |
| "step": 274 | |
| }, | |
| { | |
| "epoch": 0.7582417582417582, | |
| "grad_norm": 0.11999528110027313, | |
| "learning_rate": 9.766275511601742e-06, | |
| "loss": 1.1782, | |
| "step": 276 | |
| }, | |
| { | |
| "epoch": 0.7637362637362637, | |
| "grad_norm": 0.148450568318367, | |
| "learning_rate": 9.7589742682592e-06, | |
| "loss": 1.1796, | |
| "step": 278 | |
| }, | |
| { | |
| "epoch": 0.7692307692307693, | |
| "grad_norm": 0.09977131336927414, | |
| "learning_rate": 9.751563546207167e-06, | |
| "loss": 1.1686, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 0.7747252747252747, | |
| "grad_norm": 0.12250227481126785, | |
| "learning_rate": 9.744043515926975e-06, | |
| "loss": 1.1673, | |
| "step": 282 | |
| }, | |
| { | |
| "epoch": 0.7802197802197802, | |
| "grad_norm": 0.12362032383680344, | |
| "learning_rate": 9.736414350414564e-06, | |
| "loss": 1.1707, | |
| "step": 284 | |
| }, | |
| { | |
| "epoch": 0.7857142857142857, | |
| "grad_norm": 0.09825081378221512, | |
| "learning_rate": 9.72867622517649e-06, | |
| "loss": 1.1719, | |
| "step": 286 | |
| }, | |
| { | |
| "epoch": 0.7912087912087912, | |
| "grad_norm": 0.13811303675174713, | |
| "learning_rate": 9.720829318225897e-06, | |
| "loss": 1.1711, | |
| "step": 288 | |
| }, | |
| { | |
| "epoch": 0.7967032967032966, | |
| "grad_norm": 0.14676132798194885, | |
| "learning_rate": 9.712873810078415e-06, | |
| "loss": 1.1728, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 0.8021978021978022, | |
| "grad_norm": 0.09035024791955948, | |
| "learning_rate": 9.704809883748012e-06, | |
| "loss": 1.1776, | |
| "step": 292 | |
| }, | |
| { | |
| "epoch": 0.8021978021978022, | |
| "eval_loss": 1.1732887029647827, | |
| "eval_runtime": 298.7065, | |
| "eval_samples_per_second": 17.328, | |
| "eval_steps_per_second": 0.271, | |
| "step": 292 | |
| }, | |
| { | |
| "epoch": 0.8076923076923077, | |
| "grad_norm": 0.10824787616729736, | |
| "learning_rate": 9.696637724742785e-06, | |
| "loss": 1.1735, | |
| "step": 294 | |
| }, | |
| { | |
| "epoch": 0.8131868131868132, | |
| "grad_norm": 0.10512899607419968, | |
| "learning_rate": 9.688357521060685e-06, | |
| "loss": 1.1702, | |
| "step": 296 | |
| }, | |
| { | |
| "epoch": 0.8186813186813187, | |
| "grad_norm": 0.12007107585668564, | |
| "learning_rate": 9.6799694631852e-06, | |
| "loss": 1.1687, | |
| "step": 298 | |
| }, | |
| { | |
| "epoch": 0.8241758241758241, | |
| "grad_norm": 0.1161542534828186, | |
| "learning_rate": 9.67147374408097e-06, | |
| "loss": 1.1726, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.8296703296703297, | |
| "grad_norm": 0.16156896948814392, | |
| "learning_rate": 9.662870559189344e-06, | |
| "loss": 1.1639, | |
| "step": 302 | |
| }, | |
| { | |
| "epoch": 0.8351648351648352, | |
| "grad_norm": 0.12350093573331833, | |
| "learning_rate": 9.654160106423891e-06, | |
| "loss": 1.1711, | |
| "step": 304 | |
| }, | |
| { | |
| "epoch": 0.8406593406593407, | |
| "grad_norm": 0.1292959749698639, | |
| "learning_rate": 9.645342586165845e-06, | |
| "loss": 1.1677, | |
| "step": 306 | |
| }, | |
| { | |
| "epoch": 0.8461538461538461, | |
| "grad_norm": 0.09656315296888351, | |
| "learning_rate": 9.63641820125949e-06, | |
| "loss": 1.1773, | |
| "step": 308 | |
| }, | |
| { | |
| "epoch": 0.8516483516483516, | |
| "grad_norm": 0.13036847114562988, | |
| "learning_rate": 9.627387157007502e-06, | |
| "loss": 1.1819, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 0.8571428571428571, | |
| "grad_norm": 0.12335841357707977, | |
| "learning_rate": 9.618249661166218e-06, | |
| "loss": 1.1688, | |
| "step": 312 | |
| }, | |
| { | |
| "epoch": 0.8626373626373627, | |
| "grad_norm": 0.13838279247283936, | |
| "learning_rate": 9.609005923940865e-06, | |
| "loss": 1.1665, | |
| "step": 314 | |
| }, | |
| { | |
| "epoch": 0.8681318681318682, | |
| "grad_norm": 0.13689468801021576, | |
| "learning_rate": 9.599656157980715e-06, | |
| "loss": 1.1696, | |
| "step": 316 | |
| }, | |
| { | |
| "epoch": 0.8736263736263736, | |
| "grad_norm": 0.13239113986492157, | |
| "learning_rate": 9.590200578374198e-06, | |
| "loss": 1.1686, | |
| "step": 318 | |
| }, | |
| { | |
| "epoch": 0.8791208791208791, | |
| "grad_norm": 0.09575112909078598, | |
| "learning_rate": 9.580639402643957e-06, | |
| "loss": 1.1673, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 0.8846153846153846, | |
| "grad_norm": 0.11522715538740158, | |
| "learning_rate": 9.570972850741839e-06, | |
| "loss": 1.1685, | |
| "step": 322 | |
| }, | |
| { | |
| "epoch": 0.8901098901098901, | |
| "grad_norm": 0.14126405119895935, | |
| "learning_rate": 9.561201145043835e-06, | |
| "loss": 1.1788, | |
| "step": 324 | |
| }, | |
| { | |
| "epoch": 0.8956043956043956, | |
| "grad_norm": 0.17362023890018463, | |
| "learning_rate": 9.551324510344972e-06, | |
| "loss": 1.1746, | |
| "step": 326 | |
| }, | |
| { | |
| "epoch": 0.9010989010989011, | |
| "grad_norm": 0.1431788206100464, | |
| "learning_rate": 9.541343173854128e-06, | |
| "loss": 1.1719, | |
| "step": 328 | |
| }, | |
| { | |
| "epoch": 0.9065934065934066, | |
| "grad_norm": 0.11747945845127106, | |
| "learning_rate": 9.531257365188818e-06, | |
| "loss": 1.1763, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 0.9120879120879121, | |
| "grad_norm": 0.10873839259147644, | |
| "learning_rate": 9.521067316369903e-06, | |
| "loss": 1.173, | |
| "step": 332 | |
| }, | |
| { | |
| "epoch": 0.9175824175824175, | |
| "grad_norm": 0.10153867304325104, | |
| "learning_rate": 9.510773261816261e-06, | |
| "loss": 1.1681, | |
| "step": 334 | |
| }, | |
| { | |
| "epoch": 0.9230769230769231, | |
| "grad_norm": 0.07133303582668304, | |
| "learning_rate": 9.500375438339384e-06, | |
| "loss": 1.1643, | |
| "step": 336 | |
| }, | |
| { | |
| "epoch": 0.9285714285714286, | |
| "grad_norm": 0.09183746576309204, | |
| "learning_rate": 9.48987408513794e-06, | |
| "loss": 1.1788, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 0.9340659340659341, | |
| "grad_norm": 0.10790558159351349, | |
| "learning_rate": 9.47926944379226e-06, | |
| "loss": 1.1817, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.9395604395604396, | |
| "grad_norm": 0.09112295508384705, | |
| "learning_rate": 9.468561758258795e-06, | |
| "loss": 1.1674, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 0.945054945054945, | |
| "grad_norm": 0.08475514501333237, | |
| "learning_rate": 9.457751274864486e-06, | |
| "loss": 1.1706, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 0.9505494505494505, | |
| "grad_norm": 0.10173246264457703, | |
| "learning_rate": 9.446838242301113e-06, | |
| "loss": 1.1644, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 0.9560439560439561, | |
| "grad_norm": 0.10165045410394669, | |
| "learning_rate": 9.435822911619564e-06, | |
| "loss": 1.1694, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 0.9615384615384616, | |
| "grad_norm": 0.10506884753704071, | |
| "learning_rate": 9.424705536224065e-06, | |
| "loss": 1.1708, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.967032967032967, | |
| "grad_norm": 0.10648410022258759, | |
| "learning_rate": 9.41348637186635e-06, | |
| "loss": 1.1712, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 0.9725274725274725, | |
| "grad_norm": 0.11728779226541519, | |
| "learning_rate": 9.40216567663977e-06, | |
| "loss": 1.1838, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 0.978021978021978, | |
| "grad_norm": 0.12955018877983093, | |
| "learning_rate": 9.390743710973366e-06, | |
| "loss": 1.1773, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 0.9835164835164835, | |
| "grad_norm": 0.14313377439975739, | |
| "learning_rate": 9.379220737625877e-06, | |
| "loss": 1.1693, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 0.989010989010989, | |
| "grad_norm": 0.17154935002326965, | |
| "learning_rate": 9.367597021679686e-06, | |
| "loss": 1.1674, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.9945054945054945, | |
| "grad_norm": 0.13390299677848816, | |
| "learning_rate": 9.35587283053473e-06, | |
| "loss": 1.1636, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 0.13049638271331787, | |
| "learning_rate": 9.344048433902351e-06, | |
| "loss": 1.1678, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 1.0054945054945055, | |
| "grad_norm": 0.11013516038656235, | |
| "learning_rate": 9.332124103799075e-06, | |
| "loss": 1.1627, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 1.010989010989011, | |
| "grad_norm": 0.11757113039493561, | |
| "learning_rate": 9.320100114540382e-06, | |
| "loss": 1.1679, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 1.0164835164835164, | |
| "grad_norm": 0.11947564780712128, | |
| "learning_rate": 9.307976742734366e-06, | |
| "loss": 1.1767, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 1.021978021978022, | |
| "grad_norm": 0.1045905202627182, | |
| "learning_rate": 9.295754267275393e-06, | |
| "loss": 1.1694, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 1.0274725274725274, | |
| "grad_norm": 0.10813715308904648, | |
| "learning_rate": 9.283432969337672e-06, | |
| "loss": 1.1752, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 1.032967032967033, | |
| "grad_norm": 0.11131364107131958, | |
| "learning_rate": 9.271013132368799e-06, | |
| "loss": 1.1714, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 1.0384615384615385, | |
| "grad_norm": 0.09973058849573135, | |
| "learning_rate": 9.258495042083222e-06, | |
| "loss": 1.1696, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 1.043956043956044, | |
| "grad_norm": 0.10416287928819656, | |
| "learning_rate": 9.245878986455684e-06, | |
| "loss": 1.1641, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 1.0494505494505495, | |
| "grad_norm": 0.08854486793279648, | |
| "learning_rate": 9.23316525571458e-06, | |
| "loss": 1.1649, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 1.054945054945055, | |
| "grad_norm": 0.1364666223526001, | |
| "learning_rate": 9.2203541423353e-06, | |
| "loss": 1.1769, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 1.0604395604395604, | |
| "grad_norm": 0.14936769008636475, | |
| "learning_rate": 9.207445941033483e-06, | |
| "loss": 1.1686, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 1.065934065934066, | |
| "grad_norm": 0.18906323611736298, | |
| "learning_rate": 9.19444094875825e-06, | |
| "loss": 1.1692, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 1.0714285714285714, | |
| "grad_norm": 0.44419270753860474, | |
| "learning_rate": 9.18133946468537e-06, | |
| "loss": 1.1746, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 1.0769230769230769, | |
| "grad_norm": 0.1541990488767624, | |
| "learning_rate": 9.16814179021037e-06, | |
| "loss": 1.1735, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 1.0824175824175823, | |
| "grad_norm": 0.10936509072780609, | |
| "learning_rate": 9.154848228941607e-06, | |
| "loss": 1.1734, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 1.0879120879120878, | |
| "grad_norm": 0.0885339230298996, | |
| "learning_rate": 9.14145908669329e-06, | |
| "loss": 1.1702, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 1.0934065934065935, | |
| "grad_norm": 0.07241977006196976, | |
| "learning_rate": 9.127974671478432e-06, | |
| "loss": 1.1714, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 1.098901098901099, | |
| "grad_norm": 0.08955956995487213, | |
| "learning_rate": 9.114395293501775e-06, | |
| "loss": 1.1712, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 1.1043956043956045, | |
| "grad_norm": 0.11948683112859726, | |
| "learning_rate": 9.100721265152644e-06, | |
| "loss": 1.1799, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 1.10989010989011, | |
| "grad_norm": 0.11843076348304749, | |
| "learning_rate": 9.086952900997774e-06, | |
| "loss": 1.1716, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 1.1153846153846154, | |
| "grad_norm": 0.08606066554784775, | |
| "learning_rate": 9.073090517774057e-06, | |
| "loss": 1.1777, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 1.120879120879121, | |
| "grad_norm": 0.08759862184524536, | |
| "learning_rate": 9.059134434381274e-06, | |
| "loss": 1.1698, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 1.1263736263736264, | |
| "grad_norm": 0.10086913406848907, | |
| "learning_rate": 9.045084971874738e-06, | |
| "loss": 1.1641, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 1.1318681318681318, | |
| "grad_norm": 0.11529091000556946, | |
| "learning_rate": 9.030942453457928e-06, | |
| "loss": 1.1655, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 1.1373626373626373, | |
| "grad_norm": 0.0781669095158577, | |
| "learning_rate": 9.01670720447504e-06, | |
| "loss": 1.1678, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 1.1428571428571428, | |
| "grad_norm": 0.09989538788795471, | |
| "learning_rate": 9.00237955240351e-06, | |
| "loss": 1.1689, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 1.1483516483516483, | |
| "grad_norm": 0.12073778361082077, | |
| "learning_rate": 8.987959826846479e-06, | |
| "loss": 1.1747, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 1.1538461538461537, | |
| "grad_norm": 0.12605726718902588, | |
| "learning_rate": 8.973448359525207e-06, | |
| "loss": 1.1725, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 1.1593406593406592, | |
| "grad_norm": 0.12503278255462646, | |
| "learning_rate": 8.958845484271443e-06, | |
| "loss": 1.1709, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 1.164835164835165, | |
| "grad_norm": 0.10487185418605804, | |
| "learning_rate": 8.944151537019752e-06, | |
| "loss": 1.1566, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 1.1703296703296704, | |
| "grad_norm": 0.09222570061683655, | |
| "learning_rate": 8.929366855799777e-06, | |
| "loss": 1.1808, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 1.1758241758241759, | |
| "grad_norm": 0.10268251597881317, | |
| "learning_rate": 8.914491780728471e-06, | |
| "loss": 1.1782, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 1.1813186813186813, | |
| "grad_norm": 0.08701249957084656, | |
| "learning_rate": 8.899526654002268e-06, | |
| "loss": 1.1716, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 1.1868131868131868, | |
| "grad_norm": 0.11233299225568771, | |
| "learning_rate": 8.88447181988921e-06, | |
| "loss": 1.1699, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 1.1923076923076923, | |
| "grad_norm": 0.09972134232521057, | |
| "learning_rate": 8.869327624721033e-06, | |
| "loss": 1.1687, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 1.1978021978021978, | |
| "grad_norm": 0.10448331385850906, | |
| "learning_rate": 8.854094416885192e-06, | |
| "loss": 1.1656, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 1.2032967032967032, | |
| "grad_norm": 0.11365893483161926, | |
| "learning_rate": 8.838772546816857e-06, | |
| "loss": 1.1764, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 1.2032967032967032, | |
| "eval_loss": 1.1709927320480347, | |
| "eval_runtime": 299.4182, | |
| "eval_samples_per_second": 17.287, | |
| "eval_steps_per_second": 0.271, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 1.2087912087912087, | |
| "grad_norm": 0.1238979920744896, | |
| "learning_rate": 8.823362366990833e-06, | |
| "loss": 1.1718, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 1.2142857142857142, | |
| "grad_norm": 0.12039055675268173, | |
| "learning_rate": 8.807864231913475e-06, | |
| "loss": 1.1686, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 1.2197802197802199, | |
| "grad_norm": 0.09191784262657166, | |
| "learning_rate": 8.792278498114517e-06, | |
| "loss": 1.1702, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 1.2252747252747254, | |
| "grad_norm": 0.08927666395902634, | |
| "learning_rate": 8.77660552413887e-06, | |
| "loss": 1.1761, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 1.2307692307692308, | |
| "grad_norm": 0.08614211529493332, | |
| "learning_rate": 8.760845670538387e-06, | |
| "loss": 1.1743, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 1.2362637362637363, | |
| "grad_norm": 0.10282223671674728, | |
| "learning_rate": 8.744999299863549e-06, | |
| "loss": 1.1705, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 1.2417582417582418, | |
| "grad_norm": 0.10722998529672623, | |
| "learning_rate": 8.729066776655144e-06, | |
| "loss": 1.169, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 1.2472527472527473, | |
| "grad_norm": 0.09755399823188782, | |
| "learning_rate": 8.713048467435865e-06, | |
| "loss": 1.1732, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 1.2527472527472527, | |
| "grad_norm": 0.09675360471010208, | |
| "learning_rate": 8.696944740701891e-06, | |
| "loss": 1.1696, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 1.2582417582417582, | |
| "grad_norm": 0.08405736833810806, | |
| "learning_rate": 8.6807559669144e-06, | |
| "loss": 1.1678, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 1.2637362637362637, | |
| "grad_norm": 0.08441252261400223, | |
| "learning_rate": 8.664482518491053e-06, | |
| "loss": 1.1684, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 1.2692307692307692, | |
| "grad_norm": 0.11046714335680008, | |
| "learning_rate": 8.648124769797424e-06, | |
| "loss": 1.1747, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 1.2747252747252746, | |
| "grad_norm": 0.10532383620738983, | |
| "learning_rate": 8.631683097138386e-06, | |
| "loss": 1.1728, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 1.2802197802197801, | |
| "grad_norm": 0.09521500766277313, | |
| "learning_rate": 8.615157878749462e-06, | |
| "loss": 1.169, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 1.2857142857142856, | |
| "grad_norm": 0.09845519065856934, | |
| "learning_rate": 8.598549494788111e-06, | |
| "loss": 1.1667, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 1.2912087912087913, | |
| "grad_norm": 0.10502909123897552, | |
| "learning_rate": 8.581858327324996e-06, | |
| "loss": 1.1709, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 1.2967032967032968, | |
| "grad_norm": 0.0786222293972969, | |
| "learning_rate": 8.565084760335188e-06, | |
| "loss": 1.165, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 1.3021978021978022, | |
| "grad_norm": 0.10599285364151001, | |
| "learning_rate": 8.548229179689325e-06, | |
| "loss": 1.1718, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 1.3076923076923077, | |
| "grad_norm": 0.11965076625347137, | |
| "learning_rate": 8.531291973144755e-06, | |
| "loss": 1.1683, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 1.3131868131868132, | |
| "grad_norm": 0.10626693069934845, | |
| "learning_rate": 8.5142735303366e-06, | |
| "loss": 1.1676, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 1.3186813186813187, | |
| "grad_norm": 0.10863650590181351, | |
| "learning_rate": 8.497174242768792e-06, | |
| "loss": 1.17, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 1.3241758241758241, | |
| "grad_norm": 0.11272372305393219, | |
| "learning_rate": 8.479994503805079e-06, | |
| "loss": 1.1674, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 1.3296703296703296, | |
| "grad_norm": 0.1116040050983429, | |
| "learning_rate": 8.462734708659959e-06, | |
| "loss": 1.1711, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 1.335164835164835, | |
| "grad_norm": 0.1068933829665184, | |
| "learning_rate": 8.445395254389605e-06, | |
| "loss": 1.179, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 1.3406593406593408, | |
| "grad_norm": 0.11034112423658371, | |
| "learning_rate": 8.427976539882725e-06, | |
| "loss": 1.1617, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 1.3461538461538463, | |
| "grad_norm": 0.11086605489253998, | |
| "learning_rate": 8.410478965851371e-06, | |
| "loss": 1.171, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 1.3516483516483517, | |
| "grad_norm": 0.1053035780787468, | |
| "learning_rate": 8.39290293482175e-06, | |
| "loss": 1.1696, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 1.3571428571428572, | |
| "grad_norm": 0.11563800275325775, | |
| "learning_rate": 8.375248851124937e-06, | |
| "loss": 1.1817, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 1.3626373626373627, | |
| "grad_norm": 0.09770724177360535, | |
| "learning_rate": 8.357517120887586e-06, | |
| "loss": 1.1726, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 1.3681318681318682, | |
| "grad_norm": 0.10480888187885284, | |
| "learning_rate": 8.339708152022586e-06, | |
| "loss": 1.1716, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 1.3736263736263736, | |
| "grad_norm": 0.0983305424451828, | |
| "learning_rate": 8.321822354219677e-06, | |
| "loss": 1.1708, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 1.379120879120879, | |
| "grad_norm": 0.11572114378213882, | |
| "learning_rate": 8.303860138936027e-06, | |
| "loss": 1.1669, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 1.3846153846153846, | |
| "grad_norm": 0.15876273810863495, | |
| "learning_rate": 8.285821919386758e-06, | |
| "loss": 1.172, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 1.39010989010989, | |
| "grad_norm": 0.09302208572626114, | |
| "learning_rate": 8.267708110535449e-06, | |
| "loss": 1.1689, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 1.3956043956043955, | |
| "grad_norm": 0.0982600748538971, | |
| "learning_rate": 8.24951912908459e-06, | |
| "loss": 1.1683, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 1.401098901098901, | |
| "grad_norm": 0.1328704059123993, | |
| "learning_rate": 8.231255393465993e-06, | |
| "loss": 1.1686, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 1.4065934065934065, | |
| "grad_norm": 0.1403021216392517, | |
| "learning_rate": 8.21291732383116e-06, | |
| "loss": 1.1654, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 1.412087912087912, | |
| "grad_norm": 0.10671450942754745, | |
| "learning_rate": 8.19450534204163e-06, | |
| "loss": 1.1671, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 1.4175824175824177, | |
| "grad_norm": 0.10965994000434875, | |
| "learning_rate": 8.176019871659263e-06, | |
| "loss": 1.1791, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 1.4230769230769231, | |
| "grad_norm": 0.09535852819681168, | |
| "learning_rate": 8.157461337936506e-06, | |
| "loss": 1.1654, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 1.4285714285714286, | |
| "grad_norm": 0.09910845011472702, | |
| "learning_rate": 8.138830167806601e-06, | |
| "loss": 1.1612, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 1.434065934065934, | |
| "grad_norm": 0.08044712990522385, | |
| "learning_rate": 8.120126789873775e-06, | |
| "loss": 1.169, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 1.4395604395604396, | |
| "grad_norm": 0.11208353191614151, | |
| "learning_rate": 8.10135163440336e-06, | |
| "loss": 1.1676, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 1.445054945054945, | |
| "grad_norm": 0.09039778262376785, | |
| "learning_rate": 8.08250513331192e-06, | |
| "loss": 1.1647, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 1.4505494505494505, | |
| "grad_norm": 0.08799196779727936, | |
| "learning_rate": 8.063587720157298e-06, | |
| "loss": 1.1594, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 1.456043956043956, | |
| "grad_norm": 0.09703148156404495, | |
| "learning_rate": 8.044599830128643e-06, | |
| "loss": 1.1751, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 1.4615384615384617, | |
| "grad_norm": 0.10245784372091293, | |
| "learning_rate": 8.02554190003641e-06, | |
| "loss": 1.1746, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 1.4670329670329672, | |
| "grad_norm": 0.09949609637260437, | |
| "learning_rate": 8.006414368302297e-06, | |
| "loss": 1.1696, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 1.4725274725274726, | |
| "grad_norm": 0.11576993763446808, | |
| "learning_rate": 7.98721767494917e-06, | |
| "loss": 1.1662, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 1.478021978021978, | |
| "grad_norm": 0.09676701575517654, | |
| "learning_rate": 7.967952261590936e-06, | |
| "loss": 1.1652, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 1.4835164835164836, | |
| "grad_norm": 0.10620467364788055, | |
| "learning_rate": 7.94861857142238e-06, | |
| "loss": 1.1659, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 1.489010989010989, | |
| "grad_norm": 0.09917207062244415, | |
| "learning_rate": 7.929217049208977e-06, | |
| "loss": 1.175, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 1.4945054945054945, | |
| "grad_norm": 0.131545290350914, | |
| "learning_rate": 7.90974814127666e-06, | |
| "loss": 1.1619, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 1.5, | |
| "grad_norm": 0.16173675656318665, | |
| "learning_rate": 7.890212295501542e-06, | |
| "loss": 1.1718, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 1.5054945054945055, | |
| "grad_norm": 0.11190006136894226, | |
| "learning_rate": 7.870609961299627e-06, | |
| "loss": 1.1707, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 1.510989010989011, | |
| "grad_norm": 0.0947834774851799, | |
| "learning_rate": 7.850941589616458e-06, | |
| "loss": 1.1703, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 1.5164835164835164, | |
| "grad_norm": 0.12736476957798004, | |
| "learning_rate": 7.831207632916757e-06, | |
| "loss": 1.1743, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 1.521978021978022, | |
| "grad_norm": 0.17213790118694305, | |
| "learning_rate": 7.811408545174001e-06, | |
| "loss": 1.1763, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 1.5274725274725274, | |
| "grad_norm": 0.26195263862609863, | |
| "learning_rate": 7.791544781859993e-06, | |
| "loss": 1.1741, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 1.5329670329670328, | |
| "grad_norm": 0.1335473209619522, | |
| "learning_rate": 7.771616799934372e-06, | |
| "loss": 1.169, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 1.5384615384615383, | |
| "grad_norm": 0.11248943954706192, | |
| "learning_rate": 7.751625057834107e-06, | |
| "loss": 1.1611, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 1.5439560439560438, | |
| "grad_norm": 0.12497013807296753, | |
| "learning_rate": 7.731570015462953e-06, | |
| "loss": 1.1657, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 1.5494505494505495, | |
| "grad_norm": 0.10039670765399933, | |
| "learning_rate": 7.711452134180865e-06, | |
| "loss": 1.1689, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 1.554945054945055, | |
| "grad_norm": 0.09104856103658676, | |
| "learning_rate": 7.691271876793387e-06, | |
| "loss": 1.1691, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 1.5604395604395604, | |
| "grad_norm": 0.10671708732843399, | |
| "learning_rate": 7.67102970754101e-06, | |
| "loss": 1.1622, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 1.565934065934066, | |
| "grad_norm": 0.10622277110815048, | |
| "learning_rate": 7.65072609208848e-06, | |
| "loss": 1.1716, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 1.5714285714285714, | |
| "grad_norm": 0.09694940596818924, | |
| "learning_rate": 7.630361497514104e-06, | |
| "loss": 1.1681, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 1.5769230769230769, | |
| "grad_norm": 0.11600632965564728, | |
| "learning_rate": 7.6099363922989845e-06, | |
| "loss": 1.169, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 1.5824175824175826, | |
| "grad_norm": 0.10654427111148834, | |
| "learning_rate": 7.5894512463162595e-06, | |
| "loss": 1.1711, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 1.587912087912088, | |
| "grad_norm": 0.08246386051177979, | |
| "learning_rate": 7.568906530820281e-06, | |
| "loss": 1.1661, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 1.5934065934065935, | |
| "grad_norm": 0.09424301981925964, | |
| "learning_rate": 7.5483027184357825e-06, | |
| "loss": 1.166, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 1.598901098901099, | |
| "grad_norm": 0.08908534795045853, | |
| "learning_rate": 7.527640283147003e-06, | |
| "loss": 1.1698, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 1.6043956043956045, | |
| "grad_norm": 0.10692695528268814, | |
| "learning_rate": 7.50691970028678e-06, | |
| "loss": 1.1701, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 1.6043956043956045, | |
| "eval_loss": 1.1695584058761597, | |
| "eval_runtime": 299.2734, | |
| "eval_samples_per_second": 17.295, | |
| "eval_steps_per_second": 0.271, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 1.60989010989011, | |
| "grad_norm": 0.09625241905450821, | |
| "learning_rate": 7.486141446525619e-06, | |
| "loss": 1.1734, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 1.6153846153846154, | |
| "grad_norm": 0.09392037242650986, | |
| "learning_rate": 7.465305999860728e-06, | |
| "loss": 1.1698, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 1.620879120879121, | |
| "grad_norm": 0.08401469886302948, | |
| "learning_rate": 7.444413839605017e-06, | |
| "loss": 1.1659, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 1.6263736263736264, | |
| "grad_norm": 0.11073325574398041, | |
| "learning_rate": 7.423465446376079e-06, | |
| "loss": 1.166, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 1.6318681318681318, | |
| "grad_norm": 0.09533084183931351, | |
| "learning_rate": 7.402461302085121e-06, | |
| "loss": 1.1734, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 1.6373626373626373, | |
| "grad_norm": 0.08371090888977051, | |
| "learning_rate": 7.381401889925894e-06, | |
| "loss": 1.167, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 1.6428571428571428, | |
| "grad_norm": 0.0968126654624939, | |
| "learning_rate": 7.360287694363566e-06, | |
| "loss": 1.1679, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 1.6483516483516483, | |
| "grad_norm": 0.09040292352437973, | |
| "learning_rate": 7.3391192011235764e-06, | |
| "loss": 1.1754, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 1.6538461538461537, | |
| "grad_norm": 0.09428130835294724, | |
| "learning_rate": 7.317896897180472e-06, | |
| "loss": 1.1713, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 1.6593406593406592, | |
| "grad_norm": 0.08703873306512833, | |
| "learning_rate": 7.296621270746691e-06, | |
| "loss": 1.1688, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 1.6648351648351647, | |
| "grad_norm": 0.09342585504055023, | |
| "learning_rate": 7.275292811261346e-06, | |
| "loss": 1.1651, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 1.6703296703296702, | |
| "grad_norm": 0.11370475590229034, | |
| "learning_rate": 7.253912009378953e-06, | |
| "loss": 1.1721, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 1.6758241758241759, | |
| "grad_norm": 0.0810871347784996, | |
| "learning_rate": 7.2324793569581474e-06, | |
| "loss": 1.174, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 1.6813186813186813, | |
| "grad_norm": 0.09331346303224564, | |
| "learning_rate": 7.210995347050372e-06, | |
| "loss": 1.1691, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 1.6868131868131868, | |
| "grad_norm": 0.1314326524734497, | |
| "learning_rate": 7.189460473888535e-06, | |
| "loss": 1.1672, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 1.6923076923076923, | |
| "grad_norm": 0.12525762617588043, | |
| "learning_rate": 7.167875232875632e-06, | |
| "loss": 1.1645, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 1.6978021978021978, | |
| "grad_norm": 0.13776274025440216, | |
| "learning_rate": 7.146240120573358e-06, | |
| "loss": 1.1693, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 1.7032967032967035, | |
| "grad_norm": 0.1336820125579834, | |
| "learning_rate": 7.124555634690684e-06, | |
| "loss": 1.1649, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 1.708791208791209, | |
| "grad_norm": 0.12508928775787354, | |
| "learning_rate": 7.1028222740724e-06, | |
| "loss": 1.169, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 1.7142857142857144, | |
| "grad_norm": 0.10577461123466492, | |
| "learning_rate": 7.081040538687649e-06, | |
| "loss": 1.1575, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 1.7197802197802199, | |
| "grad_norm": 0.11875636875629425, | |
| "learning_rate": 7.059210929618416e-06, | |
| "loss": 1.1666, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 1.7252747252747254, | |
| "grad_norm": 0.10840770602226257, | |
| "learning_rate": 7.037333949048005e-06, | |
| "loss": 1.1664, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 1.7307692307692308, | |
| "grad_norm": 0.11352021992206573, | |
| "learning_rate": 7.0154101002494914e-06, | |
| "loss": 1.1643, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 1.7362637362637363, | |
| "grad_norm": 0.08095169812440872, | |
| "learning_rate": 6.993439887574133e-06, | |
| "loss": 1.1691, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 1.7417582417582418, | |
| "grad_norm": 0.09175344556570053, | |
| "learning_rate": 6.971423816439782e-06, | |
| "loss": 1.1664, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 1.7472527472527473, | |
| "grad_norm": 0.07321044057607651, | |
| "learning_rate": 6.949362393319239e-06, | |
| "loss": 1.1649, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 1.7527472527472527, | |
| "grad_norm": 0.10566896945238113, | |
| "learning_rate": 6.927256125728624e-06, | |
| "loss": 1.167, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 1.7582417582417582, | |
| "grad_norm": 0.10230179876089096, | |
| "learning_rate": 6.905105522215684e-06, | |
| "loss": 1.1569, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 1.7637362637362637, | |
| "grad_norm": 0.09293391555547714, | |
| "learning_rate": 6.8829110923481e-06, | |
| "loss": 1.1677, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 1.7692307692307692, | |
| "grad_norm": 0.08087108284235, | |
| "learning_rate": 6.8606733467017675e-06, | |
| "loss": 1.1691, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 1.7747252747252746, | |
| "grad_norm": 0.08191601186990738, | |
| "learning_rate": 6.838392796849042e-06, | |
| "loss": 1.1683, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 1.7802197802197801, | |
| "grad_norm": 0.08746813237667084, | |
| "learning_rate": 6.816069955346986e-06, | |
| "loss": 1.1643, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 1.7857142857142856, | |
| "grad_norm": 0.10125511139631271, | |
| "learning_rate": 6.7937053357255585e-06, | |
| "loss": 1.1716, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.791208791208791, | |
| "grad_norm": 0.08800845593214035, | |
| "learning_rate": 6.771299452475818e-06, | |
| "loss": 1.1721, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 1.7967032967032965, | |
| "grad_norm": 0.09477917104959488, | |
| "learning_rate": 6.748852821038075e-06, | |
| "loss": 1.1649, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 1.8021978021978022, | |
| "grad_norm": 0.10864371061325073, | |
| "learning_rate": 6.7263659577900375e-06, | |
| "loss": 1.1621, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 1.8076923076923077, | |
| "grad_norm": 0.07818944752216339, | |
| "learning_rate": 6.703839380034945e-06, | |
| "loss": 1.1667, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 1.8131868131868132, | |
| "grad_norm": 0.0869673416018486, | |
| "learning_rate": 6.681273605989643e-06, | |
| "loss": 1.1692, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.8186813186813187, | |
| "grad_norm": 0.10159242898225784, | |
| "learning_rate": 6.6586691547726855e-06, | |
| "loss": 1.1708, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 1.8241758241758241, | |
| "grad_norm": 0.20149534940719604, | |
| "learning_rate": 6.636026546392374e-06, | |
| "loss": 1.175, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 1.8296703296703298, | |
| "grad_norm": 0.35707324743270874, | |
| "learning_rate": 6.613346301734813e-06, | |
| "loss": 1.1556, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 1.8351648351648353, | |
| "grad_norm": 0.10949152708053589, | |
| "learning_rate": 6.590628942551909e-06, | |
| "loss": 1.1696, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 1.8406593406593408, | |
| "grad_norm": 0.07581349462270737, | |
| "learning_rate": 6.567874991449383e-06, | |
| "loss": 1.1701, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.8461538461538463, | |
| "grad_norm": 0.11389974504709244, | |
| "learning_rate": 6.545084971874738e-06, | |
| "loss": 1.1721, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 1.8516483516483517, | |
| "grad_norm": 0.10378382354974747, | |
| "learning_rate": 6.522259408105223e-06, | |
| "loss": 1.1674, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 1.8571428571428572, | |
| "grad_norm": 0.09723298251628876, | |
| "learning_rate": 6.499398825235767e-06, | |
| "loss": 1.1696, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 1.8626373626373627, | |
| "grad_norm": 0.10554076731204987, | |
| "learning_rate": 6.476503749166903e-06, | |
| "loss": 1.1674, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 1.8681318681318682, | |
| "grad_norm": 0.09231211990118027, | |
| "learning_rate": 6.453574706592676e-06, | |
| "loss": 1.1652, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.8736263736263736, | |
| "grad_norm": 0.09046723693609238, | |
| "learning_rate": 6.4306122249885105e-06, | |
| "loss": 1.1716, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 1.879120879120879, | |
| "grad_norm": 0.09619590640068054, | |
| "learning_rate": 6.407616832599091e-06, | |
| "loss": 1.1699, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 1.8846153846153846, | |
| "grad_norm": 0.08878965675830841, | |
| "learning_rate": 6.384589058426201e-06, | |
| "loss": 1.1701, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 1.89010989010989, | |
| "grad_norm": 0.0882963240146637, | |
| "learning_rate": 6.36152943221656e-06, | |
| "loss": 1.1757, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 1.8956043956043955, | |
| "grad_norm": 0.10285267233848572, | |
| "learning_rate": 6.338438484449632e-06, | |
| "loss": 1.1612, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.901098901098901, | |
| "grad_norm": 0.08342345803976059, | |
| "learning_rate": 6.31531674632542e-06, | |
| "loss": 1.1696, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 1.9065934065934065, | |
| "grad_norm": 0.09322002530097961, | |
| "learning_rate": 6.292164749752256e-06, | |
| "loss": 1.1733, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 1.912087912087912, | |
| "grad_norm": 0.10594601929187775, | |
| "learning_rate": 6.268983027334557e-06, | |
| "loss": 1.1675, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 1.9175824175824174, | |
| "grad_norm": 0.10415042191743851, | |
| "learning_rate": 6.245772112360568e-06, | |
| "loss": 1.1706, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 1.9230769230769231, | |
| "grad_norm": 0.08474233746528625, | |
| "learning_rate": 6.222532538790107e-06, | |
| "loss": 1.1717, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.9285714285714286, | |
| "grad_norm": 0.09690573066473007, | |
| "learning_rate": 6.199264841242267e-06, | |
| "loss": 1.1579, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 1.934065934065934, | |
| "grad_norm": 0.09174709767103195, | |
| "learning_rate": 6.17596955498313e-06, | |
| "loss": 1.1707, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 1.9395604395604396, | |
| "grad_norm": 0.09256250411272049, | |
| "learning_rate": 6.1526472159134454e-06, | |
| "loss": 1.1675, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 1.945054945054945, | |
| "grad_norm": 0.115385040640831, | |
| "learning_rate": 6.129298360556304e-06, | |
| "loss": 1.1666, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 1.9505494505494505, | |
| "grad_norm": 0.09870624542236328, | |
| "learning_rate": 6.105923526044794e-06, | |
| "loss": 1.1712, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.9560439560439562, | |
| "grad_norm": 0.09220367670059204, | |
| "learning_rate": 6.08252325010965e-06, | |
| "loss": 1.1702, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 1.9615384615384617, | |
| "grad_norm": 0.08229216188192368, | |
| "learning_rate": 6.059098071066874e-06, | |
| "loss": 1.1641, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 1.9670329670329672, | |
| "grad_norm": 0.08601760119199753, | |
| "learning_rate": 6.035648527805359e-06, | |
| "loss": 1.1658, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 1.9725274725274726, | |
| "grad_norm": 0.09920581430196762, | |
| "learning_rate": 6.012175159774488e-06, | |
| "loss": 1.1627, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 1.978021978021978, | |
| "grad_norm": 0.09022627770900726, | |
| "learning_rate": 5.988678506971726e-06, | |
| "loss": 1.1723, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.9835164835164836, | |
| "grad_norm": 0.08053141087293625, | |
| "learning_rate": 5.965159109930196e-06, | |
| "loss": 1.1785, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 1.989010989010989, | |
| "grad_norm": 0.07670550793409348, | |
| "learning_rate": 5.941617509706247e-06, | |
| "loss": 1.1606, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 1.9945054945054945, | |
| "grad_norm": 0.0779750868678093, | |
| "learning_rate": 5.9180542478670025e-06, | |
| "loss": 1.1624, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "grad_norm": 0.0955984815955162, | |
| "learning_rate": 5.894469866477905e-06, | |
| "loss": 1.1647, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 2.0054945054945055, | |
| "grad_norm": 0.08561510592699051, | |
| "learning_rate": 5.87086490809025e-06, | |
| "loss": 1.1675, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 2.0054945054945055, | |
| "eval_loss": 1.168563961982727, | |
| "eval_runtime": 298.1262, | |
| "eval_samples_per_second": 17.362, | |
| "eval_steps_per_second": 0.272, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 2.010989010989011, | |
| "grad_norm": 0.08887989073991776, | |
| "learning_rate": 5.847239915728695e-06, | |
| "loss": 1.1711, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 2.0164835164835164, | |
| "grad_norm": 0.1793489307165146, | |
| "learning_rate": 5.823595432878775e-06, | |
| "loss": 1.1689, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 2.021978021978022, | |
| "grad_norm": 0.14795880019664764, | |
| "learning_rate": 5.799932003474398e-06, | |
| "loss": 1.1692, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 2.0274725274725274, | |
| "grad_norm": 0.08862569183111191, | |
| "learning_rate": 5.776250171885329e-06, | |
| "loss": 1.1653, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 2.032967032967033, | |
| "grad_norm": 0.10424422472715378, | |
| "learning_rate": 5.752550482904674e-06, | |
| "loss": 1.1651, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 2.0384615384615383, | |
| "grad_norm": 0.0930488184094429, | |
| "learning_rate": 5.728833481736339e-06, | |
| "loss": 1.1666, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 2.043956043956044, | |
| "grad_norm": 0.1007128432393074, | |
| "learning_rate": 5.705099713982491e-06, | |
| "loss": 1.1737, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 2.0494505494505493, | |
| "grad_norm": 0.0997442975640297, | |
| "learning_rate": 5.6813497256310124e-06, | |
| "loss": 1.1701, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 2.0549450549450547, | |
| "grad_norm": 0.09021608531475067, | |
| "learning_rate": 5.6575840630429295e-06, | |
| "loss": 1.1677, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 2.0604395604395602, | |
| "grad_norm": 0.08044509589672089, | |
| "learning_rate": 5.633803272939851e-06, | |
| "loss": 1.1696, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 2.065934065934066, | |
| "grad_norm": 0.08909393101930618, | |
| "learning_rate": 5.610007902391387e-06, | |
| "loss": 1.1616, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 2.0714285714285716, | |
| "grad_norm": 0.08932027965784073, | |
| "learning_rate": 5.586198498802577e-06, | |
| "loss": 1.1623, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 2.076923076923077, | |
| "grad_norm": 0.1681545227766037, | |
| "learning_rate": 5.562375609901273e-06, | |
| "loss": 1.1654, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 2.0824175824175826, | |
| "grad_norm": 0.1894630342721939, | |
| "learning_rate": 5.538539783725556e-06, | |
| "loss": 1.1681, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 2.087912087912088, | |
| "grad_norm": 0.11349444836378098, | |
| "learning_rate": 5.51469156861113e-06, | |
| "loss": 1.1688, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 2.0934065934065935, | |
| "grad_norm": 0.08764316141605377, | |
| "learning_rate": 5.490831513178698e-06, | |
| "loss": 1.1718, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 2.098901098901099, | |
| "grad_norm": 0.08182983100414276, | |
| "learning_rate": 5.466960166321348e-06, | |
| "loss": 1.1698, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 2.1043956043956045, | |
| "grad_norm": 0.08446374535560608, | |
| "learning_rate": 5.44307807719192e-06, | |
| "loss": 1.1605, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 2.10989010989011, | |
| "grad_norm": 0.06986381858587265, | |
| "learning_rate": 5.4191857951903825e-06, | |
| "loss": 1.1661, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 2.1153846153846154, | |
| "grad_norm": 0.09340260177850723, | |
| "learning_rate": 5.395283869951184e-06, | |
| "loss": 1.1685, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 2.120879120879121, | |
| "grad_norm": 0.10494975745677948, | |
| "learning_rate": 5.371372851330612e-06, | |
| "loss": 1.1748, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 2.1263736263736264, | |
| "grad_norm": 0.09624402970075607, | |
| "learning_rate": 5.347453289394146e-06, | |
| "loss": 1.1683, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 2.131868131868132, | |
| "grad_norm": 0.09158363193273544, | |
| "learning_rate": 5.3235257344037996e-06, | |
| "loss": 1.1693, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 2.1373626373626373, | |
| "grad_norm": 0.07997617870569229, | |
| "learning_rate": 5.29959073680547e-06, | |
| "loss": 1.1671, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 2.142857142857143, | |
| "grad_norm": 0.08307652175426483, | |
| "learning_rate": 5.275648847216263e-06, | |
| "loss": 1.1648, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 2.1483516483516483, | |
| "grad_norm": 0.09365742653608322, | |
| "learning_rate": 5.251700616411836e-06, | |
| "loss": 1.1738, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 2.1538461538461537, | |
| "grad_norm": 0.09766220301389694, | |
| "learning_rate": 5.22774659531372e-06, | |
| "loss": 1.1649, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 2.159340659340659, | |
| "grad_norm": 0.10807793587446213, | |
| "learning_rate": 5.203787334976655e-06, | |
| "loss": 1.1728, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 2.1648351648351647, | |
| "grad_norm": 0.07477546483278275, | |
| "learning_rate": 5.179823386575908e-06, | |
| "loss": 1.1701, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 2.17032967032967, | |
| "grad_norm": 0.08150489628314972, | |
| "learning_rate": 5.155855301394585e-06, | |
| "loss": 1.1672, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 2.1758241758241756, | |
| "grad_norm": 0.11139123886823654, | |
| "learning_rate": 5.131883630810966e-06, | |
| "loss": 1.172, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 2.181318681318681, | |
| "grad_norm": 0.09428671002388, | |
| "learning_rate": 5.107908926285813e-06, | |
| "loss": 1.1662, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 2.186813186813187, | |
| "grad_norm": 0.11421363055706024, | |
| "learning_rate": 5.083931739349675e-06, | |
| "loss": 1.1666, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 2.1923076923076925, | |
| "grad_norm": 0.08080089837312698, | |
| "learning_rate": 5.059952621590216e-06, | |
| "loss": 1.1681, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 2.197802197802198, | |
| "grad_norm": 0.09634223580360413, | |
| "learning_rate": 5.035972124639511e-06, | |
| "loss": 1.1654, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 2.2032967032967035, | |
| "grad_norm": 0.08151613175868988, | |
| "learning_rate": 5.011990800161369e-06, | |
| "loss": 1.1668, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 2.208791208791209, | |
| "grad_norm": 0.09357167035341263, | |
| "learning_rate": 4.988009199838632e-06, | |
| "loss": 1.1611, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 2.2142857142857144, | |
| "grad_norm": 0.08290430158376694, | |
| "learning_rate": 4.96402787536049e-06, | |
| "loss": 1.1562, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 2.21978021978022, | |
| "grad_norm": 0.09985602647066116, | |
| "learning_rate": 4.940047378409786e-06, | |
| "loss": 1.1716, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 2.2252747252747254, | |
| "grad_norm": 0.08841745555400848, | |
| "learning_rate": 4.9160682606503255e-06, | |
| "loss": 1.1723, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 2.230769230769231, | |
| "grad_norm": 0.08120245486497879, | |
| "learning_rate": 4.892091073714189e-06, | |
| "loss": 1.1642, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 2.2362637362637363, | |
| "grad_norm": 0.08893705904483795, | |
| "learning_rate": 4.868116369189033e-06, | |
| "loss": 1.1614, | |
| "step": 814 | |
| }, | |
| { | |
| "epoch": 2.241758241758242, | |
| "grad_norm": 0.16162922978401184, | |
| "learning_rate": 4.844144698605418e-06, | |
| "loss": 1.1682, | |
| "step": 816 | |
| }, | |
| { | |
| "epoch": 2.2472527472527473, | |
| "grad_norm": 0.08067131042480469, | |
| "learning_rate": 4.820176613424095e-06, | |
| "loss": 1.1676, | |
| "step": 818 | |
| }, | |
| { | |
| "epoch": 2.2527472527472527, | |
| "grad_norm": 0.11158166825771332, | |
| "learning_rate": 4.796212665023345e-06, | |
| "loss": 1.1731, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 2.258241758241758, | |
| "grad_norm": 0.08565322309732437, | |
| "learning_rate": 4.7722534046862805e-06, | |
| "loss": 1.1624, | |
| "step": 822 | |
| }, | |
| { | |
| "epoch": 2.2637362637362637, | |
| "grad_norm": 0.09021101146936417, | |
| "learning_rate": 4.748299383588167e-06, | |
| "loss": 1.1649, | |
| "step": 824 | |
| }, | |
| { | |
| "epoch": 2.269230769230769, | |
| "grad_norm": 0.09416112303733826, | |
| "learning_rate": 4.7243511527837374e-06, | |
| "loss": 1.1643, | |
| "step": 826 | |
| }, | |
| { | |
| "epoch": 2.2747252747252746, | |
| "grad_norm": 0.10305804759263992, | |
| "learning_rate": 4.7004092631945315e-06, | |
| "loss": 1.1709, | |
| "step": 828 | |
| }, | |
| { | |
| "epoch": 2.28021978021978, | |
| "grad_norm": 0.11213865131139755, | |
| "learning_rate": 4.6764742655962e-06, | |
| "loss": 1.1684, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 2.2857142857142856, | |
| "grad_norm": 0.10108992457389832, | |
| "learning_rate": 4.652546710605857e-06, | |
| "loss": 1.1619, | |
| "step": 832 | |
| }, | |
| { | |
| "epoch": 2.291208791208791, | |
| "grad_norm": 0.08697344362735748, | |
| "learning_rate": 4.628627148669391e-06, | |
| "loss": 1.1686, | |
| "step": 834 | |
| }, | |
| { | |
| "epoch": 2.2967032967032965, | |
| "grad_norm": 0.10428237169981003, | |
| "learning_rate": 4.604716130048818e-06, | |
| "loss": 1.1668, | |
| "step": 836 | |
| }, | |
| { | |
| "epoch": 2.302197802197802, | |
| "grad_norm": 0.08793749660253525, | |
| "learning_rate": 4.580814204809618e-06, | |
| "loss": 1.1713, | |
| "step": 838 | |
| }, | |
| { | |
| "epoch": 2.3076923076923075, | |
| "grad_norm": 0.11877471953630447, | |
| "learning_rate": 4.5569219228080805e-06, | |
| "loss": 1.162, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 2.313186813186813, | |
| "grad_norm": 0.08249559253454208, | |
| "learning_rate": 4.5330398336786526e-06, | |
| "loss": 1.1678, | |
| "step": 842 | |
| }, | |
| { | |
| "epoch": 2.3186813186813184, | |
| "grad_norm": 0.11602500081062317, | |
| "learning_rate": 4.509168486821304e-06, | |
| "loss": 1.164, | |
| "step": 844 | |
| }, | |
| { | |
| "epoch": 2.3241758241758244, | |
| "grad_norm": 0.11740805953741074, | |
| "learning_rate": 4.48530843138887e-06, | |
| "loss": 1.1643, | |
| "step": 846 | |
| }, | |
| { | |
| "epoch": 2.32967032967033, | |
| "grad_norm": 0.10009398311376572, | |
| "learning_rate": 4.4614602162744455e-06, | |
| "loss": 1.1629, | |
| "step": 848 | |
| }, | |
| { | |
| "epoch": 2.3351648351648353, | |
| "grad_norm": 0.07898547500371933, | |
| "learning_rate": 4.4376243900987296e-06, | |
| "loss": 1.1568, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 2.340659340659341, | |
| "grad_norm": 0.08736059069633484, | |
| "learning_rate": 4.413801501197424e-06, | |
| "loss": 1.1703, | |
| "step": 852 | |
| }, | |
| { | |
| "epoch": 2.3461538461538463, | |
| "grad_norm": 0.08960308879613876, | |
| "learning_rate": 4.389992097608613e-06, | |
| "loss": 1.1632, | |
| "step": 854 | |
| }, | |
| { | |
| "epoch": 2.3516483516483517, | |
| "grad_norm": 0.10276441276073456, | |
| "learning_rate": 4.366196727060152e-06, | |
| "loss": 1.1634, | |
| "step": 856 | |
| }, | |
| { | |
| "epoch": 2.357142857142857, | |
| "grad_norm": 0.07904151827096939, | |
| "learning_rate": 4.342415936957073e-06, | |
| "loss": 1.1709, | |
| "step": 858 | |
| }, | |
| { | |
| "epoch": 2.3626373626373627, | |
| "grad_norm": 0.07296542823314667, | |
| "learning_rate": 4.318650274368989e-06, | |
| "loss": 1.1672, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 2.368131868131868, | |
| "grad_norm": 0.08600784838199615, | |
| "learning_rate": 4.294900286017509e-06, | |
| "loss": 1.1612, | |
| "step": 862 | |
| }, | |
| { | |
| "epoch": 2.3736263736263736, | |
| "grad_norm": 0.08018805086612701, | |
| "learning_rate": 4.271166518263662e-06, | |
| "loss": 1.1722, | |
| "step": 864 | |
| }, | |
| { | |
| "epoch": 2.379120879120879, | |
| "grad_norm": 0.07395070046186447, | |
| "learning_rate": 4.247449517095329e-06, | |
| "loss": 1.1711, | |
| "step": 866 | |
| }, | |
| { | |
| "epoch": 2.3846153846153846, | |
| "grad_norm": 0.07601243257522583, | |
| "learning_rate": 4.223749828114672e-06, | |
| "loss": 1.1771, | |
| "step": 868 | |
| }, | |
| { | |
| "epoch": 2.39010989010989, | |
| "grad_norm": 0.09423944354057312, | |
| "learning_rate": 4.2000679965256045e-06, | |
| "loss": 1.1603, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 2.3956043956043955, | |
| "grad_norm": 0.08968156576156616, | |
| "learning_rate": 4.176404567121225e-06, | |
| "loss": 1.1608, | |
| "step": 872 | |
| }, | |
| { | |
| "epoch": 2.401098901098901, | |
| "grad_norm": 0.10625026375055313, | |
| "learning_rate": 4.152760084271305e-06, | |
| "loss": 1.1574, | |
| "step": 874 | |
| }, | |
| { | |
| "epoch": 2.4065934065934065, | |
| "grad_norm": 0.18610498309135437, | |
| "learning_rate": 4.129135091909752e-06, | |
| "loss": 1.1747, | |
| "step": 876 | |
| }, | |
| { | |
| "epoch": 2.4065934065934065, | |
| "eval_loss": 1.1678768396377563, | |
| "eval_runtime": 299.619, | |
| "eval_samples_per_second": 17.275, | |
| "eval_steps_per_second": 0.27, | |
| "step": 876 | |
| }, | |
| { | |
| "epoch": 2.412087912087912, | |
| "grad_norm": 0.15964192152023315, | |
| "learning_rate": 4.105530133522096e-06, | |
| "loss": 1.1678, | |
| "step": 878 | |
| }, | |
| { | |
| "epoch": 2.4175824175824174, | |
| "grad_norm": 0.11698123812675476, | |
| "learning_rate": 4.081945752133e-06, | |
| "loss": 1.1678, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 2.423076923076923, | |
| "grad_norm": 0.16632910072803497, | |
| "learning_rate": 4.058382490293755e-06, | |
| "loss": 1.1722, | |
| "step": 882 | |
| }, | |
| { | |
| "epoch": 2.4285714285714284, | |
| "grad_norm": 0.17577840387821198, | |
| "learning_rate": 4.034840890069805e-06, | |
| "loss": 1.1643, | |
| "step": 884 | |
| }, | |
| { | |
| "epoch": 2.4340659340659343, | |
| "grad_norm": 0.17302070558071136, | |
| "learning_rate": 4.0113214930282765e-06, | |
| "loss": 1.1641, | |
| "step": 886 | |
| }, | |
| { | |
| "epoch": 2.4395604395604398, | |
| "grad_norm": 0.2563433349132538, | |
| "learning_rate": 3.987824840225512e-06, | |
| "loss": 1.1678, | |
| "step": 888 | |
| }, | |
| { | |
| "epoch": 2.4450549450549453, | |
| "grad_norm": 0.19088977575302124, | |
| "learning_rate": 3.964351472194642e-06, | |
| "loss": 1.174, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 2.4505494505494507, | |
| "grad_norm": 0.22029054164886475, | |
| "learning_rate": 3.940901928933127e-06, | |
| "loss": 1.1641, | |
| "step": 892 | |
| }, | |
| { | |
| "epoch": 2.456043956043956, | |
| "grad_norm": 0.26073768734931946, | |
| "learning_rate": 3.917476749890351e-06, | |
| "loss": 1.1594, | |
| "step": 894 | |
| }, | |
| { | |
| "epoch": 2.4615384615384617, | |
| "grad_norm": 0.14887195825576782, | |
| "learning_rate": 3.894076473955207e-06, | |
| "loss": 1.1696, | |
| "step": 896 | |
| }, | |
| { | |
| "epoch": 2.467032967032967, | |
| "grad_norm": 0.09114201366901398, | |
| "learning_rate": 3.8707016394436985e-06, | |
| "loss": 1.1686, | |
| "step": 898 | |
| }, | |
| { | |
| "epoch": 2.4725274725274726, | |
| "grad_norm": 0.12883096933364868, | |
| "learning_rate": 3.847352784086556e-06, | |
| "loss": 1.1614, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 2.478021978021978, | |
| "grad_norm": 0.10688427090644836, | |
| "learning_rate": 3.8240304450168716e-06, | |
| "loss": 1.1583, | |
| "step": 902 | |
| }, | |
| { | |
| "epoch": 2.4835164835164836, | |
| "grad_norm": 0.08104757964611053, | |
| "learning_rate": 3.8007351587577342e-06, | |
| "loss": 1.1713, | |
| "step": 904 | |
| }, | |
| { | |
| "epoch": 2.489010989010989, | |
| "grad_norm": 0.11496758460998535, | |
| "learning_rate": 3.777467461209895e-06, | |
| "loss": 1.1662, | |
| "step": 906 | |
| }, | |
| { | |
| "epoch": 2.4945054945054945, | |
| "grad_norm": 0.08005277067422867, | |
| "learning_rate": 3.754227887639434e-06, | |
| "loss": 1.1683, | |
| "step": 908 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "grad_norm": 0.08510690182447433, | |
| "learning_rate": 3.7310169726654444e-06, | |
| "loss": 1.1603, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 2.5054945054945055, | |
| "grad_norm": 0.08159387856721878, | |
| "learning_rate": 3.707835250247745e-06, | |
| "loss": 1.1703, | |
| "step": 912 | |
| }, | |
| { | |
| "epoch": 2.510989010989011, | |
| "grad_norm": 0.08646809309720993, | |
| "learning_rate": 3.684683253674583e-06, | |
| "loss": 1.1744, | |
| "step": 914 | |
| }, | |
| { | |
| "epoch": 2.5164835164835164, | |
| "grad_norm": 0.10196442157030106, | |
| "learning_rate": 3.6615615155503703e-06, | |
| "loss": 1.1607, | |
| "step": 916 | |
| }, | |
| { | |
| "epoch": 2.521978021978022, | |
| "grad_norm": 0.07023598998785019, | |
| "learning_rate": 3.638470567783442e-06, | |
| "loss": 1.17, | |
| "step": 918 | |
| }, | |
| { | |
| "epoch": 2.5274725274725274, | |
| "grad_norm": 0.09794013947248459, | |
| "learning_rate": 3.615410941573799e-06, | |
| "loss": 1.1713, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 2.532967032967033, | |
| "grad_norm": 0.07678301632404327, | |
| "learning_rate": 3.59238316740091e-06, | |
| "loss": 1.1673, | |
| "step": 922 | |
| }, | |
| { | |
| "epoch": 2.5384615384615383, | |
| "grad_norm": 0.09355033934116364, | |
| "learning_rate": 3.5693877750114903e-06, | |
| "loss": 1.1639, | |
| "step": 924 | |
| }, | |
| { | |
| "epoch": 2.543956043956044, | |
| "grad_norm": 0.08433817327022552, | |
| "learning_rate": 3.546425293407324e-06, | |
| "loss": 1.1614, | |
| "step": 926 | |
| }, | |
| { | |
| "epoch": 2.5494505494505493, | |
| "grad_norm": 0.08144976943731308, | |
| "learning_rate": 3.523496250833098e-06, | |
| "loss": 1.1599, | |
| "step": 928 | |
| }, | |
| { | |
| "epoch": 2.5549450549450547, | |
| "grad_norm": 0.0796588808298111, | |
| "learning_rate": 3.5006011747642366e-06, | |
| "loss": 1.1667, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 2.5604395604395602, | |
| "grad_norm": 0.08032579720020294, | |
| "learning_rate": 3.4777405918947795e-06, | |
| "loss": 1.1612, | |
| "step": 932 | |
| }, | |
| { | |
| "epoch": 2.5659340659340657, | |
| "grad_norm": 0.08575133234262466, | |
| "learning_rate": 3.4549150281252635e-06, | |
| "loss": 1.1695, | |
| "step": 934 | |
| }, | |
| { | |
| "epoch": 2.571428571428571, | |
| "grad_norm": 0.08064404129981995, | |
| "learning_rate": 3.4321250085506174e-06, | |
| "loss": 1.1698, | |
| "step": 936 | |
| }, | |
| { | |
| "epoch": 2.5769230769230766, | |
| "grad_norm": 0.07455819100141525, | |
| "learning_rate": 3.4093710574480926e-06, | |
| "loss": 1.1643, | |
| "step": 938 | |
| }, | |
| { | |
| "epoch": 2.5824175824175826, | |
| "grad_norm": 0.08538255095481873, | |
| "learning_rate": 3.386653698265189e-06, | |
| "loss": 1.1593, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 2.587912087912088, | |
| "grad_norm": 0.08684508502483368, | |
| "learning_rate": 3.3639734536076263e-06, | |
| "loss": 1.1651, | |
| "step": 942 | |
| }, | |
| { | |
| "epoch": 2.5934065934065935, | |
| "grad_norm": 0.08089859038591385, | |
| "learning_rate": 3.341330845227316e-06, | |
| "loss": 1.165, | |
| "step": 944 | |
| }, | |
| { | |
| "epoch": 2.598901098901099, | |
| "grad_norm": 0.07942517846822739, | |
| "learning_rate": 3.3187263940103587e-06, | |
| "loss": 1.1685, | |
| "step": 946 | |
| }, | |
| { | |
| "epoch": 2.6043956043956045, | |
| "grad_norm": 0.08427475392818451, | |
| "learning_rate": 3.296160619965056e-06, | |
| "loss": 1.1547, | |
| "step": 948 | |
| }, | |
| { | |
| "epoch": 2.60989010989011, | |
| "grad_norm": 0.08193778246641159, | |
| "learning_rate": 3.2736340422099633e-06, | |
| "loss": 1.1651, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 2.6153846153846154, | |
| "grad_norm": 0.0785454586148262, | |
| "learning_rate": 3.2511471789619274e-06, | |
| "loss": 1.169, | |
| "step": 952 | |
| }, | |
| { | |
| "epoch": 2.620879120879121, | |
| "grad_norm": 0.08144666999578476, | |
| "learning_rate": 3.228700547524184e-06, | |
| "loss": 1.1656, | |
| "step": 954 | |
| }, | |
| { | |
| "epoch": 2.6263736263736264, | |
| "grad_norm": 0.08215656876564026, | |
| "learning_rate": 3.206294664274443e-06, | |
| "loss": 1.163, | |
| "step": 956 | |
| }, | |
| { | |
| "epoch": 2.631868131868132, | |
| "grad_norm": 0.07643819600343704, | |
| "learning_rate": 3.183930044653014e-06, | |
| "loss": 1.1609, | |
| "step": 958 | |
| }, | |
| { | |
| "epoch": 2.6373626373626373, | |
| "grad_norm": 0.06851556152105331, | |
| "learning_rate": 3.1616072031509594e-06, | |
| "loss": 1.168, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 2.642857142857143, | |
| "grad_norm": 0.07472985982894897, | |
| "learning_rate": 3.139326653298236e-06, | |
| "loss": 1.1672, | |
| "step": 962 | |
| }, | |
| { | |
| "epoch": 2.6483516483516483, | |
| "grad_norm": 0.08239593356847763, | |
| "learning_rate": 3.117088907651902e-06, | |
| "loss": 1.1668, | |
| "step": 964 | |
| }, | |
| { | |
| "epoch": 2.6538461538461537, | |
| "grad_norm": 0.07118227332830429, | |
| "learning_rate": 3.094894477784318e-06, | |
| "loss": 1.163, | |
| "step": 966 | |
| }, | |
| { | |
| "epoch": 2.659340659340659, | |
| "grad_norm": 0.08010434359312057, | |
| "learning_rate": 3.0727438742713766e-06, | |
| "loss": 1.1674, | |
| "step": 968 | |
| }, | |
| { | |
| "epoch": 2.6648351648351647, | |
| "grad_norm": 0.06395678967237473, | |
| "learning_rate": 3.0506376066807632e-06, | |
| "loss": 1.1709, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 2.67032967032967, | |
| "grad_norm": 0.07488574087619781, | |
| "learning_rate": 3.028576183560221e-06, | |
| "loss": 1.1616, | |
| "step": 972 | |
| }, | |
| { | |
| "epoch": 2.675824175824176, | |
| "grad_norm": 0.0717063769698143, | |
| "learning_rate": 3.006560112425867e-06, | |
| "loss": 1.159, | |
| "step": 974 | |
| }, | |
| { | |
| "epoch": 2.6813186813186816, | |
| "grad_norm": 0.0665455088019371, | |
| "learning_rate": 2.9845898997505102e-06, | |
| "loss": 1.1717, | |
| "step": 976 | |
| }, | |
| { | |
| "epoch": 2.686813186813187, | |
| "grad_norm": 0.07839926332235336, | |
| "learning_rate": 2.962666050951997e-06, | |
| "loss": 1.1635, | |
| "step": 978 | |
| }, | |
| { | |
| "epoch": 2.6923076923076925, | |
| "grad_norm": 0.08876995742321014, | |
| "learning_rate": 2.940789070381587e-06, | |
| "loss": 1.1634, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 2.697802197802198, | |
| "grad_norm": 0.09154446423053741, | |
| "learning_rate": 2.918959461312353e-06, | |
| "loss": 1.1651, | |
| "step": 982 | |
| }, | |
| { | |
| "epoch": 2.7032967032967035, | |
| "grad_norm": 0.08424372225999832, | |
| "learning_rate": 2.897177725927599e-06, | |
| "loss": 1.153, | |
| "step": 984 | |
| }, | |
| { | |
| "epoch": 2.708791208791209, | |
| "grad_norm": 0.11759259551763535, | |
| "learning_rate": 2.8754443653093186e-06, | |
| "loss": 1.1662, | |
| "step": 986 | |
| }, | |
| { | |
| "epoch": 2.7142857142857144, | |
| "grad_norm": 0.08856125921010971, | |
| "learning_rate": 2.853759879426644e-06, | |
| "loss": 1.1644, | |
| "step": 988 | |
| }, | |
| { | |
| "epoch": 2.71978021978022, | |
| "grad_norm": 0.08224651217460632, | |
| "learning_rate": 2.8321247671243695e-06, | |
| "loss": 1.1628, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 2.7252747252747254, | |
| "grad_norm": 0.11355423927307129, | |
| "learning_rate": 2.8105395261114666e-06, | |
| "loss": 1.1663, | |
| "step": 992 | |
| }, | |
| { | |
| "epoch": 2.730769230769231, | |
| "grad_norm": 0.06467260420322418, | |
| "learning_rate": 2.7890046529496284e-06, | |
| "loss": 1.1736, | |
| "step": 994 | |
| }, | |
| { | |
| "epoch": 2.7362637362637363, | |
| "grad_norm": 0.08972840011119843, | |
| "learning_rate": 2.7675206430418542e-06, | |
| "loss": 1.1692, | |
| "step": 996 | |
| }, | |
| { | |
| "epoch": 2.741758241758242, | |
| "grad_norm": 0.09491213411092758, | |
| "learning_rate": 2.7460879906210485e-06, | |
| "loss": 1.1707, | |
| "step": 998 | |
| }, | |
| { | |
| "epoch": 2.7472527472527473, | |
| "grad_norm": 0.07063695043325424, | |
| "learning_rate": 2.7247071887386544e-06, | |
| "loss": 1.1605, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 2.7527472527472527, | |
| "grad_norm": 0.07039818912744522, | |
| "learning_rate": 2.70337872925331e-06, | |
| "loss": 1.1648, | |
| "step": 1002 | |
| }, | |
| { | |
| "epoch": 2.758241758241758, | |
| "grad_norm": 0.07515694946050644, | |
| "learning_rate": 2.68210310281953e-06, | |
| "loss": 1.1563, | |
| "step": 1004 | |
| }, | |
| { | |
| "epoch": 2.7637362637362637, | |
| "grad_norm": 0.06818992644548416, | |
| "learning_rate": 2.6608807988764252e-06, | |
| "loss": 1.1735, | |
| "step": 1006 | |
| }, | |
| { | |
| "epoch": 2.769230769230769, | |
| "grad_norm": 0.0827600434422493, | |
| "learning_rate": 2.6397123056364364e-06, | |
| "loss": 1.1698, | |
| "step": 1008 | |
| }, | |
| { | |
| "epoch": 2.7747252747252746, | |
| "grad_norm": 0.10222798585891724, | |
| "learning_rate": 2.618598110074105e-06, | |
| "loss": 1.1689, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 2.78021978021978, | |
| "grad_norm": 0.0823441818356514, | |
| "learning_rate": 2.5975386979148792e-06, | |
| "loss": 1.166, | |
| "step": 1012 | |
| }, | |
| { | |
| "epoch": 2.7857142857142856, | |
| "grad_norm": 0.0735846534371376, | |
| "learning_rate": 2.576534553623925e-06, | |
| "loss": 1.1592, | |
| "step": 1014 | |
| }, | |
| { | |
| "epoch": 2.791208791208791, | |
| "grad_norm": 0.07179060578346252, | |
| "learning_rate": 2.5555861603949832e-06, | |
| "loss": 1.1755, | |
| "step": 1016 | |
| }, | |
| { | |
| "epoch": 2.7967032967032965, | |
| "grad_norm": 0.08374089747667313, | |
| "learning_rate": 2.534694000139273e-06, | |
| "loss": 1.1757, | |
| "step": 1018 | |
| }, | |
| { | |
| "epoch": 2.802197802197802, | |
| "grad_norm": 0.07065007835626602, | |
| "learning_rate": 2.513858553474382e-06, | |
| "loss": 1.1698, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 2.8076923076923075, | |
| "grad_norm": 0.08045931160449982, | |
| "learning_rate": 2.4930802997132213e-06, | |
| "loss": 1.1582, | |
| "step": 1022 | |
| }, | |
| { | |
| "epoch": 2.8076923076923075, | |
| "eval_loss": 1.1672074794769287, | |
| "eval_runtime": 299.934, | |
| "eval_samples_per_second": 17.257, | |
| "eval_steps_per_second": 0.27, | |
| "step": 1022 | |
| }, | |
| { | |
| "epoch": 2.813186813186813, | |
| "grad_norm": 0.07823072373867035, | |
| "learning_rate": 2.4723597168529984e-06, | |
| "loss": 1.1596, | |
| "step": 1024 | |
| }, | |
| { | |
| "epoch": 2.8186813186813184, | |
| "grad_norm": 0.08508284389972687, | |
| "learning_rate": 2.4516972815642166e-06, | |
| "loss": 1.1723, | |
| "step": 1026 | |
| }, | |
| { | |
| "epoch": 2.824175824175824, | |
| "grad_norm": 0.07922125607728958, | |
| "learning_rate": 2.4310934691797207e-06, | |
| "loss": 1.1617, | |
| "step": 1028 | |
| }, | |
| { | |
| "epoch": 2.82967032967033, | |
| "grad_norm": 0.0768875777721405, | |
| "learning_rate": 2.410548753683743e-06, | |
| "loss": 1.1664, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 2.8351648351648353, | |
| "grad_norm": 0.07595735043287277, | |
| "learning_rate": 2.390063607701016e-06, | |
| "loss": 1.1731, | |
| "step": 1032 | |
| }, | |
| { | |
| "epoch": 2.840659340659341, | |
| "grad_norm": 0.08126144856214523, | |
| "learning_rate": 2.369638502485897e-06, | |
| "loss": 1.163, | |
| "step": 1034 | |
| }, | |
| { | |
| "epoch": 2.8461538461538463, | |
| "grad_norm": 0.06850147992372513, | |
| "learning_rate": 2.3492739079115214e-06, | |
| "loss": 1.1685, | |
| "step": 1036 | |
| }, | |
| { | |
| "epoch": 2.8516483516483517, | |
| "grad_norm": 0.08522983640432358, | |
| "learning_rate": 2.3289702924589914e-06, | |
| "loss": 1.1686, | |
| "step": 1038 | |
| }, | |
| { | |
| "epoch": 2.857142857142857, | |
| "grad_norm": 0.08155310899019241, | |
| "learning_rate": 2.3087281232066134e-06, | |
| "loss": 1.1653, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 2.8626373626373627, | |
| "grad_norm": 0.07040958106517792, | |
| "learning_rate": 2.2885478658191364e-06, | |
| "loss": 1.1557, | |
| "step": 1042 | |
| }, | |
| { | |
| "epoch": 2.868131868131868, | |
| "grad_norm": 0.0749848261475563, | |
| "learning_rate": 2.268429984537048e-06, | |
| "loss": 1.1724, | |
| "step": 1044 | |
| }, | |
| { | |
| "epoch": 2.8736263736263736, | |
| "grad_norm": 0.06682237237691879, | |
| "learning_rate": 2.248374942165894e-06, | |
| "loss": 1.158, | |
| "step": 1046 | |
| }, | |
| { | |
| "epoch": 2.879120879120879, | |
| "grad_norm": 0.0770927369594574, | |
| "learning_rate": 2.2283832000656304e-06, | |
| "loss": 1.1676, | |
| "step": 1048 | |
| }, | |
| { | |
| "epoch": 2.8846153846153846, | |
| "grad_norm": 0.08294253051280975, | |
| "learning_rate": 2.2084552181400087e-06, | |
| "loss": 1.1654, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 2.89010989010989, | |
| "grad_norm": 0.08596468716859818, | |
| "learning_rate": 2.188591454826e-06, | |
| "loss": 1.1689, | |
| "step": 1052 | |
| }, | |
| { | |
| "epoch": 2.8956043956043955, | |
| "grad_norm": 0.08016978204250336, | |
| "learning_rate": 2.168792367083243e-06, | |
| "loss": 1.1614, | |
| "step": 1054 | |
| }, | |
| { | |
| "epoch": 2.901098901098901, | |
| "grad_norm": 0.0838996022939682, | |
| "learning_rate": 2.1490584103835433e-06, | |
| "loss": 1.1658, | |
| "step": 1056 | |
| }, | |
| { | |
| "epoch": 2.9065934065934065, | |
| "grad_norm": 0.07020293921232224, | |
| "learning_rate": 2.1293900387003742e-06, | |
| "loss": 1.1594, | |
| "step": 1058 | |
| }, | |
| { | |
| "epoch": 2.912087912087912, | |
| "grad_norm": 0.07320253551006317, | |
| "learning_rate": 2.109787704498459e-06, | |
| "loss": 1.1635, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 2.9175824175824174, | |
| "grad_norm": 0.0821409672498703, | |
| "learning_rate": 2.0902518587233418e-06, | |
| "loss": 1.1628, | |
| "step": 1062 | |
| }, | |
| { | |
| "epoch": 2.9230769230769234, | |
| "grad_norm": 0.07211437821388245, | |
| "learning_rate": 2.0707829507910237e-06, | |
| "loss": 1.1689, | |
| "step": 1064 | |
| }, | |
| { | |
| "epoch": 2.928571428571429, | |
| "grad_norm": 0.08253902941942215, | |
| "learning_rate": 2.051381428577622e-06, | |
| "loss": 1.1654, | |
| "step": 1066 | |
| }, | |
| { | |
| "epoch": 2.9340659340659343, | |
| "grad_norm": 0.080472432076931, | |
| "learning_rate": 2.0320477384090665e-06, | |
| "loss": 1.1707, | |
| "step": 1068 | |
| }, | |
| { | |
| "epoch": 2.9395604395604398, | |
| "grad_norm": 0.08331170678138733, | |
| "learning_rate": 2.012782325050831e-06, | |
| "loss": 1.1729, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 2.9450549450549453, | |
| "grad_norm": 0.07666671276092529, | |
| "learning_rate": 1.9935856316977044e-06, | |
| "loss": 1.1715, | |
| "step": 1072 | |
| }, | |
| { | |
| "epoch": 2.9505494505494507, | |
| "grad_norm": 0.09519699960947037, | |
| "learning_rate": 1.9744580999635902e-06, | |
| "loss": 1.1629, | |
| "step": 1074 | |
| }, | |
| { | |
| "epoch": 2.956043956043956, | |
| "grad_norm": 0.0872369259595871, | |
| "learning_rate": 1.9554001698713572e-06, | |
| "loss": 1.1683, | |
| "step": 1076 | |
| }, | |
| { | |
| "epoch": 2.9615384615384617, | |
| "grad_norm": 0.07113870233297348, | |
| "learning_rate": 1.936412279842705e-06, | |
| "loss": 1.1637, | |
| "step": 1078 | |
| }, | |
| { | |
| "epoch": 2.967032967032967, | |
| "grad_norm": 0.08545450866222382, | |
| "learning_rate": 1.9174948666880805e-06, | |
| "loss": 1.1627, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 2.9725274725274726, | |
| "grad_norm": 0.08711759746074677, | |
| "learning_rate": 1.8986483655966408e-06, | |
| "loss": 1.1582, | |
| "step": 1082 | |
| }, | |
| { | |
| "epoch": 2.978021978021978, | |
| "grad_norm": 0.08870179951190948, | |
| "learning_rate": 1.879873210126229e-06, | |
| "loss": 1.167, | |
| "step": 1084 | |
| }, | |
| { | |
| "epoch": 2.9835164835164836, | |
| "grad_norm": 0.1006346121430397, | |
| "learning_rate": 1.8611698321933991e-06, | |
| "loss": 1.1724, | |
| "step": 1086 | |
| }, | |
| { | |
| "epoch": 2.989010989010989, | |
| "grad_norm": 0.07408854365348816, | |
| "learning_rate": 1.8425386620634961e-06, | |
| "loss": 1.1705, | |
| "step": 1088 | |
| }, | |
| { | |
| "epoch": 2.9945054945054945, | |
| "grad_norm": 0.09146919846534729, | |
| "learning_rate": 1.8239801283407393e-06, | |
| "loss": 1.16, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "grad_norm": 0.0709662064909935, | |
| "learning_rate": 1.8054946579583732e-06, | |
| "loss": 1.1636, | |
| "step": 1092 | |
| }, | |
| { | |
| "epoch": 3.0054945054945055, | |
| "grad_norm": 0.10273056477308273, | |
| "learning_rate": 1.787082676168842e-06, | |
| "loss": 1.1647, | |
| "step": 1094 | |
| }, | |
| { | |
| "epoch": 3.010989010989011, | |
| "grad_norm": 0.07023092359304428, | |
| "learning_rate": 1.7687446065340074e-06, | |
| "loss": 1.162, | |
| "step": 1096 | |
| }, | |
| { | |
| "epoch": 3.0164835164835164, | |
| "grad_norm": 0.08073507994413376, | |
| "learning_rate": 1.7504808709154104e-06, | |
| "loss": 1.1697, | |
| "step": 1098 | |
| }, | |
| { | |
| "epoch": 3.021978021978022, | |
| "grad_norm": 0.07398983091115952, | |
| "learning_rate": 1.7322918894645525e-06, | |
| "loss": 1.1637, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 3.0274725274725274, | |
| "grad_norm": 0.08476796001195908, | |
| "learning_rate": 1.7141780806132429e-06, | |
| "loss": 1.161, | |
| "step": 1102 | |
| }, | |
| { | |
| "epoch": 3.032967032967033, | |
| "grad_norm": 0.07763465493917465, | |
| "learning_rate": 1.696139861063974e-06, | |
| "loss": 1.1685, | |
| "step": 1104 | |
| }, | |
| { | |
| "epoch": 3.0384615384615383, | |
| "grad_norm": 0.08244740962982178, | |
| "learning_rate": 1.6781776457803227e-06, | |
| "loss": 1.1683, | |
| "step": 1106 | |
| }, | |
| { | |
| "epoch": 3.043956043956044, | |
| "grad_norm": 0.09578699618577957, | |
| "learning_rate": 1.660291847977415e-06, | |
| "loss": 1.1677, | |
| "step": 1108 | |
| }, | |
| { | |
| "epoch": 3.0494505494505493, | |
| "grad_norm": 0.07818014919757843, | |
| "learning_rate": 1.6424828791124159e-06, | |
| "loss": 1.166, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 3.0549450549450547, | |
| "grad_norm": 0.07996879518032074, | |
| "learning_rate": 1.624751148875065e-06, | |
| "loss": 1.1732, | |
| "step": 1112 | |
| }, | |
| { | |
| "epoch": 3.0604395604395602, | |
| "grad_norm": 0.06753943115472794, | |
| "learning_rate": 1.6070970651782514e-06, | |
| "loss": 1.1628, | |
| "step": 1114 | |
| }, | |
| { | |
| "epoch": 3.065934065934066, | |
| "grad_norm": 0.06478522717952728, | |
| "learning_rate": 1.5895210341486279e-06, | |
| "loss": 1.1607, | |
| "step": 1116 | |
| }, | |
| { | |
| "epoch": 3.0714285714285716, | |
| "grad_norm": 0.07904413342475891, | |
| "learning_rate": 1.5720234601172767e-06, | |
| "loss": 1.1655, | |
| "step": 1118 | |
| }, | |
| { | |
| "epoch": 3.076923076923077, | |
| "grad_norm": 0.07776004076004028, | |
| "learning_rate": 1.5546047456103964e-06, | |
| "loss": 1.1677, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 3.0824175824175826, | |
| "grad_norm": 0.07198958098888397, | |
| "learning_rate": 1.537265291340042e-06, | |
| "loss": 1.1636, | |
| "step": 1122 | |
| }, | |
| { | |
| "epoch": 3.087912087912088, | |
| "grad_norm": 0.07310649752616882, | |
| "learning_rate": 1.5200054961949233e-06, | |
| "loss": 1.1601, | |
| "step": 1124 | |
| }, | |
| { | |
| "epoch": 3.0934065934065935, | |
| "grad_norm": 0.07591935992240906, | |
| "learning_rate": 1.5028257572312105e-06, | |
| "loss": 1.1649, | |
| "step": 1126 | |
| }, | |
| { | |
| "epoch": 3.098901098901099, | |
| "grad_norm": 0.07674799114465714, | |
| "learning_rate": 1.485726469663401e-06, | |
| "loss": 1.1704, | |
| "step": 1128 | |
| }, | |
| { | |
| "epoch": 3.1043956043956045, | |
| "grad_norm": 0.09393850713968277, | |
| "learning_rate": 1.468708026855245e-06, | |
| "loss": 1.1692, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 3.10989010989011, | |
| "grad_norm": 0.07599301636219025, | |
| "learning_rate": 1.4517708203106763e-06, | |
| "loss": 1.1645, | |
| "step": 1132 | |
| }, | |
| { | |
| "epoch": 3.1153846153846154, | |
| "grad_norm": 0.08221649378538132, | |
| "learning_rate": 1.4349152396648153e-06, | |
| "loss": 1.1631, | |
| "step": 1134 | |
| }, | |
| { | |
| "epoch": 3.120879120879121, | |
| "grad_norm": 0.07295921444892883, | |
| "learning_rate": 1.4181416726750052e-06, | |
| "loss": 1.1612, | |
| "step": 1136 | |
| }, | |
| { | |
| "epoch": 3.1263736263736264, | |
| "grad_norm": 0.079580157995224, | |
| "learning_rate": 1.4014505052118893e-06, | |
| "loss": 1.1599, | |
| "step": 1138 | |
| }, | |
| { | |
| "epoch": 3.131868131868132, | |
| "grad_norm": 0.09730138629674911, | |
| "learning_rate": 1.3848421212505404e-06, | |
| "loss": 1.1632, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 3.1373626373626373, | |
| "grad_norm": 0.09287888556718826, | |
| "learning_rate": 1.3683169028616155e-06, | |
| "loss": 1.1602, | |
| "step": 1142 | |
| }, | |
| { | |
| "epoch": 3.142857142857143, | |
| "grad_norm": 0.07959942519664764, | |
| "learning_rate": 1.3518752302025773e-06, | |
| "loss": 1.1629, | |
| "step": 1144 | |
| }, | |
| { | |
| "epoch": 3.1483516483516483, | |
| "grad_norm": 0.07988713681697845, | |
| "learning_rate": 1.3355174815089477e-06, | |
| "loss": 1.1641, | |
| "step": 1146 | |
| }, | |
| { | |
| "epoch": 3.1538461538461537, | |
| "grad_norm": 0.06947878748178482, | |
| "learning_rate": 1.3192440330856005e-06, | |
| "loss": 1.1614, | |
| "step": 1148 | |
| }, | |
| { | |
| "epoch": 3.159340659340659, | |
| "grad_norm": 0.06804593652486801, | |
| "learning_rate": 1.30305525929811e-06, | |
| "loss": 1.1666, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 3.1648351648351647, | |
| "grad_norm": 0.07112333178520203, | |
| "learning_rate": 1.2869515325641357e-06, | |
| "loss": 1.1592, | |
| "step": 1152 | |
| }, | |
| { | |
| "epoch": 3.17032967032967, | |
| "grad_norm": 0.07313236594200134, | |
| "learning_rate": 1.2709332233448573e-06, | |
| "loss": 1.1686, | |
| "step": 1154 | |
| }, | |
| { | |
| "epoch": 3.1758241758241756, | |
| "grad_norm": 0.07543834298849106, | |
| "learning_rate": 1.2550007001364518e-06, | |
| "loss": 1.1626, | |
| "step": 1156 | |
| }, | |
| { | |
| "epoch": 3.181318681318681, | |
| "grad_norm": 0.07730558514595032, | |
| "learning_rate": 1.239154329461615e-06, | |
| "loss": 1.164, | |
| "step": 1158 | |
| }, | |
| { | |
| "epoch": 3.186813186813187, | |
| "grad_norm": 0.07528182864189148, | |
| "learning_rate": 1.223394475861131e-06, | |
| "loss": 1.1621, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 3.1923076923076925, | |
| "grad_norm": 0.07378531992435455, | |
| "learning_rate": 1.207721501885486e-06, | |
| "loss": 1.162, | |
| "step": 1162 | |
| }, | |
| { | |
| "epoch": 3.197802197802198, | |
| "grad_norm": 0.07334302365779877, | |
| "learning_rate": 1.1921357680865258e-06, | |
| "loss": 1.1642, | |
| "step": 1164 | |
| }, | |
| { | |
| "epoch": 3.2032967032967035, | |
| "grad_norm": 0.07697130739688873, | |
| "learning_rate": 1.1766376330091684e-06, | |
| "loss": 1.1647, | |
| "step": 1166 | |
| }, | |
| { | |
| "epoch": 3.208791208791209, | |
| "grad_norm": 0.0726918876171112, | |
| "learning_rate": 1.1612274531831463e-06, | |
| "loss": 1.1719, | |
| "step": 1168 | |
| }, | |
| { | |
| "epoch": 3.208791208791209, | |
| "eval_loss": 1.1669209003448486, | |
| "eval_runtime": 299.9103, | |
| "eval_samples_per_second": 17.258, | |
| "eval_steps_per_second": 0.27, | |
| "step": 1168 | |
| }, | |
| { | |
| "epoch": 3.2142857142857144, | |
| "grad_norm": 0.07847239077091217, | |
| "learning_rate": 1.1459055831148074e-06, | |
| "loss": 1.1651, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 3.21978021978022, | |
| "grad_norm": 0.06886722892522812, | |
| "learning_rate": 1.1306723752789672e-06, | |
| "loss": 1.1648, | |
| "step": 1172 | |
| }, | |
| { | |
| "epoch": 3.2252747252747254, | |
| "grad_norm": 0.0860794335603714, | |
| "learning_rate": 1.1155281801107897e-06, | |
| "loss": 1.168, | |
| "step": 1174 | |
| }, | |
| { | |
| "epoch": 3.230769230769231, | |
| "grad_norm": 0.07369523495435715, | |
| "learning_rate": 1.1004733459977325e-06, | |
| "loss": 1.1669, | |
| "step": 1176 | |
| }, | |
| { | |
| "epoch": 3.2362637362637363, | |
| "grad_norm": 0.07247929275035858, | |
| "learning_rate": 1.0855082192715294e-06, | |
| "loss": 1.168, | |
| "step": 1178 | |
| }, | |
| { | |
| "epoch": 3.241758241758242, | |
| "grad_norm": 0.07475174218416214, | |
| "learning_rate": 1.0706331442002226e-06, | |
| "loss": 1.1622, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 3.2472527472527473, | |
| "grad_norm": 0.07838430255651474, | |
| "learning_rate": 1.0558484629802502e-06, | |
| "loss": 1.171, | |
| "step": 1182 | |
| }, | |
| { | |
| "epoch": 3.2527472527472527, | |
| "grad_norm": 0.0696970596909523, | |
| "learning_rate": 1.041154515728559e-06, | |
| "loss": 1.1621, | |
| "step": 1184 | |
| }, | |
| { | |
| "epoch": 3.258241758241758, | |
| "grad_norm": 0.07396616786718369, | |
| "learning_rate": 1.0265516404747943e-06, | |
| "loss": 1.1641, | |
| "step": 1186 | |
| }, | |
| { | |
| "epoch": 3.2637362637362637, | |
| "grad_norm": 0.08104917407035828, | |
| "learning_rate": 1.0120401731535213e-06, | |
| "loss": 1.1663, | |
| "step": 1188 | |
| }, | |
| { | |
| "epoch": 3.269230769230769, | |
| "grad_norm": 0.0944487676024437, | |
| "learning_rate": 9.976204475964907e-07, | |
| "loss": 1.1618, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 3.2747252747252746, | |
| "grad_norm": 0.07517724484205246, | |
| "learning_rate": 9.832927955249605e-07, | |
| "loss": 1.1726, | |
| "step": 1192 | |
| }, | |
| { | |
| "epoch": 3.28021978021978, | |
| "grad_norm": 0.08082670718431473, | |
| "learning_rate": 9.690575465420733e-07, | |
| "loss": 1.1689, | |
| "step": 1194 | |
| }, | |
| { | |
| "epoch": 3.2857142857142856, | |
| "grad_norm": 0.07103955000638962, | |
| "learning_rate": 9.549150281252633e-07, | |
| "loss": 1.1724, | |
| "step": 1196 | |
| }, | |
| { | |
| "epoch": 3.291208791208791, | |
| "grad_norm": 0.08387453854084015, | |
| "learning_rate": 9.408655656187282e-07, | |
| "loss": 1.1598, | |
| "step": 1198 | |
| }, | |
| { | |
| "epoch": 3.2967032967032965, | |
| "grad_norm": 0.07405807077884674, | |
| "learning_rate": 9.269094822259439e-07, | |
| "loss": 1.1648, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 3.302197802197802, | |
| "grad_norm": 0.07046142220497131, | |
| "learning_rate": 9.130470990022283e-07, | |
| "loss": 1.1707, | |
| "step": 1202 | |
| }, | |
| { | |
| "epoch": 3.3076923076923075, | |
| "grad_norm": 0.07296803593635559, | |
| "learning_rate": 8.992787348473575e-07, | |
| "loss": 1.1642, | |
| "step": 1204 | |
| }, | |
| { | |
| "epoch": 3.313186813186813, | |
| "grad_norm": 0.0734080895781517, | |
| "learning_rate": 8.856047064982276e-07, | |
| "loss": 1.1558, | |
| "step": 1206 | |
| }, | |
| { | |
| "epoch": 3.3186813186813184, | |
| "grad_norm": 0.07395216077566147, | |
| "learning_rate": 8.720253285215685e-07, | |
| "loss": 1.1721, | |
| "step": 1208 | |
| }, | |
| { | |
| "epoch": 3.3241758241758244, | |
| "grad_norm": 0.07748089730739594, | |
| "learning_rate": 8.585409133067119e-07, | |
| "loss": 1.1653, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 3.32967032967033, | |
| "grad_norm": 0.07397377490997314, | |
| "learning_rate": 8.451517710583934e-07, | |
| "loss": 1.1623, | |
| "step": 1212 | |
| }, | |
| { | |
| "epoch": 3.3351648351648353, | |
| "grad_norm": 0.07272295653820038, | |
| "learning_rate": 8.318582097896316e-07, | |
| "loss": 1.1643, | |
| "step": 1214 | |
| }, | |
| { | |
| "epoch": 3.340659340659341, | |
| "grad_norm": 0.07338716834783554, | |
| "learning_rate": 8.18660535314631e-07, | |
| "loss": 1.1612, | |
| "step": 1216 | |
| }, | |
| { | |
| "epoch": 3.3461538461538463, | |
| "grad_norm": 0.06233609840273857, | |
| "learning_rate": 8.055590512417499e-07, | |
| "loss": 1.1642, | |
| "step": 1218 | |
| }, | |
| { | |
| "epoch": 3.3516483516483517, | |
| "grad_norm": 0.07398121803998947, | |
| "learning_rate": 7.925540589665187e-07, | |
| "loss": 1.1719, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 3.357142857142857, | |
| "grad_norm": 0.0798347070813179, | |
| "learning_rate": 7.796458576647015e-07, | |
| "loss": 1.1669, | |
| "step": 1222 | |
| }, | |
| { | |
| "epoch": 3.3626373626373627, | |
| "grad_norm": 0.06861409544944763, | |
| "learning_rate": 7.668347442854218e-07, | |
| "loss": 1.1698, | |
| "step": 1224 | |
| }, | |
| { | |
| "epoch": 3.368131868131868, | |
| "grad_norm": 0.06391950696706772, | |
| "learning_rate": 7.541210135443188e-07, | |
| "loss": 1.166, | |
| "step": 1226 | |
| }, | |
| { | |
| "epoch": 3.3736263736263736, | |
| "grad_norm": 0.07030785083770752, | |
| "learning_rate": 7.415049579167783e-07, | |
| "loss": 1.166, | |
| "step": 1228 | |
| }, | |
| { | |
| "epoch": 3.379120879120879, | |
| "grad_norm": 0.07540637254714966, | |
| "learning_rate": 7.289868676312023e-07, | |
| "loss": 1.1643, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 3.3846153846153846, | |
| "grad_norm": 0.06893002241849899, | |
| "learning_rate": 7.165670306623296e-07, | |
| "loss": 1.163, | |
| "step": 1232 | |
| }, | |
| { | |
| "epoch": 3.39010989010989, | |
| "grad_norm": 0.07210598886013031, | |
| "learning_rate": 7.042457327246088e-07, | |
| "loss": 1.1648, | |
| "step": 1234 | |
| }, | |
| { | |
| "epoch": 3.3956043956043955, | |
| "grad_norm": 0.076958067715168, | |
| "learning_rate": 6.920232572656349e-07, | |
| "loss": 1.1635, | |
| "step": 1236 | |
| }, | |
| { | |
| "epoch": 3.401098901098901, | |
| "grad_norm": 0.07737283408641815, | |
| "learning_rate": 6.79899885459619e-07, | |
| "loss": 1.161, | |
| "step": 1238 | |
| }, | |
| { | |
| "epoch": 3.4065934065934065, | |
| "grad_norm": 0.07089677453041077, | |
| "learning_rate": 6.678758962009241e-07, | |
| "loss": 1.1595, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 3.412087912087912, | |
| "grad_norm": 0.06980301439762115, | |
| "learning_rate": 6.559515660976506e-07, | |
| "loss": 1.1646, | |
| "step": 1242 | |
| }, | |
| { | |
| "epoch": 3.4175824175824174, | |
| "grad_norm": 0.0859365463256836, | |
| "learning_rate": 6.441271694652701e-07, | |
| "loss": 1.1653, | |
| "step": 1244 | |
| }, | |
| { | |
| "epoch": 3.423076923076923, | |
| "grad_norm": 0.0688340812921524, | |
| "learning_rate": 6.32402978320315e-07, | |
| "loss": 1.171, | |
| "step": 1246 | |
| }, | |
| { | |
| "epoch": 3.4285714285714284, | |
| "grad_norm": 0.07327734678983688, | |
| "learning_rate": 6.207792623741249e-07, | |
| "loss": 1.1664, | |
| "step": 1248 | |
| }, | |
| { | |
| "epoch": 3.4340659340659343, | |
| "grad_norm": 0.06622574478387833, | |
| "learning_rate": 6.092562890266341e-07, | |
| "loss": 1.162, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 3.4395604395604398, | |
| "grad_norm": 0.06825845688581467, | |
| "learning_rate": 5.97834323360233e-07, | |
| "loss": 1.1601, | |
| "step": 1252 | |
| }, | |
| { | |
| "epoch": 3.4450549450549453, | |
| "grad_norm": 0.0715174600481987, | |
| "learning_rate": 5.86513628133652e-07, | |
| "loss": 1.1653, | |
| "step": 1254 | |
| }, | |
| { | |
| "epoch": 3.4505494505494507, | |
| "grad_norm": 0.06850449740886688, | |
| "learning_rate": 5.75294463775935e-07, | |
| "loss": 1.1625, | |
| "step": 1256 | |
| }, | |
| { | |
| "epoch": 3.456043956043956, | |
| "grad_norm": 0.061297595500946045, | |
| "learning_rate": 5.641770883804365e-07, | |
| "loss": 1.168, | |
| "step": 1258 | |
| }, | |
| { | |
| "epoch": 3.4615384615384617, | |
| "grad_norm": 0.07319982349872589, | |
| "learning_rate": 5.531617576988879e-07, | |
| "loss": 1.1693, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 3.467032967032967, | |
| "grad_norm": 0.07959607243537903, | |
| "learning_rate": 5.422487251355146e-07, | |
| "loss": 1.1625, | |
| "step": 1262 | |
| }, | |
| { | |
| "epoch": 3.4725274725274726, | |
| "grad_norm": 0.06803479790687561, | |
| "learning_rate": 5.314382417412062e-07, | |
| "loss": 1.162, | |
| "step": 1264 | |
| }, | |
| { | |
| "epoch": 3.478021978021978, | |
| "grad_norm": 0.07737120240926743, | |
| "learning_rate": 5.207305562077403e-07, | |
| "loss": 1.1705, | |
| "step": 1266 | |
| }, | |
| { | |
| "epoch": 3.4835164835164836, | |
| "grad_norm": 0.06910370290279388, | |
| "learning_rate": 5.101259148620618e-07, | |
| "loss": 1.1619, | |
| "step": 1268 | |
| }, | |
| { | |
| "epoch": 3.489010989010989, | |
| "grad_norm": 0.06398583203554153, | |
| "learning_rate": 4.99624561660616e-07, | |
| "loss": 1.1659, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 3.4945054945054945, | |
| "grad_norm": 0.06311172246932983, | |
| "learning_rate": 4.892267381837396e-07, | |
| "loss": 1.1595, | |
| "step": 1272 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "grad_norm": 0.06687135994434357, | |
| "learning_rate": 4.789326836300983e-07, | |
| "loss": 1.1639, | |
| "step": 1274 | |
| }, | |
| { | |
| "epoch": 3.5054945054945055, | |
| "grad_norm": 0.06371094286441803, | |
| "learning_rate": 4.687426348111834e-07, | |
| "loss": 1.1644, | |
| "step": 1276 | |
| }, | |
| { | |
| "epoch": 3.510989010989011, | |
| "grad_norm": 0.0776761993765831, | |
| "learning_rate": 4.586568261458729e-07, | |
| "loss": 1.1745, | |
| "step": 1278 | |
| }, | |
| { | |
| "epoch": 3.5164835164835164, | |
| "grad_norm": 0.06816533952951431, | |
| "learning_rate": 4.486754896550288e-07, | |
| "loss": 1.1636, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 3.521978021978022, | |
| "grad_norm": 0.07598984241485596, | |
| "learning_rate": 4.3879885495616505e-07, | |
| "loss": 1.1657, | |
| "step": 1282 | |
| }, | |
| { | |
| "epoch": 3.5274725274725274, | |
| "grad_norm": 0.06942006200551987, | |
| "learning_rate": 4.290271492581627e-07, | |
| "loss": 1.1638, | |
| "step": 1284 | |
| }, | |
| { | |
| "epoch": 3.532967032967033, | |
| "grad_norm": 0.06620027124881744, | |
| "learning_rate": 4.1936059735604497e-07, | |
| "loss": 1.1698, | |
| "step": 1286 | |
| }, | |
| { | |
| "epoch": 3.5384615384615383, | |
| "grad_norm": 0.07451992481946945, | |
| "learning_rate": 4.0979942162580387e-07, | |
| "loss": 1.1663, | |
| "step": 1288 | |
| }, | |
| { | |
| "epoch": 3.543956043956044, | |
| "grad_norm": 0.07096972316503525, | |
| "learning_rate": 4.003438420192873e-07, | |
| "loss": 1.1627, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 3.5494505494505493, | |
| "grad_norm": 0.0655079111456871, | |
| "learning_rate": 3.9099407605913576e-07, | |
| "loss": 1.1645, | |
| "step": 1292 | |
| }, | |
| { | |
| "epoch": 3.5549450549450547, | |
| "grad_norm": 0.06677290052175522, | |
| "learning_rate": 3.8175033883378233e-07, | |
| "loss": 1.1642, | |
| "step": 1294 | |
| }, | |
| { | |
| "epoch": 3.5604395604395602, | |
| "grad_norm": 0.06589250266551971, | |
| "learning_rate": 3.7261284299249967e-07, | |
| "loss": 1.1661, | |
| "step": 1296 | |
| }, | |
| { | |
| "epoch": 3.5659340659340657, | |
| "grad_norm": 0.06369265913963318, | |
| "learning_rate": 3.63581798740511e-07, | |
| "loss": 1.1591, | |
| "step": 1298 | |
| }, | |
| { | |
| "epoch": 3.571428571428571, | |
| "grad_norm": 0.06584078073501587, | |
| "learning_rate": 3.5465741383415684e-07, | |
| "loss": 1.1669, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 3.5769230769230766, | |
| "grad_norm": 0.07220979779958725, | |
| "learning_rate": 3.4583989357611037e-07, | |
| "loss": 1.1637, | |
| "step": 1302 | |
| }, | |
| { | |
| "epoch": 3.5824175824175826, | |
| "grad_norm": 0.07124118506908417, | |
| "learning_rate": 3.371294408106585e-07, | |
| "loss": 1.1685, | |
| "step": 1304 | |
| }, | |
| { | |
| "epoch": 3.587912087912088, | |
| "grad_norm": 0.0632823258638382, | |
| "learning_rate": 3.285262559190322e-07, | |
| "loss": 1.1646, | |
| "step": 1306 | |
| }, | |
| { | |
| "epoch": 3.5934065934065935, | |
| "grad_norm": 0.07331771403551102, | |
| "learning_rate": 3.20030536814801e-07, | |
| "loss": 1.1639, | |
| "step": 1308 | |
| }, | |
| { | |
| "epoch": 3.598901098901099, | |
| "grad_norm": 0.07538831979036331, | |
| "learning_rate": 3.1164247893931575e-07, | |
| "loss": 1.1642, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 3.6043956043956045, | |
| "grad_norm": 0.06919172406196594, | |
| "learning_rate": 3.033622752572157e-07, | |
| "loss": 1.165, | |
| "step": 1312 | |
| }, | |
| { | |
| "epoch": 3.60989010989011, | |
| "grad_norm": 0.0683453157544136, | |
| "learning_rate": 2.951901162519877e-07, | |
| "loss": 1.1657, | |
| "step": 1314 | |
| }, | |
| { | |
| "epoch": 3.60989010989011, | |
| "eval_loss": 1.1666840314865112, | |
| "eval_runtime": 300.2612, | |
| "eval_samples_per_second": 17.238, | |
| "eval_steps_per_second": 0.27, | |
| "step": 1314 | |
| }, | |
| { | |
| "epoch": 3.6153846153846154, | |
| "grad_norm": 0.07349937409162521, | |
| "learning_rate": 2.8712618992158656e-07, | |
| "loss": 1.167, | |
| "step": 1316 | |
| }, | |
| { | |
| "epoch": 3.620879120879121, | |
| "grad_norm": 0.0776103138923645, | |
| "learning_rate": 2.791706817741041e-07, | |
| "loss": 1.1628, | |
| "step": 1318 | |
| }, | |
| { | |
| "epoch": 3.6263736263736264, | |
| "grad_norm": 0.06213715299963951, | |
| "learning_rate": 2.7132377482351037e-07, | |
| "loss": 1.1623, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 3.631868131868132, | |
| "grad_norm": 0.06590726226568222, | |
| "learning_rate": 2.635856495854372e-07, | |
| "loss": 1.1675, | |
| "step": 1322 | |
| }, | |
| { | |
| "epoch": 3.6373626373626373, | |
| "grad_norm": 0.06559966504573822, | |
| "learning_rate": 2.5595648407302496e-07, | |
| "loss": 1.1692, | |
| "step": 1324 | |
| }, | |
| { | |
| "epoch": 3.642857142857143, | |
| "grad_norm": 0.0632384866476059, | |
| "learning_rate": 2.484364537928341e-07, | |
| "loss": 1.1632, | |
| "step": 1326 | |
| }, | |
| { | |
| "epoch": 3.6483516483516483, | |
| "grad_norm": 0.07011737674474716, | |
| "learning_rate": 2.41025731740801e-07, | |
| "loss": 1.1626, | |
| "step": 1328 | |
| }, | |
| { | |
| "epoch": 3.6538461538461537, | |
| "grad_norm": 0.06871969997882843, | |
| "learning_rate": 2.3372448839825978e-07, | |
| "loss": 1.1597, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 3.659340659340659, | |
| "grad_norm": 0.06817732751369476, | |
| "learning_rate": 2.2653289172802295e-07, | |
| "loss": 1.1714, | |
| "step": 1332 | |
| }, | |
| { | |
| "epoch": 3.6648351648351647, | |
| "grad_norm": 0.06385420262813568, | |
| "learning_rate": 2.194511071705141e-07, | |
| "loss": 1.1747, | |
| "step": 1334 | |
| }, | |
| { | |
| "epoch": 3.67032967032967, | |
| "grad_norm": 0.06753863394260406, | |
| "learning_rate": 2.1247929763996534e-07, | |
| "loss": 1.1602, | |
| "step": 1336 | |
| }, | |
| { | |
| "epoch": 3.675824175824176, | |
| "grad_norm": 0.0678897351026535, | |
| "learning_rate": 2.0561762352066638e-07, | |
| "loss": 1.1685, | |
| "step": 1338 | |
| }, | |
| { | |
| "epoch": 3.6813186813186816, | |
| "grad_norm": 0.06596899032592773, | |
| "learning_rate": 1.988662426632765e-07, | |
| "loss": 1.1657, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 3.686813186813187, | |
| "grad_norm": 0.0690767765045166, | |
| "learning_rate": 1.922253103811944e-07, | |
| "loss": 1.167, | |
| "step": 1342 | |
| }, | |
| { | |
| "epoch": 3.6923076923076925, | |
| "grad_norm": 0.07675404101610184, | |
| "learning_rate": 1.85694979446982e-07, | |
| "loss": 1.1661, | |
| "step": 1344 | |
| }, | |
| { | |
| "epoch": 3.697802197802198, | |
| "grad_norm": 0.06820071488618851, | |
| "learning_rate": 1.7927540008885414e-07, | |
| "loss": 1.1635, | |
| "step": 1346 | |
| }, | |
| { | |
| "epoch": 3.7032967032967035, | |
| "grad_norm": 0.07236689329147339, | |
| "learning_rate": 1.729667199872187e-07, | |
| "loss": 1.1581, | |
| "step": 1348 | |
| }, | |
| { | |
| "epoch": 3.708791208791209, | |
| "grad_norm": 0.06689772009849548, | |
| "learning_rate": 1.6676908427128103e-07, | |
| "loss": 1.1617, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 3.7142857142857144, | |
| "grad_norm": 0.06681732088327408, | |
| "learning_rate": 1.6068263551570596e-07, | |
| "loss": 1.1732, | |
| "step": 1352 | |
| }, | |
| { | |
| "epoch": 3.71978021978022, | |
| "grad_norm": 0.07003802061080933, | |
| "learning_rate": 1.5470751373733773e-07, | |
| "loss": 1.1631, | |
| "step": 1354 | |
| }, | |
| { | |
| "epoch": 3.7252747252747254, | |
| "grad_norm": 0.06234800070524216, | |
| "learning_rate": 1.488438563919764e-07, | |
| "loss": 1.1696, | |
| "step": 1356 | |
| }, | |
| { | |
| "epoch": 3.730769230769231, | |
| "grad_norm": 0.0727643072605133, | |
| "learning_rate": 1.4309179837122045e-07, | |
| "loss": 1.1654, | |
| "step": 1358 | |
| }, | |
| { | |
| "epoch": 3.7362637362637363, | |
| "grad_norm": 0.0751447007060051, | |
| "learning_rate": 1.374514719993575e-07, | |
| "loss": 1.1647, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 3.741758241758242, | |
| "grad_norm": 0.061782509088516235, | |
| "learning_rate": 1.3192300703032733e-07, | |
| "loss": 1.1672, | |
| "step": 1362 | |
| }, | |
| { | |
| "epoch": 3.7472527472527473, | |
| "grad_norm": 0.06823485344648361, | |
| "learning_rate": 1.2650653064473106e-07, | |
| "loss": 1.1682, | |
| "step": 1364 | |
| }, | |
| { | |
| "epoch": 3.7527472527472527, | |
| "grad_norm": 0.061277277767658234, | |
| "learning_rate": 1.2120216744690716e-07, | |
| "loss": 1.1671, | |
| "step": 1366 | |
| }, | |
| { | |
| "epoch": 3.758241758241758, | |
| "grad_norm": 0.06526022404432297, | |
| "learning_rate": 1.1601003946206723e-07, | |
| "loss": 1.1643, | |
| "step": 1368 | |
| }, | |
| { | |
| "epoch": 3.7637362637362637, | |
| "grad_norm": 0.06104410067200661, | |
| "learning_rate": 1.1093026613348601e-07, | |
| "loss": 1.1574, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 3.769230769230769, | |
| "grad_norm": 0.07134535163640976, | |
| "learning_rate": 1.0596296431975406e-07, | |
| "loss": 1.1614, | |
| "step": 1372 | |
| }, | |
| { | |
| "epoch": 3.7747252747252746, | |
| "grad_norm": 0.06213730573654175, | |
| "learning_rate": 1.0110824829209164e-07, | |
| "loss": 1.1657, | |
| "step": 1374 | |
| }, | |
| { | |
| "epoch": 3.78021978021978, | |
| "grad_norm": 0.06847725808620453, | |
| "learning_rate": 9.636622973171583e-08, | |
| "loss": 1.1663, | |
| "step": 1376 | |
| }, | |
| { | |
| "epoch": 3.7857142857142856, | |
| "grad_norm": 0.07414616644382477, | |
| "learning_rate": 9.17370177272775e-08, | |
| "loss": 1.1668, | |
| "step": 1378 | |
| }, | |
| { | |
| "epoch": 3.791208791208791, | |
| "grad_norm": 0.06581508368253708, | |
| "learning_rate": 8.72207187723445e-08, | |
| "loss": 1.1693, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 3.7967032967032965, | |
| "grad_norm": 0.06397808343172073, | |
| "learning_rate": 8.281743676295639e-08, | |
| "loss": 1.159, | |
| "step": 1382 | |
| }, | |
| { | |
| "epoch": 3.802197802197802, | |
| "grad_norm": 0.06523909419775009, | |
| "learning_rate": 7.852727299523577e-08, | |
| "loss": 1.1562, | |
| "step": 1384 | |
| }, | |
| { | |
| "epoch": 3.8076923076923075, | |
| "grad_norm": 0.06779211014509201, | |
| "learning_rate": 7.435032616305238e-08, | |
| "loss": 1.1727, | |
| "step": 1386 | |
| }, | |
| { | |
| "epoch": 3.813186813186813, | |
| "grad_norm": 0.06538354605436325, | |
| "learning_rate": 7.028669235575714e-08, | |
| "loss": 1.16, | |
| "step": 1388 | |
| }, | |
| { | |
| "epoch": 3.8186813186813184, | |
| "grad_norm": 0.06357073783874512, | |
| "learning_rate": 6.633646505597113e-08, | |
| "loss": 1.1637, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 3.824175824175824, | |
| "grad_norm": 0.0635320246219635, | |
| "learning_rate": 6.249973513743345e-08, | |
| "loss": 1.1639, | |
| "step": 1392 | |
| }, | |
| { | |
| "epoch": 3.82967032967033, | |
| "grad_norm": 0.06830117851495743, | |
| "learning_rate": 5.8776590862911764e-08, | |
| "loss": 1.1585, | |
| "step": 1394 | |
| }, | |
| { | |
| "epoch": 3.8351648351648353, | |
| "grad_norm": 0.06444601714611053, | |
| "learning_rate": 5.5167117882171104e-08, | |
| "loss": 1.1655, | |
| "step": 1396 | |
| }, | |
| { | |
| "epoch": 3.840659340659341, | |
| "grad_norm": 0.06375352293252945, | |
| "learning_rate": 5.167139923000553e-08, | |
| "loss": 1.167, | |
| "step": 1398 | |
| }, | |
| { | |
| "epoch": 3.8461538461538463, | |
| "grad_norm": 0.06707257032394409, | |
| "learning_rate": 4.828951532432457e-08, | |
| "loss": 1.171, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 3.8516483516483517, | |
| "grad_norm": 0.06371629983186722, | |
| "learning_rate": 4.5021543964306466e-08, | |
| "loss": 1.1648, | |
| "step": 1402 | |
| }, | |
| { | |
| "epoch": 3.857142857142857, | |
| "grad_norm": 0.0662907212972641, | |
| "learning_rate": 4.186756032860728e-08, | |
| "loss": 1.1609, | |
| "step": 1404 | |
| }, | |
| { | |
| "epoch": 3.8626373626373627, | |
| "grad_norm": 0.06402067840099335, | |
| "learning_rate": 3.8827636973630126e-08, | |
| "loss": 1.1653, | |
| "step": 1406 | |
| }, | |
| { | |
| "epoch": 3.868131868131868, | |
| "grad_norm": 0.07026324421167374, | |
| "learning_rate": 3.590184383185758e-08, | |
| "loss": 1.1583, | |
| "step": 1408 | |
| }, | |
| { | |
| "epoch": 3.8736263736263736, | |
| "grad_norm": 0.06799634546041489, | |
| "learning_rate": 3.309024821024354e-08, | |
| "loss": 1.1688, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 3.879120879120879, | |
| "grad_norm": 0.06444913148880005, | |
| "learning_rate": 3.039291478866169e-08, | |
| "loss": 1.1616, | |
| "step": 1412 | |
| }, | |
| { | |
| "epoch": 3.8846153846153846, | |
| "grad_norm": 0.06899133324623108, | |
| "learning_rate": 2.7809905618422227e-08, | |
| "loss": 1.1598, | |
| "step": 1414 | |
| }, | |
| { | |
| "epoch": 3.89010989010989, | |
| "grad_norm": 0.06785506010055542, | |
| "learning_rate": 2.534128012083914e-08, | |
| "loss": 1.1532, | |
| "step": 1416 | |
| }, | |
| { | |
| "epoch": 3.8956043956043955, | |
| "grad_norm": 0.06701900064945221, | |
| "learning_rate": 2.298709508586794e-08, | |
| "loss": 1.1654, | |
| "step": 1418 | |
| }, | |
| { | |
| "epoch": 3.901098901098901, | |
| "grad_norm": 0.06285455822944641, | |
| "learning_rate": 2.074740467079672e-08, | |
| "loss": 1.157, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 3.9065934065934065, | |
| "grad_norm": 0.06540726125240326, | |
| "learning_rate": 1.862226039899995e-08, | |
| "loss": 1.1608, | |
| "step": 1422 | |
| }, | |
| { | |
| "epoch": 3.912087912087912, | |
| "grad_norm": 0.07258068770170212, | |
| "learning_rate": 1.661171115875493e-08, | |
| "loss": 1.1696, | |
| "step": 1424 | |
| }, | |
| { | |
| "epoch": 3.9175824175824174, | |
| "grad_norm": 0.06025758385658264, | |
| "learning_rate": 1.4715803202116075e-08, | |
| "loss": 1.1644, | |
| "step": 1426 | |
| }, | |
| { | |
| "epoch": 3.9230769230769234, | |
| "grad_norm": 0.07302309572696686, | |
| "learning_rate": 1.2934580143851294e-08, | |
| "loss": 1.1621, | |
| "step": 1428 | |
| }, | |
| { | |
| "epoch": 3.928571428571429, | |
| "grad_norm": 0.05992519110441208, | |
| "learning_rate": 1.1268082960436688e-08, | |
| "loss": 1.1689, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 3.9340659340659343, | |
| "grad_norm": 0.06481453776359558, | |
| "learning_rate": 9.716349989118412e-09, | |
| "loss": 1.166, | |
| "step": 1432 | |
| }, | |
| { | |
| "epoch": 3.9395604395604398, | |
| "grad_norm": 0.07549932599067688, | |
| "learning_rate": 8.279416927026163e-09, | |
| "loss": 1.159, | |
| "step": 1434 | |
| }, | |
| { | |
| "epoch": 3.9450549450549453, | |
| "grad_norm": 0.06642387062311172, | |
| "learning_rate": 6.9573168303532775e-09, | |
| "loss": 1.1602, | |
| "step": 1436 | |
| }, | |
| { | |
| "epoch": 3.9505494505494507, | |
| "grad_norm": 0.0685223713517189, | |
| "learning_rate": 5.750080113598455e-09, | |
| "loss": 1.1659, | |
| "step": 1438 | |
| }, | |
| { | |
| "epoch": 3.956043956043956, | |
| "grad_norm": 0.06598740071058273, | |
| "learning_rate": 4.65773454886298e-09, | |
| "loss": 1.1607, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 3.9615384615384617, | |
| "grad_norm": 0.06653392314910889, | |
| "learning_rate": 3.6803052652134572e-09, | |
| "loss": 1.1545, | |
| "step": 1442 | |
| }, | |
| { | |
| "epoch": 3.967032967032967, | |
| "grad_norm": 0.06884676963090897, | |
| "learning_rate": 2.817814748104497e-09, | |
| "loss": 1.1557, | |
| "step": 1444 | |
| }, | |
| { | |
| "epoch": 3.9725274725274726, | |
| "grad_norm": 0.06552927196025848, | |
| "learning_rate": 2.070282838859683e-09, | |
| "loss": 1.1591, | |
| "step": 1446 | |
| }, | |
| { | |
| "epoch": 3.978021978021978, | |
| "grad_norm": 0.06410259753465652, | |
| "learning_rate": 1.4377267342158274e-09, | |
| "loss": 1.169, | |
| "step": 1448 | |
| }, | |
| { | |
| "epoch": 3.9835164835164836, | |
| "grad_norm": 0.06548061966896057, | |
| "learning_rate": 9.201609859271765e-10, | |
| "loss": 1.1665, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 3.989010989010989, | |
| "grad_norm": 0.06399548798799515, | |
| "learning_rate": 5.17597500432343e-10, | |
| "loss": 1.1717, | |
| "step": 1452 | |
| }, | |
| { | |
| "epoch": 3.9945054945054945, | |
| "grad_norm": 0.06421104073524475, | |
| "learning_rate": 2.3004553857675082e-10, | |
| "loss": 1.1725, | |
| "step": 1454 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "grad_norm": 0.06589093804359436, | |
| "learning_rate": 5.751171540391287e-11, | |
| "loss": 1.1709, | |
| "step": 1456 | |
| } | |
| ], | |
| "logging_steps": 2, | |
| "max_steps": 1456, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 4, | |
| "save_steps": 364, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 3.7105900771564585e+18, | |
| "train_batch_size": 4, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |