{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 1080,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 0.49686384201049805,
      "learning_rate": 4.958333333333334e-05,
      "loss": 0.0828,
      "step": 10
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.5520921945571899,
      "learning_rate": 4.912037037037037e-05,
      "loss": 0.074,
      "step": 20
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 0.3245736062526703,
      "learning_rate": 4.865740740740741e-05,
      "loss": 0.0576,
      "step": 30
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.3899650275707245,
      "learning_rate": 4.819444444444445e-05,
      "loss": 0.065,
      "step": 40
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 0.35350099205970764,
      "learning_rate": 4.773148148148148e-05,
      "loss": 0.055,
      "step": 50
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.30941131711006165,
      "learning_rate": 4.726851851851852e-05,
      "loss": 0.0558,
      "step": 60
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 0.3238453269004822,
      "learning_rate": 4.6805555555555556e-05,
      "loss": 0.0508,
      "step": 70
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.4079723656177521,
      "learning_rate": 4.6342592592592595e-05,
      "loss": 0.0488,
      "step": 80
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.27488774061203003,
      "learning_rate": 4.5879629629629635e-05,
      "loss": 0.051,
      "step": 90
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.27709904313087463,
      "learning_rate": 4.541666666666667e-05,
      "loss": 0.046,
      "step": 100
    },
    {
      "epoch": 0.6111111111111112,
      "grad_norm": 0.22847816348075867,
      "learning_rate": 4.49537037037037e-05,
      "loss": 0.0439,
      "step": 110
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.23230929672718048,
      "learning_rate": 4.449074074074074e-05,
      "loss": 0.045,
      "step": 120
    },
    {
      "epoch": 0.7222222222222222,
      "grad_norm": 0.19948454201221466,
      "learning_rate": 4.402777777777778e-05,
      "loss": 0.0431,
      "step": 130
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 0.3548349142074585,
      "learning_rate": 4.356481481481482e-05,
      "loss": 0.0442,
      "step": 140
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 0.23642666637897491,
      "learning_rate": 4.3101851851851854e-05,
      "loss": 0.0383,
      "step": 150
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.262961208820343,
      "learning_rate": 4.263888888888889e-05,
      "loss": 0.0416,
      "step": 160
    },
    {
      "epoch": 0.9444444444444444,
      "grad_norm": 0.21744681894779205,
      "learning_rate": 4.217592592592593e-05,
      "loss": 0.0427,
      "step": 170
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.2349257618188858,
      "learning_rate": 4.171296296296297e-05,
      "loss": 0.0412,
      "step": 180
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.029657404869794846,
      "eval_runtime": 6.5879,
      "eval_samples_per_second": 48.574,
      "eval_steps_per_second": 3.036,
      "step": 180
    },
    {
      "epoch": 1.0555555555555556,
      "grad_norm": 0.20215728878974915,
      "learning_rate": 4.125e-05,
      "loss": 0.0384,
      "step": 190
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 0.15817931294441223,
      "learning_rate": 4.078703703703704e-05,
      "loss": 0.0409,
      "step": 200
    },
    {
      "epoch": 1.1666666666666667,
      "grad_norm": 0.1944839209318161,
      "learning_rate": 4.032407407407407e-05,
      "loss": 0.04,
      "step": 210
    },
    {
      "epoch": 1.2222222222222223,
      "grad_norm": 0.40207526087760925,
      "learning_rate": 3.986111111111111e-05,
      "loss": 0.0372,
      "step": 220
    },
    {
      "epoch": 1.2777777777777777,
      "grad_norm": 0.27425676584243774,
      "learning_rate": 3.939814814814815e-05,
      "loss": 0.037,
      "step": 230
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.148152157664299,
      "learning_rate": 3.8935185185185185e-05,
      "loss": 0.0366,
      "step": 240
    },
    {
      "epoch": 1.3888888888888888,
      "grad_norm": 0.1749790757894516,
      "learning_rate": 3.8472222222222225e-05,
      "loss": 0.0397,
      "step": 250
    },
    {
      "epoch": 1.4444444444444444,
      "grad_norm": 0.25152885913848877,
      "learning_rate": 3.800925925925926e-05,
      "loss": 0.0384,
      "step": 260
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.15564148128032684,
      "learning_rate": 3.75462962962963e-05,
      "loss": 0.0367,
      "step": 270
    },
    {
      "epoch": 1.5555555555555556,
      "grad_norm": 0.277736634016037,
      "learning_rate": 3.708333333333334e-05,
      "loss": 0.0361,
      "step": 280
    },
    {
      "epoch": 1.6111111111111112,
      "grad_norm": 0.1521805077791214,
      "learning_rate": 3.662037037037037e-05,
      "loss": 0.0358,
      "step": 290
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.20427845418453217,
      "learning_rate": 3.6157407407407404e-05,
      "loss": 0.0358,
      "step": 300
    },
    {
      "epoch": 1.7222222222222223,
      "grad_norm": 0.1444249451160431,
      "learning_rate": 3.5694444444444444e-05,
      "loss": 0.036,
      "step": 310
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.16637395322322845,
      "learning_rate": 3.5231481481481484e-05,
      "loss": 0.0351,
      "step": 320
    },
    {
      "epoch": 1.8333333333333335,
      "grad_norm": 0.19704970717430115,
      "learning_rate": 3.4768518518518524e-05,
      "loss": 0.0338,
      "step": 330
    },
    {
      "epoch": 1.8888888888888888,
      "grad_norm": 0.18837705254554749,
      "learning_rate": 3.430555555555556e-05,
      "loss": 0.0324,
      "step": 340
    },
    {
      "epoch": 1.9444444444444444,
      "grad_norm": 0.2518753111362457,
      "learning_rate": 3.384259259259259e-05,
      "loss": 0.0379,
      "step": 350
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.2375190258026123,
      "learning_rate": 3.337962962962963e-05,
      "loss": 0.0348,
      "step": 360
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.02760537900030613,
      "eval_runtime": 6.5831,
      "eval_samples_per_second": 48.61,
      "eval_steps_per_second": 3.038,
      "step": 360
    },
    {
      "epoch": 2.0555555555555554,
      "grad_norm": 0.13346998393535614,
      "learning_rate": 3.291666666666667e-05,
      "loss": 0.0365,
      "step": 370
    },
    {
      "epoch": 2.111111111111111,
      "grad_norm": 0.21457839012145996,
      "learning_rate": 3.24537037037037e-05,
      "loss": 0.0336,
      "step": 380
    },
    {
      "epoch": 2.1666666666666665,
      "grad_norm": 0.12173231691122055,
      "learning_rate": 3.199074074074074e-05,
      "loss": 0.0341,
      "step": 390
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 0.15279892086982727,
      "learning_rate": 3.1527777777777775e-05,
      "loss": 0.032,
      "step": 400
    },
    {
      "epoch": 2.2777777777777777,
      "grad_norm": 0.18990376591682434,
      "learning_rate": 3.106481481481482e-05,
      "loss": 0.0349,
      "step": 410
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 0.29108214378356934,
      "learning_rate": 3.0601851851851855e-05,
      "loss": 0.0326,
      "step": 420
    },
    {
      "epoch": 2.388888888888889,
      "grad_norm": 0.14332085847854614,
      "learning_rate": 3.0138888888888888e-05,
      "loss": 0.0328,
      "step": 430
    },
    {
      "epoch": 2.4444444444444446,
      "grad_norm": 0.12165053933858871,
      "learning_rate": 2.9675925925925928e-05,
      "loss": 0.0326,
      "step": 440
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.22552748024463654,
      "learning_rate": 2.9212962962962964e-05,
      "loss": 0.0331,
      "step": 450
    },
    {
      "epoch": 2.5555555555555554,
      "grad_norm": 0.3424241244792938,
      "learning_rate": 2.8749999999999997e-05,
      "loss": 0.0325,
      "step": 460
    },
    {
      "epoch": 2.611111111111111,
      "grad_norm": 0.13250669836997986,
      "learning_rate": 2.828703703703704e-05,
      "loss": 0.0315,
      "step": 470
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.15798448026180267,
      "learning_rate": 2.7824074074074074e-05,
      "loss": 0.0315,
      "step": 480
    },
    {
      "epoch": 2.7222222222222223,
      "grad_norm": 0.10952693223953247,
      "learning_rate": 2.7361111111111114e-05,
      "loss": 0.0311,
      "step": 490
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 0.17310163378715515,
      "learning_rate": 2.689814814814815e-05,
      "loss": 0.0326,
      "step": 500
    },
    {
      "epoch": 2.8333333333333335,
      "grad_norm": 0.14377371966838837,
      "learning_rate": 2.6435185185185187e-05,
      "loss": 0.0332,
      "step": 510
    },
    {
      "epoch": 2.888888888888889,
      "grad_norm": 0.10982959717512131,
      "learning_rate": 2.5972222222222226e-05,
      "loss": 0.0331,
      "step": 520
    },
    {
      "epoch": 2.9444444444444446,
      "grad_norm": 0.1141400933265686,
      "learning_rate": 2.550925925925926e-05,
      "loss": 0.033,
      "step": 530
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.0858101025223732,
      "learning_rate": 2.5046296296296296e-05,
      "loss": 0.032,
      "step": 540
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.027356069535017014,
      "eval_runtime": 6.321,
      "eval_samples_per_second": 50.625,
      "eval_steps_per_second": 3.164,
      "step": 540
    },
    {
      "epoch": 3.0555555555555554,
      "grad_norm": 0.159243643283844,
      "learning_rate": 2.4583333333333332e-05,
      "loss": 0.0299,
      "step": 550
    },
    {
      "epoch": 3.111111111111111,
      "grad_norm": 0.08856271952390671,
      "learning_rate": 2.4120370370370372e-05,
      "loss": 0.0317,
      "step": 560
    },
    {
      "epoch": 3.1666666666666665,
      "grad_norm": 0.13010160624980927,
      "learning_rate": 2.365740740740741e-05,
      "loss": 0.0322,
      "step": 570
    },
    {
      "epoch": 3.2222222222222223,
      "grad_norm": 0.10837047547101974,
      "learning_rate": 2.3194444444444445e-05,
      "loss": 0.0325,
      "step": 580
    },
    {
      "epoch": 3.2777777777777777,
      "grad_norm": 0.17628169059753418,
      "learning_rate": 2.273148148148148e-05,
      "loss": 0.0318,
      "step": 590
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.1559268981218338,
      "learning_rate": 2.2268518518518518e-05,
      "loss": 0.033,
      "step": 600
    },
    {
      "epoch": 3.388888888888889,
      "grad_norm": 0.15573599934577942,
      "learning_rate": 2.1805555555555558e-05,
      "loss": 0.0314,
      "step": 610
    },
    {
      "epoch": 3.4444444444444446,
      "grad_norm": 0.13026462495326996,
      "learning_rate": 2.1342592592592594e-05,
      "loss": 0.0319,
      "step": 620
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.11255809664726257,
      "learning_rate": 2.087962962962963e-05,
      "loss": 0.0306,
      "step": 630
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 0.15510410070419312,
      "learning_rate": 2.0416666666666667e-05,
      "loss": 0.0538,
      "step": 640
    },
    {
      "epoch": 3.611111111111111,
      "grad_norm": 0.10521262139081955,
      "learning_rate": 1.9953703703703704e-05,
      "loss": 0.03,
      "step": 650
    },
    {
      "epoch": 3.6666666666666665,
      "grad_norm": 0.14359714090824127,
      "learning_rate": 1.9490740740740743e-05,
      "loss": 0.0334,
      "step": 660
    },
    {
      "epoch": 3.7222222222222223,
      "grad_norm": 0.11077132821083069,
      "learning_rate": 1.9027777777777776e-05,
      "loss": 0.0299,
      "step": 670
    },
    {
      "epoch": 3.7777777777777777,
      "grad_norm": 0.1317073106765747,
      "learning_rate": 1.8564814814814816e-05,
      "loss": 0.0327,
      "step": 680
    },
    {
      "epoch": 3.8333333333333335,
      "grad_norm": 0.13942557573318481,
      "learning_rate": 1.8101851851851853e-05,
      "loss": 0.0318,
      "step": 690
    },
    {
      "epoch": 3.888888888888889,
      "grad_norm": 0.10866592079401016,
      "learning_rate": 1.763888888888889e-05,
      "loss": 0.0293,
      "step": 700
    },
    {
      "epoch": 3.9444444444444446,
      "grad_norm": 0.10337399691343307,
      "learning_rate": 1.7175925925925926e-05,
      "loss": 0.0306,
      "step": 710
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.08381380140781403,
      "learning_rate": 1.6712962962962962e-05,
      "loss": 0.0306,
      "step": 720
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.02674299106001854,
      "eval_runtime": 6.3173,
      "eval_samples_per_second": 50.655,
      "eval_steps_per_second": 3.166,
      "step": 720
    },
    {
      "epoch": 4.055555555555555,
      "grad_norm": 0.20555126667022705,
      "learning_rate": 1.6250000000000002e-05,
      "loss": 0.0298,
      "step": 730
    },
    {
      "epoch": 4.111111111111111,
      "grad_norm": 0.11750379204750061,
      "learning_rate": 1.578703703703704e-05,
      "loss": 0.0298,
      "step": 740
    },
    {
      "epoch": 4.166666666666667,
      "grad_norm": 0.11085871607065201,
      "learning_rate": 1.5324074074074075e-05,
      "loss": 0.0305,
      "step": 750
    },
    {
      "epoch": 4.222222222222222,
      "grad_norm": 0.12449198961257935,
      "learning_rate": 1.4861111111111111e-05,
      "loss": 0.03,
      "step": 760
    },
    {
      "epoch": 4.277777777777778,
      "grad_norm": 0.14551545679569244,
      "learning_rate": 1.439814814814815e-05,
      "loss": 0.0308,
      "step": 770
    },
    {
      "epoch": 4.333333333333333,
      "grad_norm": 0.1231958270072937,
      "learning_rate": 1.3935185185185188e-05,
      "loss": 0.0312,
      "step": 780
    },
    {
      "epoch": 4.388888888888889,
      "grad_norm": 0.1063426211476326,
      "learning_rate": 1.3472222222222222e-05,
      "loss": 0.0298,
      "step": 790
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 0.12436050921678543,
      "learning_rate": 1.300925925925926e-05,
      "loss": 0.0315,
      "step": 800
    },
    {
      "epoch": 4.5,
      "grad_norm": 0.13953514397144318,
      "learning_rate": 1.2546296296296297e-05,
      "loss": 0.0314,
      "step": 810
    },
    {
      "epoch": 4.555555555555555,
      "grad_norm": 0.0922839418053627,
      "learning_rate": 1.2083333333333333e-05,
      "loss": 0.0308,
      "step": 820
    },
    {
      "epoch": 4.611111111111111,
      "grad_norm": 0.13846085965633392,
      "learning_rate": 1.162037037037037e-05,
      "loss": 0.0297,
      "step": 830
    },
    {
      "epoch": 4.666666666666667,
      "grad_norm": 0.07957114279270172,
      "learning_rate": 1.1157407407407408e-05,
      "loss": 0.0298,
      "step": 840
    },
    {
      "epoch": 4.722222222222222,
      "grad_norm": 0.0851704403758049,
      "learning_rate": 1.0694444444444444e-05,
      "loss": 0.0318,
      "step": 850
    },
    {
      "epoch": 4.777777777777778,
      "grad_norm": 0.11411622166633606,
      "learning_rate": 1.0231481481481483e-05,
      "loss": 0.0295,
      "step": 860
    },
    {
      "epoch": 4.833333333333333,
      "grad_norm": 0.10176681727170944,
      "learning_rate": 9.768518518518519e-06,
      "loss": 0.0316,
      "step": 870
    },
    {
      "epoch": 4.888888888888889,
      "grad_norm": 0.0951576679944992,
      "learning_rate": 9.305555555555555e-06,
      "loss": 0.0308,
      "step": 880
    },
    {
      "epoch": 4.944444444444445,
      "grad_norm": 0.08655095100402832,
      "learning_rate": 8.842592592592592e-06,
      "loss": 0.0294,
      "step": 890
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.1173749566078186,
      "learning_rate": 8.37962962962963e-06,
      "loss": 0.0307,
      "step": 900
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.026756376028060913,
      "eval_runtime": 6.4257,
      "eval_samples_per_second": 49.8,
      "eval_steps_per_second": 3.112,
      "step": 900
    },
    {
      "epoch": 5.055555555555555,
      "grad_norm": 0.10281986743211746,
      "learning_rate": 7.916666666666667e-06,
      "loss": 0.0281,
      "step": 910
    },
    {
      "epoch": 5.111111111111111,
      "grad_norm": 0.10464879125356674,
      "learning_rate": 7.453703703703705e-06,
      "loss": 0.0295,
      "step": 920
    },
    {
      "epoch": 5.166666666666667,
      "grad_norm": 0.14110395312309265,
      "learning_rate": 6.990740740740741e-06,
      "loss": 0.0303,
      "step": 930
    },
    {
      "epoch": 5.222222222222222,
      "grad_norm": 0.08916814625263214,
      "learning_rate": 6.5277777777777784e-06,
      "loss": 0.0303,
      "step": 940
    },
    {
      "epoch": 5.277777777777778,
      "grad_norm": 0.08186755329370499,
      "learning_rate": 6.064814814814815e-06,
      "loss": 0.0305,
      "step": 950
    },
    {
      "epoch": 5.333333333333333,
      "grad_norm": 0.10332108289003372,
      "learning_rate": 5.601851851851852e-06,
      "loss": 0.0303,
      "step": 960
    },
    {
      "epoch": 5.388888888888889,
      "grad_norm": 0.09380320459604263,
      "learning_rate": 5.138888888888889e-06,
      "loss": 0.0281,
      "step": 970
    },
    {
      "epoch": 5.444444444444445,
      "grad_norm": 0.1488436758518219,
      "learning_rate": 4.675925925925926e-06,
      "loss": 0.0308,
      "step": 980
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.11125043779611588,
      "learning_rate": 4.212962962962963e-06,
      "loss": 0.0309,
      "step": 990
    },
    {
      "epoch": 5.555555555555555,
      "grad_norm": 0.08724004775285721,
      "learning_rate": 3.75e-06,
      "loss": 0.0318,
      "step": 1000
    },
    {
      "epoch": 5.611111111111111,
      "grad_norm": 0.12417462468147278,
      "learning_rate": 3.287037037037037e-06,
      "loss": 0.0296,
      "step": 1010
    },
    {
      "epoch": 5.666666666666667,
      "grad_norm": 0.08415894210338593,
      "learning_rate": 2.8240740740740743e-06,
      "loss": 0.0311,
      "step": 1020
    },
    {
      "epoch": 5.722222222222222,
      "grad_norm": 0.09413722902536392,
      "learning_rate": 2.361111111111111e-06,
      "loss": 0.0309,
      "step": 1030
    },
    {
      "epoch": 5.777777777777778,
      "grad_norm": 0.08906271308660507,
      "learning_rate": 1.8981481481481482e-06,
      "loss": 0.0331,
      "step": 1040
    },
    {
      "epoch": 5.833333333333333,
      "grad_norm": 0.2548229992389679,
      "learning_rate": 1.4351851851851853e-06,
      "loss": 0.0325,
      "step": 1050
    },
    {
      "epoch": 5.888888888888889,
      "grad_norm": 0.2592361569404602,
      "learning_rate": 9.722222222222222e-07,
      "loss": 0.0291,
      "step": 1060
    },
    {
      "epoch": 5.944444444444445,
      "grad_norm": 0.13197167217731476,
      "learning_rate": 5.092592592592594e-07,
      "loss": 0.0298,
      "step": 1070
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.0932869166135788,
      "learning_rate": 4.62962962962963e-08,
      "loss": 0.0309,
      "step": 1080
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.026577139273285866,
      "eval_runtime": 6.4249,
      "eval_samples_per_second": 49.806,
      "eval_steps_per_second": 3.113,
      "step": 1080
    }
  ],
  "logging_steps": 10,
  "max_steps": 1080,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2397334005596160.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}