| { |
| "best_global_step": 456, |
| "best_metric": 0.0554032064974308, |
| "best_model_checkpoint": "saves_multiple/p-tuning/llama-3-8b-instruct/train_cb_789_1760637868/checkpoint-456", |
| "epoch": 20.0, |
| "eval_steps": 57, |
| "global_step": 1140, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.08771929824561403, |
| "grad_norm": 80.17578125, |
| "learning_rate": 3.508771929824561e-05, |
| "loss": 4.9733, |
| "num_input_tokens_seen": 3136, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.17543859649122806, |
| "grad_norm": 18.527280807495117, |
| "learning_rate": 7.894736842105263e-05, |
| "loss": 1.344, |
| "num_input_tokens_seen": 6112, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.2631578947368421, |
| "grad_norm": 14.144380569458008, |
| "learning_rate": 0.00012280701754385965, |
| "loss": 0.7777, |
| "num_input_tokens_seen": 10112, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.3508771929824561, |
| "grad_norm": 9.432901382446289, |
| "learning_rate": 0.00016666666666666666, |
| "loss": 0.5275, |
| "num_input_tokens_seen": 13280, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.43859649122807015, |
| "grad_norm": 8.182435035705566, |
| "learning_rate": 0.00021052631578947367, |
| "loss": 0.4235, |
| "num_input_tokens_seen": 16288, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.5263157894736842, |
| "grad_norm": 15.486543655395508, |
| "learning_rate": 0.0002543859649122807, |
| "loss": 0.5063, |
| "num_input_tokens_seen": 19104, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.6140350877192983, |
| "grad_norm": 5.083335876464844, |
| "learning_rate": 0.0002982456140350877, |
| "loss": 0.4182, |
| "num_input_tokens_seen": 22144, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.7017543859649122, |
| "grad_norm": 5.8651580810546875, |
| "learning_rate": 0.00034210526315789477, |
| "loss": 0.4154, |
| "num_input_tokens_seen": 25792, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.7894736842105263, |
| "grad_norm": 5.307326793670654, |
| "learning_rate": 0.00038596491228070175, |
| "loss": 0.3517, |
| "num_input_tokens_seen": 28576, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.8771929824561403, |
| "grad_norm": 1.6044645309448242, |
| "learning_rate": 0.0004298245614035088, |
| "loss": 0.252, |
| "num_input_tokens_seen": 31424, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.9649122807017544, |
| "grad_norm": 21.3470458984375, |
| "learning_rate": 0.00047368421052631577, |
| "loss": 0.4751, |
| "num_input_tokens_seen": 34720, |
| "step": 55 |
| }, |
| { |
| "epoch": 1.0, |
| "eval_loss": 1.2149380445480347, |
| "eval_runtime": 0.8439, |
| "eval_samples_per_second": 29.624, |
| "eval_steps_per_second": 8.295, |
| "num_input_tokens_seen": 35448, |
| "step": 57 |
| }, |
| { |
| "epoch": 1.0526315789473684, |
| "grad_norm": 47.61418151855469, |
| "learning_rate": 0.0005175438596491229, |
| "loss": 0.9889, |
| "num_input_tokens_seen": 37688, |
| "step": 60 |
| }, |
| { |
| "epoch": 1.1403508771929824, |
| "grad_norm": 5.174376964569092, |
| "learning_rate": 0.0005614035087719298, |
| "loss": 0.5786, |
| "num_input_tokens_seen": 40792, |
| "step": 65 |
| }, |
| { |
| "epoch": 1.2280701754385965, |
| "grad_norm": 2.1170897483825684, |
| "learning_rate": 0.0006052631578947369, |
| "loss": 0.2984, |
| "num_input_tokens_seen": 43832, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.3157894736842106, |
| "grad_norm": 2.5492770671844482, |
| "learning_rate": 0.0006491228070175439, |
| "loss": 0.3784, |
| "num_input_tokens_seen": 46648, |
| "step": 75 |
| }, |
| { |
| "epoch": 1.4035087719298245, |
| "grad_norm": 1.1470354795455933, |
| "learning_rate": 0.0006929824561403509, |
| "loss": 0.3617, |
| "num_input_tokens_seen": 49848, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.4912280701754386, |
| "grad_norm": 13.955451011657715, |
| "learning_rate": 0.0007368421052631579, |
| "loss": 0.5845, |
| "num_input_tokens_seen": 52504, |
| "step": 85 |
| }, |
| { |
| "epoch": 1.5789473684210527, |
| "grad_norm": 3.778008222579956, |
| "learning_rate": 0.0007807017543859649, |
| "loss": 0.3749, |
| "num_input_tokens_seen": 55448, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.6666666666666665, |
| "grad_norm": 3.5065560340881348, |
| "learning_rate": 0.000824561403508772, |
| "loss": 0.2262, |
| "num_input_tokens_seen": 58776, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.7543859649122808, |
| "grad_norm": 3.016477584838867, |
| "learning_rate": 0.000868421052631579, |
| "loss": 0.2606, |
| "num_input_tokens_seen": 61624, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.8421052631578947, |
| "grad_norm": 3.628634452819824, |
| "learning_rate": 0.000912280701754386, |
| "loss": 0.3744, |
| "num_input_tokens_seen": 65336, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.9298245614035088, |
| "grad_norm": 2.2490060329437256, |
| "learning_rate": 0.0009561403508771929, |
| "loss": 0.3845, |
| "num_input_tokens_seen": 68280, |
| "step": 110 |
| }, |
| { |
| "epoch": 2.0, |
| "eval_loss": 1.1474884748458862, |
| "eval_runtime": 0.8436, |
| "eval_samples_per_second": 29.634, |
| "eval_steps_per_second": 8.297, |
| "num_input_tokens_seen": 70496, |
| "step": 114 |
| }, |
| { |
| "epoch": 2.017543859649123, |
| "grad_norm": 55.403846740722656, |
| "learning_rate": 0.001, |
| "loss": 0.4412, |
| "num_input_tokens_seen": 71008, |
| "step": 115 |
| }, |
| { |
| "epoch": 2.1052631578947367, |
| "grad_norm": 2.2277448177337646, |
| "learning_rate": 0.000999941402841295, |
| "loss": 0.7242, |
| "num_input_tokens_seen": 74176, |
| "step": 120 |
| }, |
| { |
| "epoch": 2.192982456140351, |
| "grad_norm": 0.3559485077857971, |
| "learning_rate": 0.0009997656250996883, |
| "loss": 0.2506, |
| "num_input_tokens_seen": 77472, |
| "step": 125 |
| }, |
| { |
| "epoch": 2.280701754385965, |
| "grad_norm": 0.6201609373092651, |
| "learning_rate": 0.0009994727079754844, |
| "loss": 0.4359, |
| "num_input_tokens_seen": 80192, |
| "step": 130 |
| }, |
| { |
| "epoch": 2.3684210526315788, |
| "grad_norm": 0.7193291187286377, |
| "learning_rate": 0.0009990627201251284, |
| "loss": 0.3637, |
| "num_input_tokens_seen": 83424, |
| "step": 135 |
| }, |
| { |
| "epoch": 2.456140350877193, |
| "grad_norm": 0.6246382594108582, |
| "learning_rate": 0.0009985357576451127, |
| "loss": 0.2151, |
| "num_input_tokens_seen": 86080, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "grad_norm": 1.0634218454360962, |
| "learning_rate": 0.0009978919440494537, |
| "loss": 0.3518, |
| "num_input_tokens_seen": 89600, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.6315789473684212, |
| "grad_norm": 0.8262342810630798, |
| "learning_rate": 0.0009971314302407413, |
| "loss": 0.4052, |
| "num_input_tokens_seen": 92704, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.719298245614035, |
| "grad_norm": 2.1158297061920166, |
| "learning_rate": 0.0009962543944747686, |
| "loss": 0.2407, |
| "num_input_tokens_seen": 96160, |
| "step": 155 |
| }, |
| { |
| "epoch": 2.807017543859649, |
| "grad_norm": 0.8602972626686096, |
| "learning_rate": 0.0009952610423187517, |
| "loss": 0.3001, |
| "num_input_tokens_seen": 99520, |
| "step": 160 |
| }, |
| { |
| "epoch": 2.8947368421052633, |
| "grad_norm": 1.3600105047225952, |
| "learning_rate": 0.0009941516066031462, |
| "loss": 0.2196, |
| "num_input_tokens_seen": 102528, |
| "step": 165 |
| }, |
| { |
| "epoch": 2.982456140350877, |
| "grad_norm": 1.0132993459701538, |
| "learning_rate": 0.0009929263473670749, |
| "loss": 0.2278, |
| "num_input_tokens_seen": 106304, |
| "step": 170 |
| }, |
| { |
| "epoch": 3.0, |
| "eval_loss": 0.19849923253059387, |
| "eval_runtime": 0.8446, |
| "eval_samples_per_second": 29.601, |
| "eval_steps_per_second": 8.288, |
| "num_input_tokens_seen": 106416, |
| "step": 171 |
| }, |
| { |
| "epoch": 3.0701754385964914, |
| "grad_norm": 0.8767890334129333, |
| "learning_rate": 0.0009915855517973774, |
| "loss": 0.2223, |
| "num_input_tokens_seen": 108880, |
| "step": 175 |
| }, |
| { |
| "epoch": 3.1578947368421053, |
| "grad_norm": 18.88866424560547, |
| "learning_rate": 0.0009901295341612972, |
| "loss": 0.4317, |
| "num_input_tokens_seen": 111984, |
| "step": 180 |
| }, |
| { |
| "epoch": 3.245614035087719, |
| "grad_norm": 0.25612902641296387, |
| "learning_rate": 0.00098855863573282, |
| "loss": 0.391, |
| "num_input_tokens_seen": 115248, |
| "step": 185 |
| }, |
| { |
| "epoch": 3.3333333333333335, |
| "grad_norm": 0.8117824196815491, |
| "learning_rate": 0.0009868732247126839, |
| "loss": 0.0946, |
| "num_input_tokens_seen": 118384, |
| "step": 190 |
| }, |
| { |
| "epoch": 3.4210526315789473, |
| "grad_norm": 1.0302726030349731, |
| "learning_rate": 0.000985073696142077, |
| "loss": 0.1712, |
| "num_input_tokens_seen": 122224, |
| "step": 195 |
| }, |
| { |
| "epoch": 3.5087719298245617, |
| "grad_norm": 0.5782110691070557, |
| "learning_rate": 0.0009831604718100442, |
| "loss": 0.1537, |
| "num_input_tokens_seen": 125584, |
| "step": 200 |
| }, |
| { |
| "epoch": 3.5964912280701755, |
| "grad_norm": 0.2848152816295624, |
| "learning_rate": 0.0009811340001546253, |
| "loss": 0.1084, |
| "num_input_tokens_seen": 128816, |
| "step": 205 |
| }, |
| { |
| "epoch": 3.6842105263157894, |
| "grad_norm": 0.5926535725593567, |
| "learning_rate": 0.0009789947561577445, |
| "loss": 0.4722, |
| "num_input_tokens_seen": 131952, |
| "step": 210 |
| }, |
| { |
| "epoch": 3.7719298245614032, |
| "grad_norm": 0.14324168860912323, |
| "learning_rate": 0.000976743241233882, |
| "loss": 0.1285, |
| "num_input_tokens_seen": 135248, |
| "step": 215 |
| }, |
| { |
| "epoch": 3.8596491228070176, |
| "grad_norm": 0.16379037499427795, |
| "learning_rate": 0.0009743799831125471, |
| "loss": 0.087, |
| "num_input_tokens_seen": 138384, |
| "step": 220 |
| }, |
| { |
| "epoch": 3.9473684210526314, |
| "grad_norm": 0.9538668394088745, |
| "learning_rate": 0.0009719055357145847, |
| "loss": 0.2558, |
| "num_input_tokens_seen": 141264, |
| "step": 225 |
| }, |
| { |
| "epoch": 4.0, |
| "eval_loss": 0.09314573556184769, |
| "eval_runtime": 0.8469, |
| "eval_samples_per_second": 29.521, |
| "eval_steps_per_second": 8.266, |
| "num_input_tokens_seen": 142480, |
| "step": 228 |
| }, |
| { |
| "epoch": 4.035087719298246, |
| "grad_norm": 0.5259274244308472, |
| "learning_rate": 0.0009693204790223423, |
| "loss": 0.2478, |
| "num_input_tokens_seen": 143664, |
| "step": 230 |
| }, |
| { |
| "epoch": 4.12280701754386, |
| "grad_norm": 0.18078230321407318, |
| "learning_rate": 0.0009666254189437286, |
| "loss": 0.0571, |
| "num_input_tokens_seen": 146800, |
| "step": 235 |
| }, |
| { |
| "epoch": 4.2105263157894735, |
| "grad_norm": 0.40128281712532043, |
| "learning_rate": 0.0009638209871701966, |
| "loss": 0.0745, |
| "num_input_tokens_seen": 149456, |
| "step": 240 |
| }, |
| { |
| "epoch": 4.298245614035087, |
| "grad_norm": 0.10880351066589355, |
| "learning_rate": 0.0009609078410286809, |
| "loss": 0.1353, |
| "num_input_tokens_seen": 152880, |
| "step": 245 |
| }, |
| { |
| "epoch": 4.385964912280702, |
| "grad_norm": 0.33941584825515747, |
| "learning_rate": 0.0009578866633275287, |
| "loss": 0.0391, |
| "num_input_tokens_seen": 156720, |
| "step": 250 |
| }, |
| { |
| "epoch": 4.473684210526316, |
| "grad_norm": 0.24353857338428497, |
| "learning_rate": 0.0009547581621964571, |
| "loss": 0.1566, |
| "num_input_tokens_seen": 159600, |
| "step": 255 |
| }, |
| { |
| "epoch": 4.56140350877193, |
| "grad_norm": 0.34911903738975525, |
| "learning_rate": 0.0009515230709205749, |
| "loss": 0.1274, |
| "num_input_tokens_seen": 162224, |
| "step": 260 |
| }, |
| { |
| "epoch": 4.649122807017544, |
| "grad_norm": 0.14672434329986572, |
| "learning_rate": 0.0009481821477685101, |
| "loss": 0.0674, |
| "num_input_tokens_seen": 165328, |
| "step": 265 |
| }, |
| { |
| "epoch": 4.7368421052631575, |
| "grad_norm": 1.1086465120315552, |
| "learning_rate": 0.0009447361758146791, |
| "loss": 0.1621, |
| "num_input_tokens_seen": 168592, |
| "step": 270 |
| }, |
| { |
| "epoch": 4.824561403508772, |
| "grad_norm": 0.16363075375556946, |
| "learning_rate": 0.0009411859627557439, |
| "loss": 0.148, |
| "num_input_tokens_seen": 171696, |
| "step": 275 |
| }, |
| { |
| "epoch": 4.912280701754386, |
| "grad_norm": 0.6936156749725342, |
| "learning_rate": 0.0009375323407212969, |
| "loss": 0.216, |
| "num_input_tokens_seen": 174672, |
| "step": 280 |
| }, |
| { |
| "epoch": 5.0, |
| "grad_norm": 0.13603536784648895, |
| "learning_rate": 0.0009337761660788185, |
| "loss": 0.1601, |
| "num_input_tokens_seen": 177224, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.0, |
| "eval_loss": 0.1481173187494278, |
| "eval_runtime": 0.8475, |
| "eval_samples_per_second": 29.499, |
| "eval_steps_per_second": 8.26, |
| "num_input_tokens_seen": 177224, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "grad_norm": 0.12188196927309036, |
| "learning_rate": 0.0009299183192329556, |
| "loss": 0.0475, |
| "num_input_tokens_seen": 180840, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.175438596491228, |
| "grad_norm": 0.3683392405509949, |
| "learning_rate": 0.0009259597044191636, |
| "loss": 0.0633, |
| "num_input_tokens_seen": 183976, |
| "step": 295 |
| }, |
| { |
| "epoch": 5.2631578947368425, |
| "grad_norm": 0.42130985856056213, |
| "learning_rate": 0.0009219012494917644, |
| "loss": 0.1973, |
| "num_input_tokens_seen": 187016, |
| "step": 300 |
| }, |
| { |
| "epoch": 5.350877192982456, |
| "grad_norm": 0.2578742802143097, |
| "learning_rate": 0.0009177439057064682, |
| "loss": 0.0991, |
| "num_input_tokens_seen": 189480, |
| "step": 305 |
| }, |
| { |
| "epoch": 5.43859649122807, |
| "grad_norm": 0.1314030885696411, |
| "learning_rate": 0.0009134886474974092, |
| "loss": 0.132, |
| "num_input_tokens_seen": 192712, |
| "step": 310 |
| }, |
| { |
| "epoch": 5.526315789473684, |
| "grad_norm": 0.15120628476142883, |
| "learning_rate": 0.0009091364722487496, |
| "loss": 0.1531, |
| "num_input_tokens_seen": 195496, |
| "step": 315 |
| }, |
| { |
| "epoch": 5.614035087719298, |
| "grad_norm": 0.08090299367904663, |
| "learning_rate": 0.0009046884000609047, |
| "loss": 0.0416, |
| "num_input_tokens_seen": 199240, |
| "step": 320 |
| }, |
| { |
| "epoch": 5.701754385964913, |
| "grad_norm": 0.29662391543388367, |
| "learning_rate": 0.0009001454735114421, |
| "loss": 0.1052, |
| "num_input_tokens_seen": 201832, |
| "step": 325 |
| }, |
| { |
| "epoch": 5.7894736842105265, |
| "grad_norm": 1.577031135559082, |
| "learning_rate": 0.0008955087574107137, |
| "loss": 0.0604, |
| "num_input_tokens_seen": 204968, |
| "step": 330 |
| }, |
| { |
| "epoch": 5.87719298245614, |
| "grad_norm": 0.5615358352661133, |
| "learning_rate": 0.0008907793385522767, |
| "loss": 0.1132, |
| "num_input_tokens_seen": 208360, |
| "step": 335 |
| }, |
| { |
| "epoch": 5.964912280701754, |
| "grad_norm": 0.5985087752342224, |
| "learning_rate": 0.0008859583254581605, |
| "loss": 0.0544, |
| "num_input_tokens_seen": 211080, |
| "step": 340 |
| }, |
| { |
| "epoch": 6.0, |
| "eval_loss": 0.09723968058824539, |
| "eval_runtime": 0.8472, |
| "eval_samples_per_second": 29.51, |
| "eval_steps_per_second": 8.263, |
| "num_input_tokens_seen": 212000, |
| "step": 342 |
| }, |
| { |
| "epoch": 6.052631578947368, |
| "grad_norm": 0.028615077957510948, |
| "learning_rate": 0.0008810468481190428, |
| "loss": 0.0889, |
| "num_input_tokens_seen": 214176, |
| "step": 345 |
| }, |
| { |
| "epoch": 6.140350877192983, |
| "grad_norm": 0.08168599754571915, |
| "learning_rate": 0.000876046057729392, |
| "loss": 0.0667, |
| "num_input_tokens_seen": 216736, |
| "step": 350 |
| }, |
| { |
| "epoch": 6.228070175438597, |
| "grad_norm": 0.19951264560222626, |
| "learning_rate": 0.0008709571264176408, |
| "loss": 0.0484, |
| "num_input_tokens_seen": 220224, |
| "step": 355 |
| }, |
| { |
| "epoch": 6.315789473684211, |
| "grad_norm": 1.007753849029541, |
| "learning_rate": 0.0008657812469714519, |
| "loss": 0.0542, |
| "num_input_tokens_seen": 223136, |
| "step": 360 |
| }, |
| { |
| "epoch": 6.4035087719298245, |
| "grad_norm": 0.4764297306537628, |
| "learning_rate": 0.0008605196325581425, |
| "loss": 0.1475, |
| "num_input_tokens_seen": 226944, |
| "step": 365 |
| }, |
| { |
| "epoch": 6.491228070175438, |
| "grad_norm": 0.15180326998233795, |
| "learning_rate": 0.000855173516440332, |
| "loss": 0.0126, |
| "num_input_tokens_seen": 230368, |
| "step": 370 |
| }, |
| { |
| "epoch": 6.578947368421053, |
| "grad_norm": 0.42743873596191406, |
| "learning_rate": 0.000849744151686879, |
| "loss": 0.1144, |
| "num_input_tokens_seen": 233376, |
| "step": 375 |
| }, |
| { |
| "epoch": 6.666666666666667, |
| "grad_norm": 0.1333133429288864, |
| "learning_rate": 0.000844232810879176, |
| "loss": 0.0617, |
| "num_input_tokens_seen": 236384, |
| "step": 380 |
| }, |
| { |
| "epoch": 6.754385964912281, |
| "grad_norm": 0.18063558638095856, |
| "learning_rate": 0.0008386407858128706, |
| "loss": 0.0186, |
| "num_input_tokens_seen": 239584, |
| "step": 385 |
| }, |
| { |
| "epoch": 6.842105263157895, |
| "grad_norm": 0.25018367171287537, |
| "learning_rate": 0.0008329693871950843, |
| "loss": 0.168, |
| "num_input_tokens_seen": 242624, |
| "step": 390 |
| }, |
| { |
| "epoch": 6.9298245614035086, |
| "grad_norm": 0.05364036560058594, |
| "learning_rate": 0.0008272199443371966, |
| "loss": 0.0212, |
| "num_input_tokens_seen": 246304, |
| "step": 395 |
| }, |
| { |
| "epoch": 7.0, |
| "eval_loss": 0.0559244342148304, |
| "eval_runtime": 0.8472, |
| "eval_samples_per_second": 29.51, |
| "eval_steps_per_second": 8.263, |
| "num_input_tokens_seen": 248272, |
| "step": 399 |
| }, |
| { |
| "epoch": 7.017543859649122, |
| "grad_norm": 0.14856205880641937, |
| "learning_rate": 0.0008213938048432696, |
| "loss": 0.0662, |
| "num_input_tokens_seen": 248912, |
| "step": 400 |
| }, |
| { |
| "epoch": 7.105263157894737, |
| "grad_norm": 0.134119912981987, |
| "learning_rate": 0.0008154923342941862, |
| "loss": 0.0653, |
| "num_input_tokens_seen": 252016, |
| "step": 405 |
| }, |
| { |
| "epoch": 7.192982456140351, |
| "grad_norm": 0.344247043132782, |
| "learning_rate": 0.0008095169159275712, |
| "loss": 0.0297, |
| "num_input_tokens_seen": 255856, |
| "step": 410 |
| }, |
| { |
| "epoch": 7.280701754385965, |
| "grad_norm": 0.43787074089050293, |
| "learning_rate": 0.0008034689503135784, |
| "loss": 0.065, |
| "num_input_tokens_seen": 258800, |
| "step": 415 |
| }, |
| { |
| "epoch": 7.368421052631579, |
| "grad_norm": 0.05480087921023369, |
| "learning_rate": 0.0007973498550266114, |
| "loss": 0.0156, |
| "num_input_tokens_seen": 261584, |
| "step": 420 |
| }, |
| { |
| "epoch": 7.456140350877193, |
| "grad_norm": 0.06517962366342545, |
| "learning_rate": 0.0007911610643130608, |
| "loss": 0.0847, |
| "num_input_tokens_seen": 265168, |
| "step": 425 |
| }, |
| { |
| "epoch": 7.543859649122807, |
| "grad_norm": 0.013027893379330635, |
| "learning_rate": 0.0007849040287551332, |
| "loss": 0.0425, |
| "num_input_tokens_seen": 268240, |
| "step": 430 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "grad_norm": 0.175403892993927, |
| "learning_rate": 0.000778580214930851, |
| "loss": 0.0193, |
| "num_input_tokens_seen": 271728, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.719298245614035, |
| "grad_norm": 0.028361421078443527, |
| "learning_rate": 0.0007721911050703032, |
| "loss": 0.0406, |
| "num_input_tokens_seen": 274736, |
| "step": 440 |
| }, |
| { |
| "epoch": 7.807017543859649, |
| "grad_norm": 0.11488582193851471, |
| "learning_rate": 0.000765738196708228, |
| "loss": 0.0237, |
| "num_input_tokens_seen": 278096, |
| "step": 445 |
| }, |
| { |
| "epoch": 7.894736842105263, |
| "grad_norm": 0.0711611956357956, |
| "learning_rate": 0.0007592230023330069, |
| "loss": 0.0181, |
| "num_input_tokens_seen": 280592, |
| "step": 450 |
| }, |
| { |
| "epoch": 7.982456140350877, |
| "grad_norm": 1.2116117477416992, |
| "learning_rate": 0.000752647049032155, |
| "loss": 0.0503, |
| "num_input_tokens_seen": 284144, |
| "step": 455 |
| }, |
| { |
| "epoch": 8.0, |
| "eval_loss": 0.0554032064974308, |
| "eval_runtime": 0.8474, |
| "eval_samples_per_second": 29.502, |
| "eval_steps_per_second": 8.261, |
| "num_input_tokens_seen": 284248, |
| "step": 456 |
| }, |
| { |
| "epoch": 8.070175438596491, |
| "grad_norm": 0.6429955959320068, |
| "learning_rate": 0.0007460118781343892, |
| "loss": 0.0392, |
| "num_input_tokens_seen": 287128, |
| "step": 460 |
| }, |
| { |
| "epoch": 8.157894736842104, |
| "grad_norm": 0.0051684132777154446, |
| "learning_rate": 0.000739319044848358, |
| "loss": 0.003, |
| "num_input_tokens_seen": 290744, |
| "step": 465 |
| }, |
| { |
| "epoch": 8.24561403508772, |
| "grad_norm": 0.005634957924485207, |
| "learning_rate": 0.0007325701178981183, |
| "loss": 0.038, |
| "num_input_tokens_seen": 293816, |
| "step": 470 |
| }, |
| { |
| "epoch": 8.333333333333334, |
| "grad_norm": 0.5682314038276672, |
| "learning_rate": 0.0007257666791554447, |
| "loss": 0.1005, |
| "num_input_tokens_seen": 297080, |
| "step": 475 |
| }, |
| { |
| "epoch": 8.421052631578947, |
| "grad_norm": 0.0833849161863327, |
| "learning_rate": 0.0007189103232690561, |
| "loss": 0.0303, |
| "num_input_tokens_seen": 300408, |
| "step": 480 |
| }, |
| { |
| "epoch": 8.508771929824562, |
| "grad_norm": 0.6095874309539795, |
| "learning_rate": 0.0007120026572908484, |
| "loss": 0.0492, |
| "num_input_tokens_seen": 303384, |
| "step": 485 |
| }, |
| { |
| "epoch": 8.596491228070175, |
| "grad_norm": 0.21416215598583221, |
| "learning_rate": 0.0007050453002992201, |
| "loss": 0.0229, |
| "num_input_tokens_seen": 306232, |
| "step": 490 |
| }, |
| { |
| "epoch": 8.68421052631579, |
| "grad_norm": 0.04654688388109207, |
| "learning_rate": 0.0006980398830195785, |
| "loss": 0.0107, |
| "num_input_tokens_seen": 309816, |
| "step": 495 |
| }, |
| { |
| "epoch": 8.771929824561404, |
| "grad_norm": 0.016985950991511345, |
| "learning_rate": 0.000690988047442116, |
| "loss": 0.0063, |
| "num_input_tokens_seen": 312408, |
| "step": 500 |
| }, |
| { |
| "epoch": 8.859649122807017, |
| "grad_norm": 0.1783420890569687, |
| "learning_rate": 0.0006838914464369467, |
| "loss": 0.0072, |
| "num_input_tokens_seen": 315416, |
| "step": 505 |
| }, |
| { |
| "epoch": 8.947368421052632, |
| "grad_norm": 0.1028895154595375, |
| "learning_rate": 0.0006767517433666918, |
| "loss": 0.0561, |
| "num_input_tokens_seen": 318392, |
| "step": 510 |
| }, |
| { |
| "epoch": 9.0, |
| "eval_loss": 0.09938608109951019, |
| "eval_runtime": 0.8472, |
| "eval_samples_per_second": 29.509, |
| "eval_steps_per_second": 8.263, |
| "num_input_tokens_seen": 319488, |
| "step": 513 |
| }, |
| { |
| "epoch": 9.035087719298245, |
| "grad_norm": 0.05689363181591034, |
| "learning_rate": 0.0006695706116966074, |
| "loss": 0.0023, |
| "num_input_tokens_seen": 320832, |
| "step": 515 |
| }, |
| { |
| "epoch": 9.12280701754386, |
| "grad_norm": 0.04645015299320221, |
| "learning_rate": 0.0006623497346023419, |
| "loss": 0.0047, |
| "num_input_tokens_seen": 323840, |
| "step": 520 |
| }, |
| { |
| "epoch": 9.210526315789474, |
| "grad_norm": 0.009710798040032387, |
| "learning_rate": 0.0006550908045754194, |
| "loss": 0.0119, |
| "num_input_tokens_seen": 327200, |
| "step": 525 |
| }, |
| { |
| "epoch": 9.298245614035087, |
| "grad_norm": 0.1202242448925972, |
| "learning_rate": 0.0006477955230265393, |
| "loss": 0.0102, |
| "num_input_tokens_seen": 330208, |
| "step": 530 |
| }, |
| { |
| "epoch": 9.385964912280702, |
| "grad_norm": 0.007562646176666021, |
| "learning_rate": 0.0006404655998867848, |
| "loss": 0.0008, |
| "num_input_tokens_seen": 332864, |
| "step": 535 |
| }, |
| { |
| "epoch": 9.473684210526315, |
| "grad_norm": 0.007219177670776844, |
| "learning_rate": 0.0006331027532068335, |
| "loss": 0.0952, |
| "num_input_tokens_seen": 336224, |
| "step": 540 |
| }, |
| { |
| "epoch": 9.56140350877193, |
| "grad_norm": 0.01520033460110426, |
| "learning_rate": 0.0006257087087542672, |
| "loss": 0.004, |
| "num_input_tokens_seen": 339392, |
| "step": 545 |
| }, |
| { |
| "epoch": 9.649122807017545, |
| "grad_norm": 0.02746237814426422, |
| "learning_rate": 0.0006182851996090712, |
| "loss": 0.0089, |
| "num_input_tokens_seen": 342624, |
| "step": 550 |
| }, |
| { |
| "epoch": 9.736842105263158, |
| "grad_norm": 0.37325340509414673, |
| "learning_rate": 0.0006108339657574193, |
| "loss": 0.0381, |
| "num_input_tokens_seen": 345472, |
| "step": 555 |
| }, |
| { |
| "epoch": 9.824561403508772, |
| "grad_norm": 0.22606229782104492, |
| "learning_rate": 0.000603356753683842, |
| "loss": 0.0288, |
| "num_input_tokens_seen": 348160, |
| "step": 560 |
| }, |
| { |
| "epoch": 9.912280701754385, |
| "grad_norm": 0.009390784427523613, |
| "learning_rate": 0.0005958553159618693, |
| "loss": 0.003, |
| "num_input_tokens_seen": 351456, |
| "step": 565 |
| }, |
| { |
| "epoch": 10.0, |
| "grad_norm": 0.03701547160744667, |
| "learning_rate": 0.0005883314108432481, |
| "loss": 0.0031, |
| "num_input_tokens_seen": 354472, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.0, |
| "eval_loss": 0.07178983837366104, |
| "eval_runtime": 0.8454, |
| "eval_samples_per_second": 29.572, |
| "eval_steps_per_second": 8.28, |
| "num_input_tokens_seen": 354472, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.087719298245615, |
| "grad_norm": 0.013904523104429245, |
| "learning_rate": 0.0005807868018458274, |
| "loss": 0.0038, |
| "num_input_tokens_seen": 358024, |
| "step": 575 |
| }, |
| { |
| "epoch": 10.175438596491228, |
| "grad_norm": 0.0030093665700405836, |
| "learning_rate": 0.0005732232573402109, |
| "loss": 0.0058, |
| "num_input_tokens_seen": 361192, |
| "step": 580 |
| }, |
| { |
| "epoch": 10.263157894736842, |
| "grad_norm": 0.0005254853167571127, |
| "learning_rate": 0.0005656425501352691, |
| "loss": 0.0044, |
| "num_input_tokens_seen": 363816, |
| "step": 585 |
| }, |
| { |
| "epoch": 10.350877192982455, |
| "grad_norm": 0.005843406543135643, |
| "learning_rate": 0.0005580464570626152, |
| "loss": 0.001, |
| "num_input_tokens_seen": 366440, |
| "step": 590 |
| }, |
| { |
| "epoch": 10.43859649122807, |
| "grad_norm": 0.05471239984035492, |
| "learning_rate": 0.0005504367585601342, |
| "loss": 0.0009, |
| "num_input_tokens_seen": 369672, |
| "step": 595 |
| }, |
| { |
| "epoch": 10.526315789473685, |
| "grad_norm": 0.010388413444161415, |
| "learning_rate": 0.0005428152382546695, |
| "loss": 0.0006, |
| "num_input_tokens_seen": 372808, |
| "step": 600 |
| }, |
| { |
| "epoch": 10.614035087719298, |
| "grad_norm": 0.015497655607759953, |
| "learning_rate": 0.0005351836825439609, |
| "loss": 0.0027, |
| "num_input_tokens_seen": 375560, |
| "step": 605 |
| }, |
| { |
| "epoch": 10.701754385964913, |
| "grad_norm": 0.0032305310014635324, |
| "learning_rate": 0.0005275438801779327, |
| "loss": 0.0009, |
| "num_input_tokens_seen": 378792, |
| "step": 610 |
| }, |
| { |
| "epoch": 10.789473684210526, |
| "grad_norm": 0.012135961093008518, |
| "learning_rate": 0.0005198976218394321, |
| "loss": 0.0006, |
| "num_input_tokens_seen": 382312, |
| "step": 615 |
| }, |
| { |
| "epoch": 10.87719298245614, |
| "grad_norm": 0.01056765578687191, |
| "learning_rate": 0.0005122466997245124, |
| "loss": 0.0082, |
| "num_input_tokens_seen": 386088, |
| "step": 620 |
| }, |
| { |
| "epoch": 10.964912280701755, |
| "grad_norm": 0.027750033885240555, |
| "learning_rate": 0.0005045929071223632, |
| "loss": 0.0013, |
| "num_input_tokens_seen": 388840, |
| "step": 625 |
| }, |
| { |
| "epoch": 11.0, |
| "eval_loss": 0.15399591624736786, |
| "eval_runtime": 0.8473, |
| "eval_samples_per_second": 29.507, |
| "eval_steps_per_second": 8.262, |
| "num_input_tokens_seen": 389408, |
| "step": 627 |
| }, |
| { |
| "epoch": 11.052631578947368, |
| "grad_norm": 1.376333475112915, |
| "learning_rate": 0.0004969380379949836, |
| "loss": 0.0424, |
| "num_input_tokens_seen": 391200, |
| "step": 630 |
| }, |
| { |
| "epoch": 11.140350877192983, |
| "grad_norm": 0.007453666068613529, |
| "learning_rate": 0.0004892838865566986, |
| "loss": 0.0008, |
| "num_input_tokens_seen": 394880, |
| "step": 635 |
| }, |
| { |
| "epoch": 11.228070175438596, |
| "grad_norm": 0.0032238473650068045, |
| "learning_rate": 0.00048163224685361384, |
| "loss": 0.0008, |
| "num_input_tokens_seen": 398336, |
| "step": 640 |
| }, |
| { |
| "epoch": 11.31578947368421, |
| "grad_norm": 0.002195168286561966, |
| "learning_rate": 0.0004739849123431138, |
| "loss": 0.0006, |
| "num_input_tokens_seen": 401632, |
| "step": 645 |
| }, |
| { |
| "epoch": 11.403508771929825, |
| "grad_norm": 0.06381990760564804, |
| "learning_rate": 0.00046634367547349433, |
| "loss": 0.001, |
| "num_input_tokens_seen": 405120, |
| "step": 650 |
| }, |
| { |
| "epoch": 11.491228070175438, |
| "grad_norm": 0.012357674539089203, |
| "learning_rate": 0.0004587103272638339, |
| "loss": 0.0081, |
| "num_input_tokens_seen": 408192, |
| "step": 655 |
| }, |
| { |
| "epoch": 11.578947368421053, |
| "grad_norm": 0.06260731816291809, |
| "learning_rate": 0.0004510866568841981, |
| "loss": 0.0022, |
| "num_input_tokens_seen": 411360, |
| "step": 660 |
| }, |
| { |
| "epoch": 11.666666666666666, |
| "grad_norm": 0.04787437990307808, |
| "learning_rate": 0.0004434744512362797, |
| "loss": 0.0017, |
| "num_input_tokens_seen": 414144, |
| "step": 665 |
| }, |
| { |
| "epoch": 11.75438596491228, |
| "grad_norm": 0.02501210942864418, |
| "learning_rate": 0.00043587549453456836, |
| "loss": 0.0019, |
| "num_input_tokens_seen": 417760, |
| "step": 670 |
| }, |
| { |
| "epoch": 11.842105263157894, |
| "grad_norm": 0.0029899149667471647, |
| "learning_rate": 0.00042829156788815195, |
| "loss": 0.0157, |
| "num_input_tokens_seen": 420640, |
| "step": 675 |
| }, |
| { |
| "epoch": 11.929824561403509, |
| "grad_norm": 0.006241807248443365, |
| "learning_rate": 0.0004207244488832429, |
| "loss": 0.0013, |
| "num_input_tokens_seen": 423360, |
| "step": 680 |
| }, |
| { |
| "epoch": 12.0, |
| "eval_loss": 0.13970787823200226, |
| "eval_runtime": 0.8469, |
| "eval_samples_per_second": 29.518, |
| "eval_steps_per_second": 8.265, |
| "num_input_tokens_seen": 425328, |
| "step": 684 |
| }, |
| { |
| "epoch": 12.017543859649123, |
| "grad_norm": 0.01167436596006155, |
| "learning_rate": 0.00041317591116653486, |
| "loss": 0.001, |
| "num_input_tokens_seen": 426288, |
| "step": 685 |
| }, |
| { |
| "epoch": 12.105263157894736, |
| "grad_norm": 0.0012018810957670212, |
| "learning_rate": 0.00040564772402947784, |
| "loss": 0.0019, |
| "num_input_tokens_seen": 429136, |
| "step": 690 |
| }, |
| { |
| "epoch": 12.192982456140351, |
| "grad_norm": 0.0004732354427687824, |
| "learning_rate": 0.00039814165199357807, |
| "loss": 0.0006, |
| "num_input_tokens_seen": 432272, |
| "step": 695 |
| }, |
| { |
| "epoch": 12.280701754385966, |
| "grad_norm": 0.0005588581552729011, |
| "learning_rate": 0.00039065945439681213, |
| "loss": 0.0016, |
| "num_input_tokens_seen": 435760, |
| "step": 700 |
| }, |
| { |
| "epoch": 12.368421052631579, |
| "grad_norm": 0.04562971740961075, |
| "learning_rate": 0.0003832028849812607, |
| "loss": 0.0008, |
| "num_input_tokens_seen": 439312, |
| "step": 705 |
| }, |
| { |
| "epoch": 12.456140350877194, |
| "grad_norm": 0.22231784462928772, |
| "learning_rate": 0.00037577369148204934, |
| "loss": 0.0023, |
| "num_input_tokens_seen": 442640, |
| "step": 710 |
| }, |
| { |
| "epoch": 12.543859649122806, |
| "grad_norm": 0.0012393173528835177, |
| "learning_rate": 0.00036837361521770053, |
| "loss": 0.0014, |
| "num_input_tokens_seen": 445648, |
| "step": 715 |
| }, |
| { |
| "epoch": 12.631578947368421, |
| "grad_norm": 0.013922294601798058, |
| "learning_rate": 0.00036100439068198676, |
| "loss": 0.0011, |
| "num_input_tokens_seen": 448208, |
| "step": 720 |
| }, |
| { |
| "epoch": 12.719298245614034, |
| "grad_norm": 0.014475143514573574, |
| "learning_rate": 0.00035366774513738707, |
| "loss": 0.0009, |
| "num_input_tokens_seen": 451600, |
| "step": 725 |
| }, |
| { |
| "epoch": 12.807017543859649, |
| "grad_norm": 0.003514606272801757, |
| "learning_rate": 0.0003463653982102347, |
| "loss": 0.0005, |
| "num_input_tokens_seen": 454992, |
| "step": 730 |
| }, |
| { |
| "epoch": 12.894736842105264, |
| "grad_norm": 0.0005705193034373224, |
| "learning_rate": 0.00033909906148765724, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 458032, |
| "step": 735 |
| }, |
| { |
| "epoch": 12.982456140350877, |
| "grad_norm": 0.0007108288118615746, |
| "learning_rate": 0.00033187043811639863, |
| "loss": 0.0005, |
| "num_input_tokens_seen": 461104, |
| "step": 740 |
| }, |
| { |
| "epoch": 13.0, |
| "eval_loss": 0.11178340017795563, |
| "eval_runtime": 0.848, |
| "eval_samples_per_second": 29.48, |
| "eval_steps_per_second": 8.254, |
| "num_input_tokens_seen": 461216, |
| "step": 741 |
| }, |
| { |
| "epoch": 13.070175438596491, |
| "grad_norm": 0.0032430188730359077, |
| "learning_rate": 0.00032468122240362287, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 463904, |
| "step": 745 |
| }, |
| { |
| "epoch": 13.157894736842104, |
| "grad_norm": 0.0033710510469973087, |
| "learning_rate": 0.00031753309941978615, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 467328, |
| "step": 750 |
| }, |
| { |
| "epoch": 13.24561403508772, |
| "grad_norm": 0.002130727982148528, |
| "learning_rate": 0.0003104277446036764, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 469792, |
| "step": 755 |
| }, |
| { |
| "epoch": 13.333333333333334, |
| "grad_norm": 0.0034273844212293625, |
| "learning_rate": 0.00030336682336970847, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 472896, |
| "step": 760 |
| }, |
| { |
| "epoch": 13.421052631578947, |
| "grad_norm": 0.01330488920211792, |
| "learning_rate": 0.0002963519907175713, |
| "loss": 0.0006, |
| "num_input_tokens_seen": 475904, |
| "step": 765 |
| }, |
| { |
| "epoch": 13.508771929824562, |
| "grad_norm": 0.0011820456711575389, |
| "learning_rate": 0.00028938489084431363, |
| "loss": 0.0007, |
| "num_input_tokens_seen": 478848, |
| "step": 770 |
| }, |
| { |
| "epoch": 13.596491228070175, |
| "grad_norm": 0.0031454204581677914, |
| "learning_rate": 0.0002824671567589635, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 482080, |
| "step": 775 |
| }, |
| { |
| "epoch": 13.68421052631579, |
| "grad_norm": 0.0009693540050648153, |
| "learning_rate": 0.00027560040989976894, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 485440, |
| "step": 780 |
| }, |
| { |
| "epoch": 13.771929824561404, |
| "grad_norm": 0.013067127205431461, |
| "learning_rate": 0.0002687862597541523, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 488640, |
| "step": 785 |
| }, |
| { |
| "epoch": 13.859649122807017, |
| "grad_norm": 0.0015233588637784123, |
| "learning_rate": 0.0002620263034814632, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 492032, |
| "step": 790 |
| }, |
| { |
| "epoch": 13.947368421052632, |
| "grad_norm": 0.0022633865009993315, |
| "learning_rate": 0.00025532212553862446, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 495264, |
| "step": 795 |
| }, |
| { |
| "epoch": 14.0, |
| "eval_loss": 0.11026651412248611, |
| "eval_runtime": 0.8472, |
| "eval_samples_per_second": 29.509, |
| "eval_steps_per_second": 8.263, |
| "num_input_tokens_seen": 496704, |
| "step": 798 |
| }, |
| { |
| "epoch": 14.035087719298245, |
| "grad_norm": 0.002026034751906991, |
| "learning_rate": 0.000248675297308751, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 498464, |
| "step": 800 |
| }, |
| { |
| "epoch": 14.12280701754386, |
| "grad_norm": 0.001779843820258975, |
| "learning_rate": 0.00024208737673283814, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 501632, |
| "step": 805 |
| }, |
| { |
| "epoch": 14.210526315789474, |
| "grad_norm": 0.0009660604991950095, |
| "learning_rate": 0.00023555990794459542, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 504544, |
| "step": 810 |
| }, |
| { |
| "epoch": 14.298245614035087, |
| "grad_norm": 0.0004894082085229456, |
| "learning_rate": 0.00022909442090852144, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 507456, |
| "step": 815 |
| }, |
| { |
| "epoch": 14.385964912280702, |
| "grad_norm": 0.0006691705784760416, |
| "learning_rate": 0.0002226924310612956, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 511136, |
| "step": 820 |
| }, |
| { |
| "epoch": 14.473684210526315, |
| "grad_norm": 0.0027478181291371584, |
| "learning_rate": 0.00021635543895657866, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 514368, |
| "step": 825 |
| }, |
| { |
| "epoch": 14.56140350877193, |
| "grad_norm": 0.003300681710243225, |
| "learning_rate": 0.00021008492991329863, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 517248, |
| "step": 830 |
| }, |
| { |
| "epoch": 14.649122807017545, |
| "grad_norm": 0.0008559192647226155, |
| "learning_rate": 0.00020388237366751006, |
| "loss": 0.0005, |
| "num_input_tokens_seen": 520160, |
| "step": 835 |
| }, |
| { |
| "epoch": 14.736842105263158, |
| "grad_norm": 0.00447432603687048, |
| "learning_rate": 0.0001977492240279035, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 523424, |
| "step": 840 |
| }, |
| { |
| "epoch": 14.824561403508772, |
| "grad_norm": 0.00465738819912076, |
| "learning_rate": 0.0001916869185350505, |
| "loss": 0.0005, |
| "num_input_tokens_seen": 526240, |
| "step": 845 |
| }, |
| { |
| "epoch": 14.912280701754385, |
| "grad_norm": 0.0008756824536249042, |
| "learning_rate": 0.00018569687812445895, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 529440, |
| "step": 850 |
| }, |
| { |
| "epoch": 15.0, |
| "grad_norm": 0.0006853164522908628, |
| "learning_rate": 0.00017978050679352359, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 532504, |
| "step": 855 |
| }, |
| { |
| "epoch": 15.0, |
| "eval_loss": 0.1186794564127922, |
| "eval_runtime": 0.8475, |
| "eval_samples_per_second": 29.499, |
| "eval_steps_per_second": 8.26, |
| "num_input_tokens_seen": 532504, |
| "step": 855 |
| }, |
| { |
| "epoch": 15.087719298245615, |
| "grad_norm": 0.0005724055808968842, |
| "learning_rate": 0.00017393919127244346, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 535544, |
| "step": 860 |
| }, |
| { |
| "epoch": 15.175438596491228, |
| "grad_norm": 0.004078974947333336, |
| "learning_rate": 0.00016817430069918936, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 538744, |
| "step": 865 |
| }, |
| { |
| "epoch": 15.263157894736842, |
| "grad_norm": 0.0005777571932412684, |
| "learning_rate": 0.00016248718629859244, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 542040, |
| "step": 870 |
| }, |
| { |
| "epoch": 15.350877192982455, |
| "grad_norm": 0.00299166701734066, |
| "learning_rate": 0.00015687918106563326, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 544888, |
| "step": 875 |
| }, |
| { |
| "epoch": 15.43859649122807, |
| "grad_norm": 0.005229136906564236, |
| "learning_rate": 0.0001513515994530023, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 547640, |
| "step": 880 |
| }, |
| { |
| "epoch": 15.526315789473685, |
| "grad_norm": 0.0009054221445694566, |
| "learning_rate": 0.00014590573706300782, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 551224, |
| "step": 885 |
| }, |
| { |
| "epoch": 15.614035087719298, |
| "grad_norm": 0.002199713606387377, |
| "learning_rate": 0.00014054287034390045, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 554264, |
| "step": 890 |
| }, |
| { |
| "epoch": 15.701754385964913, |
| "grad_norm": 0.0003153159050270915, |
| "learning_rate": 0.00013526425629068966, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 557880, |
| "step": 895 |
| }, |
| { |
| "epoch": 15.789473684210526, |
| "grad_norm": 0.004834799095988274, |
| "learning_rate": 0.00013007113215051673, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 560376, |
| "step": 900 |
| }, |
| { |
| "epoch": 15.87719298245614, |
| "grad_norm": 0.0017280657775700092, |
| "learning_rate": 0.00012496471513265967, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 563864, |
| "step": 905 |
| }, |
| { |
| "epoch": 15.964912280701755, |
| "grad_norm": 0.0009963869815692306, |
| "learning_rate": 0.00011994620212323176, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 567352, |
| "step": 910 |
| }, |
| { |
| "epoch": 16.0, |
| "eval_loss": 0.11453289538621902, |
| "eval_runtime": 0.8481, |
| "eval_samples_per_second": 29.479, |
| "eval_steps_per_second": 8.254, |
| "num_input_tokens_seen": 567952, |
| "step": 912 |
| }, |
| { |
| "epoch": 16.05263157894737, |
| "grad_norm": 0.0014190895017236471, |
| "learning_rate": 0.00011501676940464645, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 570448, |
| "step": 915 |
| }, |
| { |
| "epoch": 16.140350877192983, |
| "grad_norm": 0.001433204161003232, |
| "learning_rate": 0.00011017757237990877, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 573808, |
| "step": 920 |
| }, |
| { |
| "epoch": 16.228070175438596, |
| "grad_norm": 0.0006016744882799685, |
| "learning_rate": 0.00010542974530180327, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 577200, |
| "step": 925 |
| }, |
| { |
| "epoch": 16.31578947368421, |
| "grad_norm": 0.008334847167134285, |
| "learning_rate": 0.00010077440100703683, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 580240, |
| "step": 930 |
| }, |
| { |
| "epoch": 16.403508771929825, |
| "grad_norm": 0.004835701547563076, |
| "learning_rate": 9.621263065540364e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 583120, |
| "step": 935 |
| }, |
| { |
| "epoch": 16.49122807017544, |
| "grad_norm": 0.0018706199480220675, |
| "learning_rate": 9.174550347402855e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 586032, |
| "step": 940 |
| }, |
| { |
| "epoch": 16.57894736842105, |
| "grad_norm": 0.0009366751182824373, |
| "learning_rate": 8.737406650675333e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 589392, |
| "step": 945 |
| }, |
| { |
| "epoch": 16.666666666666668, |
| "grad_norm": 0.008944787085056305, |
| "learning_rate": 8.309934436872074e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 592624, |
| "step": 950 |
| }, |
| { |
| "epoch": 16.75438596491228, |
| "grad_norm": 0.0003236531338188797, |
| "learning_rate": 7.89223390062172e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 595440, |
| "step": 955 |
| }, |
| { |
| "epoch": 16.842105263157894, |
| "grad_norm": 0.0076569146476686, |
| "learning_rate": 7.4844029461827e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 598640, |
| "step": 960 |
| }, |
| { |
| "epoch": 16.92982456140351, |
| "grad_norm": 0.000499847752507776, |
| "learning_rate": 7.086537164495688e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 601584, |
| "step": 965 |
| }, |
| { |
| "epoch": 17.0, |
| "eval_loss": 0.10751347243785858, |
| "eval_runtime": 0.8481, |
| "eval_samples_per_second": 29.478, |
| "eval_steps_per_second": 8.254, |
| "num_input_tokens_seen": 603760, |
| "step": 969 |
| }, |
| { |
| "epoch": 17.017543859649123, |
| "grad_norm": 0.0006260258960537612, |
| "learning_rate": 6.698729810778065e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 604464, |
| "step": 970 |
| }, |
| { |
| "epoch": 17.105263157894736, |
| "grad_norm": 0.0009727178839966655, |
| "learning_rate": 6.321071782666077e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 607600, |
| "step": 975 |
| }, |
| { |
| "epoch": 17.19298245614035, |
| "grad_norm": 0.005761510692536831, |
| "learning_rate": 5.953651598909332e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 611152, |
| "step": 980 |
| }, |
| { |
| "epoch": 17.280701754385966, |
| "grad_norm": 0.0003497640718705952, |
| "learning_rate": 5.596555378623125e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 614768, |
| "step": 985 |
| }, |
| { |
| "epoch": 17.36842105263158, |
| "grad_norm": 0.0003051406529266387, |
| "learning_rate": 5.2498668211030166e-05, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 618384, |
| "step": 990 |
| }, |
| { |
| "epoch": 17.45614035087719, |
| "grad_norm": 0.0022287527099251747, |
| "learning_rate": 4.913667186206722e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 621424, |
| "step": 995 |
| }, |
| { |
| "epoch": 17.54385964912281, |
| "grad_norm": 0.005819144193083048, |
| "learning_rate": 4.588035275307689e-05, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 624464, |
| "step": 1000 |
| }, |
| { |
| "epoch": 17.63157894736842, |
| "grad_norm": 0.0035049670841544867, |
| "learning_rate": 4.273047412824954e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 627824, |
| "step": 1005 |
| }, |
| { |
| "epoch": 17.719298245614034, |
| "grad_norm": 0.0033917517866939306, |
| "learning_rate": 3.9687774283335975e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 631056, |
| "step": 1010 |
| }, |
| { |
| "epoch": 17.80701754385965, |
| "grad_norm": 0.0024384823627769947, |
| "learning_rate": 3.675296639259912e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 634128, |
| "step": 1015 |
| }, |
| { |
| "epoch": 17.894736842105264, |
| "grad_norm": 0.005948670674115419, |
| "learning_rate": 3.392673834165388e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 636656, |
| "step": 1020 |
| }, |
| { |
| "epoch": 17.982456140350877, |
| "grad_norm": 0.014509391039609909, |
| "learning_rate": 3.120975256623465e-05, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 639696, |
| "step": 1025 |
| }, |
| { |
| "epoch": 18.0, |
| "eval_loss": 0.11331921070814133, |
| "eval_runtime": 0.8492, |
| "eval_samples_per_second": 29.438, |
| "eval_steps_per_second": 8.243, |
| "num_input_tokens_seen": 639784, |
| "step": 1026 |
| }, |
| { |
| "epoch": 18.07017543859649, |
| "grad_norm": 0.002213421743363142, |
| "learning_rate": 2.8602645896928293e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 642280, |
| "step": 1030 |
| }, |
| { |
| "epoch": 18.157894736842106, |
| "grad_norm": 0.001457712845876813, |
| "learning_rate": 2.610602940990797e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 645128, |
| "step": 1035 |
| }, |
| { |
| "epoch": 18.24561403508772, |
| "grad_norm": 0.0004003915237262845, |
| "learning_rate": 2.3720488283703547e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 647528, |
| "step": 1040 |
| }, |
| { |
| "epoch": 18.333333333333332, |
| "grad_norm": 0.005710965022444725, |
| "learning_rate": 2.144658166204294e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 651560, |
| "step": 1045 |
| }, |
| { |
| "epoch": 18.42105263157895, |
| "grad_norm": 0.01610150933265686, |
| "learning_rate": 1.9284842522794942e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 654568, |
| "step": 1050 |
| }, |
| { |
| "epoch": 18.50877192982456, |
| "grad_norm": 0.007621182128787041, |
| "learning_rate": 1.7235777553045283e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 657448, |
| "step": 1055 |
| }, |
| { |
| "epoch": 18.596491228070175, |
| "grad_norm": 0.007080764044076204, |
| "learning_rate": 1.5299867030334813e-05, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 660392, |
| "step": 1060 |
| }, |
| { |
| "epoch": 18.68421052631579, |
| "grad_norm": 0.0010832418920472264, |
| "learning_rate": 1.3477564710088097e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 663656, |
| "step": 1065 |
| }, |
| { |
| "epoch": 18.771929824561404, |
| "grad_norm": 0.0006301250541582704, |
| "learning_rate": 1.1769297719258221e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 667208, |
| "step": 1070 |
| }, |
| { |
| "epoch": 18.859649122807017, |
| "grad_norm": 0.00047135481145232916, |
| "learning_rate": 1.0175466456213034e-05, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 670568, |
| "step": 1075 |
| }, |
| { |
| "epoch": 18.94736842105263, |
| "grad_norm": 0.004568538162857294, |
| "learning_rate": 8.696444496886502e-06, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 674024, |
| "step": 1080 |
| }, |
| { |
| "epoch": 19.0, |
| "eval_loss": 0.11408393085002899, |
| "eval_runtime": 0.8501, |
| "eval_samples_per_second": 29.409, |
| "eval_steps_per_second": 8.234, |
| "num_input_tokens_seen": 675800, |
| "step": 1083 |
| }, |
| { |
| "epoch": 19.035087719298247, |
| "grad_norm": 0.005523706320673227, |
| "learning_rate": 7.332578507216469e-06, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 676920, |
| "step": 1085 |
| }, |
| { |
| "epoch": 19.12280701754386, |
| "grad_norm": 0.01047577615827322, |
| "learning_rate": 6.084188161890325e-06, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 679896, |
| "step": 1090 |
| }, |
| { |
| "epoch": 19.210526315789473, |
| "grad_norm": 0.000606419169344008, |
| "learning_rate": 4.95156606941688e-06, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 683288, |
| "step": 1095 |
| }, |
| { |
| "epoch": 19.29824561403509, |
| "grad_norm": 0.003566417610272765, |
| "learning_rate": 3.9349777035421194e-06, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 686168, |
| "step": 1100 |
| }, |
| { |
| "epoch": 19.385964912280702, |
| "grad_norm": 0.0005153213860467076, |
| "learning_rate": 3.034661341025258e-06, |
| "loss": 0.0001, |
| "num_input_tokens_seen": 689016, |
| "step": 1105 |
| }, |
| { |
| "epoch": 19.473684210526315, |
| "grad_norm": 0.0054157329723238945, |
| "learning_rate": 2.250828005789518e-06, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 692376, |
| "step": 1110 |
| }, |
| { |
| "epoch": 19.56140350877193, |
| "grad_norm": 0.0025253540370613337, |
| "learning_rate": 1.5836614194602028e-06, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 695480, |
| "step": 1115 |
| }, |
| { |
| "epoch": 19.649122807017545, |
| "grad_norm": 0.0005263139610178769, |
| "learning_rate": 1.033317958302693e-06, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 698424, |
| "step": 1120 |
| }, |
| { |
| "epoch": 19.736842105263158, |
| "grad_norm": 0.0031342417933046818, |
| "learning_rate": 5.999266165694906e-07, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 701592, |
| "step": 1125 |
| }, |
| { |
| "epoch": 19.82456140350877, |
| "grad_norm": 0.005905716679990292, |
| "learning_rate": 2.8358897626556966e-07, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 704504, |
| "step": 1130 |
| }, |
| { |
| "epoch": 19.912280701754387, |
| "grad_norm": 0.007031762972474098, |
| "learning_rate": 8.437918333864537e-08, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 707992, |
| "step": 1135 |
| }, |
| { |
| "epoch": 20.0, |
| "grad_norm": 0.0011116194073110819, |
| "learning_rate": 2.343930299963937e-09, |
| "loss": 0.0002, |
| "num_input_tokens_seen": 711112, |
| "step": 1140 |
| }, |
| { |
| "epoch": 20.0, |
| "eval_loss": 0.11126314103603363, |
| "eval_runtime": 0.8518, |
| "eval_samples_per_second": 29.351, |
| "eval_steps_per_second": 8.218, |
| "num_input_tokens_seen": 711112, |
| "step": 1140 |
| }, |
| { |
| "epoch": 20.0, |
| "num_input_tokens_seen": 711112, |
| "step": 1140, |
| "total_flos": 3.2021051960131584e+16, |
| "train_loss": 0.11695527727077429, |
| "train_runtime": 288.6328, |
| "train_samples_per_second": 15.591, |
| "train_steps_per_second": 3.95 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 1140, |
| "num_input_tokens_seen": 711112, |
| "num_train_epochs": 20, |
| "save_steps": 57, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 3.2021051960131584e+16, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |